repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
davidrpugh/pyCollocation
|
pycollocation/solvers/solvers.py
|
SolverLike._compute_residuals
|
python
|
def _compute_residuals(self, coefs_array, basis_kwargs, boundary_points, nodes, problem):
coefs_list = self._array_to_list(coefs_array, problem.number_odes)
derivs, funcs = self._construct_approximation(basis_kwargs, coefs_list)
resids = self._assess_approximation(boundary_points, derivs, funcs,
nodes, problem)
return resids
|
Return collocation residuals.
Parameters
----------
coefs_array : numpy.ndarray
basis_kwargs : dict
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/solvers/solvers.py#L124-L143
| null |
class SolverLike(object):
"""
Class describing the protocol the all SolverLike objects should satisfy.
Notes
-----
Subclasses should implement `solve` method as described below.
"""
@property
def basis_functions(self):
r"""
Functions used to approximate the solution to a boundary value problem.
:getter: Return the current basis functions.
:type: `basis_functions.BasisFunctions`
"""
return self._basis_functions
@staticmethod
def _array_to_list(coefs_array, indices_or_sections, axis=0):
"""Split an array into a list of arrays."""
return np.split(coefs_array, indices_or_sections, axis)
@staticmethod
def _evaluate_functions(funcs, points):
"""Evaluate a list of functions at some points."""
return [func(points) for func in funcs]
@classmethod
def _evaluate_rhs(cls, funcs, nodes, problem):
"""
Compute the value of the right-hand side of the system of ODEs.
Parameters
----------
basis_funcs : list(function)
nodes : numpy.ndarray
problem : TwoPointBVPLike
Returns
-------
evaluated_rhs : list(float)
"""
evald_funcs = cls._evaluate_functions(funcs, nodes)
evald_rhs = problem.rhs(nodes, *evald_funcs, **problem.params)
return evald_rhs
@classmethod
def _lower_boundary_residual(cls, funcs, problem, ts):
evald_funcs = cls._evaluate_functions(funcs, ts)
return problem.bcs_lower(ts, *evald_funcs, **problem.params)
@classmethod
def _upper_boundary_residual(cls, funcs, problem, ts):
evald_funcs = cls._evaluate_functions(funcs, ts)
return problem.bcs_upper(ts, *evald_funcs, **problem.params)
@classmethod
def _compute_boundary_residuals(cls, boundary_points, funcs, problem):
boundary_residuals = []
if problem.bcs_lower is not None:
residual = cls._lower_boundary_residual_factory(funcs, problem)
boundary_residuals.append(residual(boundary_points[0]))
if problem.bcs_upper is not None:
residual = cls._upper_boundary_residual_factory(funcs, problem)
boundary_residuals.append(residual(boundary_points[1]))
return boundary_residuals
@classmethod
def _compute_interior_residuals(cls, derivs, funcs, nodes, problem):
interior_residuals = cls._interior_residuals_factory(derivs, funcs, problem)
residuals = interior_residuals(nodes)
return residuals
@classmethod
def _interior_residuals(cls, derivs, funcs, problem, ts):
evaluated_lhs = cls._evaluate_functions(derivs, ts)
evaluated_rhs = cls._evaluate_rhs(funcs, ts, problem)
return [lhs - rhs for lhs, rhs in zip(evaluated_lhs, evaluated_rhs)]
@classmethod
def _interior_residuals_factory(cls, derivs, funcs, problem):
return functools.partial(cls._interior_residuals, derivs, funcs, problem)
@classmethod
def _lower_boundary_residual_factory(cls, funcs, problem):
return functools.partial(cls._lower_boundary_residual, funcs, problem)
@classmethod
def _upper_boundary_residual_factory(cls, funcs, problem):
return functools.partial(cls._upper_boundary_residual, funcs, problem)
def _assess_approximation(self, boundary_points, derivs, funcs, nodes, problem):
"""
Parameters
----------
basis_derivs : list(function)
basis_funcs : list(function)
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
interior_residuals = self._compute_interior_residuals(derivs, funcs,
nodes, problem)
boundary_residuals = self._compute_boundary_residuals(boundary_points,
funcs, problem)
return np.hstack(interior_residuals + boundary_residuals)
def _construct_approximation(self, basis_kwargs, coefs_list):
"""
Construct a collection of derivatives and functions that approximate
the solution to the boundary value problem.
Parameters
----------
basis_kwargs : dict(str: )
coefs_list : list(numpy.ndarray)
Returns
-------
basis_derivs : list(function)
basis_funcs : list(function)
"""
derivs = self._construct_derivatives(coefs_list, **basis_kwargs)
funcs = self._construct_functions(coefs_list, **basis_kwargs)
return derivs, funcs
def _construct_derivatives(self, coefs, **kwargs):
"""Return a list of derivatives given a list of coefficients."""
return [self.basis_functions.derivatives_factory(coef, **kwargs) for coef in coefs]
def _construct_functions(self, coefs, **kwargs):
"""Return a list of functions given a list of coefficients."""
return [self.basis_functions.functions_factory(coef, **kwargs) for coef in coefs]
def _solution_factory(self, basis_kwargs, coefs_array, nodes, problem, result):
"""
Construct a representation of the solution to the boundary value problem.
Parameters
----------
basis_kwargs : dict(str : )
coefs_array : numpy.ndarray
problem : TwoPointBVPLike
result : OptimizeResult
Returns
-------
solution : SolutionLike
"""
soln_coefs = self._array_to_list(coefs_array, problem.number_odes)
soln_derivs = self._construct_derivatives(soln_coefs, **basis_kwargs)
soln_funcs = self._construct_functions(soln_coefs, **basis_kwargs)
soln_residual_func = self._interior_residuals_factory(soln_derivs,
soln_funcs,
problem)
solution = solutions.Solution(basis_kwargs, soln_funcs, nodes, problem,
soln_residual_func, result)
return solution
def solve(self, basis_kwargs, boundary_points, coefs_array, nodes, problem,
**solver_options):
"""
Solve a boundary value problem using the collocation method.
Parameters
----------
basis_kwargs : dict
Dictionary of keyword arguments used to build basis functions.
coefs_array : numpy.ndarray
Array of coefficients for basis functions defining the initial
condition.
problem : bvp.TwoPointBVPLike
A two-point boundary value problem (BVP) to solve.
solver_options : dict
Dictionary of options to pass to the non-linear equation solver.
Return
------
solution: solutions.SolutionLike
An instance of the SolutionLike class representing the solution to
the two-point boundary value problem (BVP)
Notes
-----
"""
raise NotImplementedError
|
davidrpugh/pyCollocation
|
pycollocation/solvers/solvers.py
|
SolverLike._construct_approximation
|
python
|
def _construct_approximation(self, basis_kwargs, coefs_list):
derivs = self._construct_derivatives(coefs_list, **basis_kwargs)
funcs = self._construct_functions(coefs_list, **basis_kwargs)
return derivs, funcs
|
Construct a collection of derivatives and functions that approximate
the solution to the boundary value problem.
Parameters
----------
basis_kwargs : dict(str: )
coefs_list : list(numpy.ndarray)
Returns
-------
basis_derivs : list(function)
basis_funcs : list(function)
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/solvers/solvers.py#L145-L163
| null |
class SolverLike(object):
"""
Class describing the protocol the all SolverLike objects should satisfy.
Notes
-----
Subclasses should implement `solve` method as described below.
"""
@property
def basis_functions(self):
r"""
Functions used to approximate the solution to a boundary value problem.
:getter: Return the current basis functions.
:type: `basis_functions.BasisFunctions`
"""
return self._basis_functions
@staticmethod
def _array_to_list(coefs_array, indices_or_sections, axis=0):
"""Split an array into a list of arrays."""
return np.split(coefs_array, indices_or_sections, axis)
@staticmethod
def _evaluate_functions(funcs, points):
"""Evaluate a list of functions at some points."""
return [func(points) for func in funcs]
@classmethod
def _evaluate_rhs(cls, funcs, nodes, problem):
"""
Compute the value of the right-hand side of the system of ODEs.
Parameters
----------
basis_funcs : list(function)
nodes : numpy.ndarray
problem : TwoPointBVPLike
Returns
-------
evaluated_rhs : list(float)
"""
evald_funcs = cls._evaluate_functions(funcs, nodes)
evald_rhs = problem.rhs(nodes, *evald_funcs, **problem.params)
return evald_rhs
@classmethod
def _lower_boundary_residual(cls, funcs, problem, ts):
evald_funcs = cls._evaluate_functions(funcs, ts)
return problem.bcs_lower(ts, *evald_funcs, **problem.params)
@classmethod
def _upper_boundary_residual(cls, funcs, problem, ts):
evald_funcs = cls._evaluate_functions(funcs, ts)
return problem.bcs_upper(ts, *evald_funcs, **problem.params)
@classmethod
def _compute_boundary_residuals(cls, boundary_points, funcs, problem):
boundary_residuals = []
if problem.bcs_lower is not None:
residual = cls._lower_boundary_residual_factory(funcs, problem)
boundary_residuals.append(residual(boundary_points[0]))
if problem.bcs_upper is not None:
residual = cls._upper_boundary_residual_factory(funcs, problem)
boundary_residuals.append(residual(boundary_points[1]))
return boundary_residuals
@classmethod
def _compute_interior_residuals(cls, derivs, funcs, nodes, problem):
interior_residuals = cls._interior_residuals_factory(derivs, funcs, problem)
residuals = interior_residuals(nodes)
return residuals
@classmethod
def _interior_residuals(cls, derivs, funcs, problem, ts):
evaluated_lhs = cls._evaluate_functions(derivs, ts)
evaluated_rhs = cls._evaluate_rhs(funcs, ts, problem)
return [lhs - rhs for lhs, rhs in zip(evaluated_lhs, evaluated_rhs)]
@classmethod
def _interior_residuals_factory(cls, derivs, funcs, problem):
return functools.partial(cls._interior_residuals, derivs, funcs, problem)
@classmethod
def _lower_boundary_residual_factory(cls, funcs, problem):
return functools.partial(cls._lower_boundary_residual, funcs, problem)
@classmethod
def _upper_boundary_residual_factory(cls, funcs, problem):
return functools.partial(cls._upper_boundary_residual, funcs, problem)
def _assess_approximation(self, boundary_points, derivs, funcs, nodes, problem):
"""
Parameters
----------
basis_derivs : list(function)
basis_funcs : list(function)
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
interior_residuals = self._compute_interior_residuals(derivs, funcs,
nodes, problem)
boundary_residuals = self._compute_boundary_residuals(boundary_points,
funcs, problem)
return np.hstack(interior_residuals + boundary_residuals)
def _compute_residuals(self, coefs_array, basis_kwargs, boundary_points, nodes, problem):
"""
Return collocation residuals.
Parameters
----------
coefs_array : numpy.ndarray
basis_kwargs : dict
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
coefs_list = self._array_to_list(coefs_array, problem.number_odes)
derivs, funcs = self._construct_approximation(basis_kwargs, coefs_list)
resids = self._assess_approximation(boundary_points, derivs, funcs,
nodes, problem)
return resids
def _construct_derivatives(self, coefs, **kwargs):
"""Return a list of derivatives given a list of coefficients."""
return [self.basis_functions.derivatives_factory(coef, **kwargs) for coef in coefs]
def _construct_functions(self, coefs, **kwargs):
"""Return a list of functions given a list of coefficients."""
return [self.basis_functions.functions_factory(coef, **kwargs) for coef in coefs]
def _solution_factory(self, basis_kwargs, coefs_array, nodes, problem, result):
"""
Construct a representation of the solution to the boundary value problem.
Parameters
----------
basis_kwargs : dict(str : )
coefs_array : numpy.ndarray
problem : TwoPointBVPLike
result : OptimizeResult
Returns
-------
solution : SolutionLike
"""
soln_coefs = self._array_to_list(coefs_array, problem.number_odes)
soln_derivs = self._construct_derivatives(soln_coefs, **basis_kwargs)
soln_funcs = self._construct_functions(soln_coefs, **basis_kwargs)
soln_residual_func = self._interior_residuals_factory(soln_derivs,
soln_funcs,
problem)
solution = solutions.Solution(basis_kwargs, soln_funcs, nodes, problem,
soln_residual_func, result)
return solution
def solve(self, basis_kwargs, boundary_points, coefs_array, nodes, problem,
**solver_options):
"""
Solve a boundary value problem using the collocation method.
Parameters
----------
basis_kwargs : dict
Dictionary of keyword arguments used to build basis functions.
coefs_array : numpy.ndarray
Array of coefficients for basis functions defining the initial
condition.
problem : bvp.TwoPointBVPLike
A two-point boundary value problem (BVP) to solve.
solver_options : dict
Dictionary of options to pass to the non-linear equation solver.
Return
------
solution: solutions.SolutionLike
An instance of the SolutionLike class representing the solution to
the two-point boundary value problem (BVP)
Notes
-----
"""
raise NotImplementedError
|
davidrpugh/pyCollocation
|
pycollocation/solvers/solvers.py
|
SolverLike._construct_derivatives
|
python
|
def _construct_derivatives(self, coefs, **kwargs):
return [self.basis_functions.derivatives_factory(coef, **kwargs) for coef in coefs]
|
Return a list of derivatives given a list of coefficients.
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/solvers/solvers.py#L165-L167
| null |
class SolverLike(object):
"""
Class describing the protocol the all SolverLike objects should satisfy.
Notes
-----
Subclasses should implement `solve` method as described below.
"""
@property
def basis_functions(self):
r"""
Functions used to approximate the solution to a boundary value problem.
:getter: Return the current basis functions.
:type: `basis_functions.BasisFunctions`
"""
return self._basis_functions
@staticmethod
def _array_to_list(coefs_array, indices_or_sections, axis=0):
"""Split an array into a list of arrays."""
return np.split(coefs_array, indices_or_sections, axis)
@staticmethod
def _evaluate_functions(funcs, points):
"""Evaluate a list of functions at some points."""
return [func(points) for func in funcs]
@classmethod
def _evaluate_rhs(cls, funcs, nodes, problem):
"""
Compute the value of the right-hand side of the system of ODEs.
Parameters
----------
basis_funcs : list(function)
nodes : numpy.ndarray
problem : TwoPointBVPLike
Returns
-------
evaluated_rhs : list(float)
"""
evald_funcs = cls._evaluate_functions(funcs, nodes)
evald_rhs = problem.rhs(nodes, *evald_funcs, **problem.params)
return evald_rhs
@classmethod
def _lower_boundary_residual(cls, funcs, problem, ts):
evald_funcs = cls._evaluate_functions(funcs, ts)
return problem.bcs_lower(ts, *evald_funcs, **problem.params)
@classmethod
def _upper_boundary_residual(cls, funcs, problem, ts):
evald_funcs = cls._evaluate_functions(funcs, ts)
return problem.bcs_upper(ts, *evald_funcs, **problem.params)
@classmethod
def _compute_boundary_residuals(cls, boundary_points, funcs, problem):
boundary_residuals = []
if problem.bcs_lower is not None:
residual = cls._lower_boundary_residual_factory(funcs, problem)
boundary_residuals.append(residual(boundary_points[0]))
if problem.bcs_upper is not None:
residual = cls._upper_boundary_residual_factory(funcs, problem)
boundary_residuals.append(residual(boundary_points[1]))
return boundary_residuals
@classmethod
def _compute_interior_residuals(cls, derivs, funcs, nodes, problem):
interior_residuals = cls._interior_residuals_factory(derivs, funcs, problem)
residuals = interior_residuals(nodes)
return residuals
@classmethod
def _interior_residuals(cls, derivs, funcs, problem, ts):
evaluated_lhs = cls._evaluate_functions(derivs, ts)
evaluated_rhs = cls._evaluate_rhs(funcs, ts, problem)
return [lhs - rhs for lhs, rhs in zip(evaluated_lhs, evaluated_rhs)]
@classmethod
def _interior_residuals_factory(cls, derivs, funcs, problem):
return functools.partial(cls._interior_residuals, derivs, funcs, problem)
@classmethod
def _lower_boundary_residual_factory(cls, funcs, problem):
return functools.partial(cls._lower_boundary_residual, funcs, problem)
@classmethod
def _upper_boundary_residual_factory(cls, funcs, problem):
return functools.partial(cls._upper_boundary_residual, funcs, problem)
def _assess_approximation(self, boundary_points, derivs, funcs, nodes, problem):
"""
Parameters
----------
basis_derivs : list(function)
basis_funcs : list(function)
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
interior_residuals = self._compute_interior_residuals(derivs, funcs,
nodes, problem)
boundary_residuals = self._compute_boundary_residuals(boundary_points,
funcs, problem)
return np.hstack(interior_residuals + boundary_residuals)
def _compute_residuals(self, coefs_array, basis_kwargs, boundary_points, nodes, problem):
"""
Return collocation residuals.
Parameters
----------
coefs_array : numpy.ndarray
basis_kwargs : dict
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
coefs_list = self._array_to_list(coefs_array, problem.number_odes)
derivs, funcs = self._construct_approximation(basis_kwargs, coefs_list)
resids = self._assess_approximation(boundary_points, derivs, funcs,
nodes, problem)
return resids
def _construct_approximation(self, basis_kwargs, coefs_list):
"""
Construct a collection of derivatives and functions that approximate
the solution to the boundary value problem.
Parameters
----------
basis_kwargs : dict(str: )
coefs_list : list(numpy.ndarray)
Returns
-------
basis_derivs : list(function)
basis_funcs : list(function)
"""
derivs = self._construct_derivatives(coefs_list, **basis_kwargs)
funcs = self._construct_functions(coefs_list, **basis_kwargs)
return derivs, funcs
def _construct_functions(self, coefs, **kwargs):
"""Return a list of functions given a list of coefficients."""
return [self.basis_functions.functions_factory(coef, **kwargs) for coef in coefs]
def _solution_factory(self, basis_kwargs, coefs_array, nodes, problem, result):
"""
Construct a representation of the solution to the boundary value problem.
Parameters
----------
basis_kwargs : dict(str : )
coefs_array : numpy.ndarray
problem : TwoPointBVPLike
result : OptimizeResult
Returns
-------
solution : SolutionLike
"""
soln_coefs = self._array_to_list(coefs_array, problem.number_odes)
soln_derivs = self._construct_derivatives(soln_coefs, **basis_kwargs)
soln_funcs = self._construct_functions(soln_coefs, **basis_kwargs)
soln_residual_func = self._interior_residuals_factory(soln_derivs,
soln_funcs,
problem)
solution = solutions.Solution(basis_kwargs, soln_funcs, nodes, problem,
soln_residual_func, result)
return solution
def solve(self, basis_kwargs, boundary_points, coefs_array, nodes, problem,
**solver_options):
"""
Solve a boundary value problem using the collocation method.
Parameters
----------
basis_kwargs : dict
Dictionary of keyword arguments used to build basis functions.
coefs_array : numpy.ndarray
Array of coefficients for basis functions defining the initial
condition.
problem : bvp.TwoPointBVPLike
A two-point boundary value problem (BVP) to solve.
solver_options : dict
Dictionary of options to pass to the non-linear equation solver.
Return
------
solution: solutions.SolutionLike
An instance of the SolutionLike class representing the solution to
the two-point boundary value problem (BVP)
Notes
-----
"""
raise NotImplementedError
|
davidrpugh/pyCollocation
|
pycollocation/solvers/solvers.py
|
SolverLike._construct_functions
|
python
|
def _construct_functions(self, coefs, **kwargs):
return [self.basis_functions.functions_factory(coef, **kwargs) for coef in coefs]
|
Return a list of functions given a list of coefficients.
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/solvers/solvers.py#L169-L171
| null |
class SolverLike(object):
"""
Class describing the protocol the all SolverLike objects should satisfy.
Notes
-----
Subclasses should implement `solve` method as described below.
"""
@property
def basis_functions(self):
r"""
Functions used to approximate the solution to a boundary value problem.
:getter: Return the current basis functions.
:type: `basis_functions.BasisFunctions`
"""
return self._basis_functions
@staticmethod
def _array_to_list(coefs_array, indices_or_sections, axis=0):
"""Split an array into a list of arrays."""
return np.split(coefs_array, indices_or_sections, axis)
@staticmethod
def _evaluate_functions(funcs, points):
"""Evaluate a list of functions at some points."""
return [func(points) for func in funcs]
@classmethod
def _evaluate_rhs(cls, funcs, nodes, problem):
"""
Compute the value of the right-hand side of the system of ODEs.
Parameters
----------
basis_funcs : list(function)
nodes : numpy.ndarray
problem : TwoPointBVPLike
Returns
-------
evaluated_rhs : list(float)
"""
evald_funcs = cls._evaluate_functions(funcs, nodes)
evald_rhs = problem.rhs(nodes, *evald_funcs, **problem.params)
return evald_rhs
@classmethod
def _lower_boundary_residual(cls, funcs, problem, ts):
evald_funcs = cls._evaluate_functions(funcs, ts)
return problem.bcs_lower(ts, *evald_funcs, **problem.params)
@classmethod
def _upper_boundary_residual(cls, funcs, problem, ts):
evald_funcs = cls._evaluate_functions(funcs, ts)
return problem.bcs_upper(ts, *evald_funcs, **problem.params)
@classmethod
def _compute_boundary_residuals(cls, boundary_points, funcs, problem):
boundary_residuals = []
if problem.bcs_lower is not None:
residual = cls._lower_boundary_residual_factory(funcs, problem)
boundary_residuals.append(residual(boundary_points[0]))
if problem.bcs_upper is not None:
residual = cls._upper_boundary_residual_factory(funcs, problem)
boundary_residuals.append(residual(boundary_points[1]))
return boundary_residuals
@classmethod
def _compute_interior_residuals(cls, derivs, funcs, nodes, problem):
interior_residuals = cls._interior_residuals_factory(derivs, funcs, problem)
residuals = interior_residuals(nodes)
return residuals
@classmethod
def _interior_residuals(cls, derivs, funcs, problem, ts):
evaluated_lhs = cls._evaluate_functions(derivs, ts)
evaluated_rhs = cls._evaluate_rhs(funcs, ts, problem)
return [lhs - rhs for lhs, rhs in zip(evaluated_lhs, evaluated_rhs)]
@classmethod
def _interior_residuals_factory(cls, derivs, funcs, problem):
return functools.partial(cls._interior_residuals, derivs, funcs, problem)
@classmethod
def _lower_boundary_residual_factory(cls, funcs, problem):
return functools.partial(cls._lower_boundary_residual, funcs, problem)
@classmethod
def _upper_boundary_residual_factory(cls, funcs, problem):
return functools.partial(cls._upper_boundary_residual, funcs, problem)
def _assess_approximation(self, boundary_points, derivs, funcs, nodes, problem):
"""
Parameters
----------
basis_derivs : list(function)
basis_funcs : list(function)
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
interior_residuals = self._compute_interior_residuals(derivs, funcs,
nodes, problem)
boundary_residuals = self._compute_boundary_residuals(boundary_points,
funcs, problem)
return np.hstack(interior_residuals + boundary_residuals)
def _compute_residuals(self, coefs_array, basis_kwargs, boundary_points, nodes, problem):
"""
Return collocation residuals.
Parameters
----------
coefs_array : numpy.ndarray
basis_kwargs : dict
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
coefs_list = self._array_to_list(coefs_array, problem.number_odes)
derivs, funcs = self._construct_approximation(basis_kwargs, coefs_list)
resids = self._assess_approximation(boundary_points, derivs, funcs,
nodes, problem)
return resids
def _construct_approximation(self, basis_kwargs, coefs_list):
"""
Construct a collection of derivatives and functions that approximate
the solution to the boundary value problem.
Parameters
----------
basis_kwargs : dict(str: )
coefs_list : list(numpy.ndarray)
Returns
-------
basis_derivs : list(function)
basis_funcs : list(function)
"""
derivs = self._construct_derivatives(coefs_list, **basis_kwargs)
funcs = self._construct_functions(coefs_list, **basis_kwargs)
return derivs, funcs
def _construct_derivatives(self, coefs, **kwargs):
"""Return a list of derivatives given a list of coefficients."""
return [self.basis_functions.derivatives_factory(coef, **kwargs) for coef in coefs]
def _solution_factory(self, basis_kwargs, coefs_array, nodes, problem, result):
"""
Construct a representation of the solution to the boundary value problem.
Parameters
----------
basis_kwargs : dict(str : )
coefs_array : numpy.ndarray
problem : TwoPointBVPLike
result : OptimizeResult
Returns
-------
solution : SolutionLike
"""
soln_coefs = self._array_to_list(coefs_array, problem.number_odes)
soln_derivs = self._construct_derivatives(soln_coefs, **basis_kwargs)
soln_funcs = self._construct_functions(soln_coefs, **basis_kwargs)
soln_residual_func = self._interior_residuals_factory(soln_derivs,
soln_funcs,
problem)
solution = solutions.Solution(basis_kwargs, soln_funcs, nodes, problem,
soln_residual_func, result)
return solution
def solve(self, basis_kwargs, boundary_points, coefs_array, nodes, problem,
**solver_options):
"""
Solve a boundary value problem using the collocation method.
Parameters
----------
basis_kwargs : dict
Dictionary of keyword arguments used to build basis functions.
coefs_array : numpy.ndarray
Array of coefficients for basis functions defining the initial
condition.
problem : bvp.TwoPointBVPLike
A two-point boundary value problem (BVP) to solve.
solver_options : dict
Dictionary of options to pass to the non-linear equation solver.
Return
------
solution: solutions.SolutionLike
An instance of the SolutionLike class representing the solution to
the two-point boundary value problem (BVP)
Notes
-----
"""
raise NotImplementedError
|
davidrpugh/pyCollocation
|
pycollocation/solvers/solvers.py
|
SolverLike._solution_factory
|
python
|
def _solution_factory(self, basis_kwargs, coefs_array, nodes, problem, result):
soln_coefs = self._array_to_list(coefs_array, problem.number_odes)
soln_derivs = self._construct_derivatives(soln_coefs, **basis_kwargs)
soln_funcs = self._construct_functions(soln_coefs, **basis_kwargs)
soln_residual_func = self._interior_residuals_factory(soln_derivs,
soln_funcs,
problem)
solution = solutions.Solution(basis_kwargs, soln_funcs, nodes, problem,
soln_residual_func, result)
return solution
|
Construct a representation of the solution to the boundary value problem.
Parameters
----------
basis_kwargs : dict(str : )
coefs_array : numpy.ndarray
problem : TwoPointBVPLike
result : OptimizeResult
Returns
-------
solution : SolutionLike
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/solvers/solvers.py#L173-L197
|
[
"def _array_to_list(coefs_array, indices_or_sections, axis=0):\n \"\"\"Split an array into a list of arrays.\"\"\"\n return np.split(coefs_array, indices_or_sections, axis)\n",
"def _interior_residuals_factory(cls, derivs, funcs, problem):\n return functools.partial(cls._interior_residuals, derivs, funcs, problem)\n",
"def _construct_derivatives(self, coefs, **kwargs):\n \"\"\"Return a list of derivatives given a list of coefficients.\"\"\"\n return [self.basis_functions.derivatives_factory(coef, **kwargs) for coef in coefs]\n",
"def _construct_functions(self, coefs, **kwargs):\n \"\"\"Return a list of functions given a list of coefficients.\"\"\"\n return [self.basis_functions.functions_factory(coef, **kwargs) for coef in coefs]\n"
] |
class SolverLike(object):
"""
Class describing the protocol the all SolverLike objects should satisfy.
Notes
-----
Subclasses should implement `solve` method as described below.
"""
@property
def basis_functions(self):
r"""
Functions used to approximate the solution to a boundary value problem.
:getter: Return the current basis functions.
:type: `basis_functions.BasisFunctions`
"""
return self._basis_functions
@staticmethod
def _array_to_list(coefs_array, indices_or_sections, axis=0):
"""Split an array into a list of arrays."""
return np.split(coefs_array, indices_or_sections, axis)
@staticmethod
def _evaluate_functions(funcs, points):
"""Evaluate a list of functions at some points."""
return [func(points) for func in funcs]
@classmethod
def _evaluate_rhs(cls, funcs, nodes, problem):
"""
Compute the value of the right-hand side of the system of ODEs.
Parameters
----------
basis_funcs : list(function)
nodes : numpy.ndarray
problem : TwoPointBVPLike
Returns
-------
evaluated_rhs : list(float)
"""
evald_funcs = cls._evaluate_functions(funcs, nodes)
evald_rhs = problem.rhs(nodes, *evald_funcs, **problem.params)
return evald_rhs
@classmethod
def _lower_boundary_residual(cls, funcs, problem, ts):
evald_funcs = cls._evaluate_functions(funcs, ts)
return problem.bcs_lower(ts, *evald_funcs, **problem.params)
@classmethod
def _upper_boundary_residual(cls, funcs, problem, ts):
evald_funcs = cls._evaluate_functions(funcs, ts)
return problem.bcs_upper(ts, *evald_funcs, **problem.params)
@classmethod
def _compute_boundary_residuals(cls, boundary_points, funcs, problem):
boundary_residuals = []
if problem.bcs_lower is not None:
residual = cls._lower_boundary_residual_factory(funcs, problem)
boundary_residuals.append(residual(boundary_points[0]))
if problem.bcs_upper is not None:
residual = cls._upper_boundary_residual_factory(funcs, problem)
boundary_residuals.append(residual(boundary_points[1]))
return boundary_residuals
@classmethod
def _compute_interior_residuals(cls, derivs, funcs, nodes, problem):
interior_residuals = cls._interior_residuals_factory(derivs, funcs, problem)
residuals = interior_residuals(nodes)
return residuals
@classmethod
def _interior_residuals(cls, derivs, funcs, problem, ts):
evaluated_lhs = cls._evaluate_functions(derivs, ts)
evaluated_rhs = cls._evaluate_rhs(funcs, ts, problem)
return [lhs - rhs for lhs, rhs in zip(evaluated_lhs, evaluated_rhs)]
@classmethod
def _interior_residuals_factory(cls, derivs, funcs, problem):
return functools.partial(cls._interior_residuals, derivs, funcs, problem)
@classmethod
def _lower_boundary_residual_factory(cls, funcs, problem):
return functools.partial(cls._lower_boundary_residual, funcs, problem)
@classmethod
def _upper_boundary_residual_factory(cls, funcs, problem):
return functools.partial(cls._upper_boundary_residual, funcs, problem)
def _assess_approximation(self, boundary_points, derivs, funcs, nodes, problem):
"""
Parameters
----------
basis_derivs : list(function)
basis_funcs : list(function)
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
interior_residuals = self._compute_interior_residuals(derivs, funcs,
nodes, problem)
boundary_residuals = self._compute_boundary_residuals(boundary_points,
funcs, problem)
return np.hstack(interior_residuals + boundary_residuals)
def _compute_residuals(self, coefs_array, basis_kwargs, boundary_points, nodes, problem):
"""
Return collocation residuals.
Parameters
----------
coefs_array : numpy.ndarray
basis_kwargs : dict
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
coefs_list = self._array_to_list(coefs_array, problem.number_odes)
derivs, funcs = self._construct_approximation(basis_kwargs, coefs_list)
resids = self._assess_approximation(boundary_points, derivs, funcs,
nodes, problem)
return resids
def _construct_approximation(self, basis_kwargs, coefs_list):
"""
Construct a collection of derivatives and functions that approximate
the solution to the boundary value problem.
Parameters
----------
basis_kwargs : dict(str: )
coefs_list : list(numpy.ndarray)
Returns
-------
basis_derivs : list(function)
basis_funcs : list(function)
"""
derivs = self._construct_derivatives(coefs_list, **basis_kwargs)
funcs = self._construct_functions(coefs_list, **basis_kwargs)
return derivs, funcs
def _construct_derivatives(self, coefs, **kwargs):
"""Return a list of derivatives given a list of coefficients."""
return [self.basis_functions.derivatives_factory(coef, **kwargs) for coef in coefs]
def _construct_functions(self, coefs, **kwargs):
"""Return a list of functions given a list of coefficients."""
return [self.basis_functions.functions_factory(coef, **kwargs) for coef in coefs]
def solve(self, basis_kwargs, boundary_points, coefs_array, nodes, problem,
**solver_options):
"""
Solve a boundary value problem using the collocation method.
Parameters
----------
basis_kwargs : dict
Dictionary of keyword arguments used to build basis functions.
coefs_array : numpy.ndarray
Array of coefficients for basis functions defining the initial
condition.
problem : bvp.TwoPointBVPLike
A two-point boundary value problem (BVP) to solve.
solver_options : dict
Dictionary of options to pass to the non-linear equation solver.
Return
------
solution: solutions.SolutionLike
An instance of the SolutionLike class representing the solution to
the two-point boundary value problem (BVP)
Notes
-----
"""
raise NotImplementedError
|
davidrpugh/pyCollocation
|
pycollocation/solvers/solvers.py
|
Solver.solve
|
python
|
def solve(self, basis_kwargs, boundary_points, coefs_array, nodes, problem,
**solver_options):
result = optimize.root(self._compute_residuals,
x0=coefs_array,
args=(basis_kwargs, boundary_points, nodes, problem),
**solver_options)
solution = self._solution_factory(basis_kwargs, result.x, nodes,
problem, result)
return solution
|
Solve a boundary value problem using the collocation method.
Parameters
----------
basis_kwargs : dict
Dictionary of keyword arguments used to build basis functions.
coefs_array : numpy.ndarray
Array of coefficients for basis functions defining the initial
condition.
problem : bvp.TwoPointBVPLike
A two-point boundary value problem (BVP) to solve.
solver_options : dict
Dictionary of options to pass to the non-linear equation solver.
Return
------
solution: solutions.SolutionLike
An instance of the SolutionLike class representing the solution to
the two-point boundary value problem (BVP)
Notes
-----
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/solvers/solvers.py#L234-L267
|
[
"def _solution_factory(self, basis_kwargs, coefs_array, nodes, problem, result):\n \"\"\"\n Construct a representation of the solution to the boundary value problem.\n\n Parameters\n ----------\n basis_kwargs : dict(str : )\n coefs_array : numpy.ndarray\n problem : TwoPointBVPLike\n result : OptimizeResult\n\n Returns\n -------\n solution : SolutionLike\n\n \"\"\"\n soln_coefs = self._array_to_list(coefs_array, problem.number_odes)\n soln_derivs = self._construct_derivatives(soln_coefs, **basis_kwargs)\n soln_funcs = self._construct_functions(soln_coefs, **basis_kwargs)\n soln_residual_func = self._interior_residuals_factory(soln_derivs,\n soln_funcs,\n problem)\n solution = solutions.Solution(basis_kwargs, soln_funcs, nodes, problem,\n soln_residual_func, result)\n return solution\n"
] |
class Solver(SolverLike):
def __init__(self, basis_functions):
self._basis_functions = basis_functions
|
davidrpugh/pyCollocation
|
pycollocation/solvers/solutions.py
|
Solution.normalize_residuals
|
python
|
def normalize_residuals(self, points):
residuals = self.evaluate_residual(points)
solutions = self.evaluate_solution(points)
return [resid / soln for resid, soln in zip(residuals, solutions)]
|
Normalize residuals by the level of the variable.
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/solvers/solutions.py#L66-L70
|
[
"def evaluate_residual(self, points):\n return self.residual_function(points)\n",
"def evaluate_solution(self, points):\n return [f(points) for f in self.functions]\n"
] |
class Solution(SolutionLike):
"""Class representing the solution to a Boundary Value Problem (BVP)."""
def __init__(self, basis_kwargs, functions, nodes, problem, residual_function, result):
"""
Initialize an instance of the Solution class.
Parameters
----------
basis_kwargs : dict
functions : list
nodes : numpy.ndarray
problem : TwoPointBVPLike
residual_function : callable
result : OptimizeResult
"""
self._basis_kwargs = basis_kwargs
self._functions = functions
self._nodes = nodes
self._problem = problem
self._residual_function = residual_function
self._result = result
def evaluate_residual(self, points):
return self.residual_function(points)
def evaluate_solution(self, points):
return [f(points) for f in self.functions]
|
davidrpugh/pyCollocation
|
pycollocation/basis_functions/polynomials.py
|
PolynomialBasis._basis_polynomial_factory
|
python
|
def _basis_polynomial_factory(cls, kind):
valid_kind = cls._validate(kind)
basis_polynomial = getattr(np.polynomial, valid_kind)
return basis_polynomial
|
Return a polynomial given some coefficients.
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/basis_functions/polynomials.py#L23-L27
| null |
class PolynomialBasis(basis_functions.BasisFunctionLike):
_valid_kinds = ['Polynomial', 'Chebyshev', 'Legendre', 'Laguerre', 'Hermite']
@staticmethod
def _basis_monomial_coefs(degree):
"""Return coefficients for a monomial of a given degree."""
return np.append(np.zeros(degree), 1)
@classmethod
@classmethod
def _validate(cls, kind):
"""Validate the kind argument."""
if kind not in cls._valid_kinds:
mesg = "'kind' must be one of {}, {}, {}, or {}."
raise ValueError(mesg.format(*cls._valid_kinds))
else:
return kind
@classmethod
def derivatives_factory(cls, coef, domain, kind, **kwargs):
"""
Given some coefficients, return a the derivative of a certain kind of
orthogonal polynomial defined over a specific domain.
"""
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial(coef, domain).deriv()
@classmethod
def fit(cls, ts, xs, degree, domain, kind):
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial.fit(ts, xs, degree, domain)
@classmethod
def functions_factory(cls, coef, domain, kind, **kwargs):
"""
Given some coefficients, return a certain kind of orthogonal polynomial
defined over a specific domain.
"""
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial(coef, domain)
@classmethod
def roots(cls, degree, domain, kind):
"""Return optimal collocation nodes for some orthogonal polynomial."""
basis_coefs = cls._basis_monomial_coefs(degree)
basis_poly = cls.functions_factory(basis_coefs, domain, kind)
return basis_poly.roots()
|
davidrpugh/pyCollocation
|
pycollocation/basis_functions/polynomials.py
|
PolynomialBasis._validate
|
python
|
def _validate(cls, kind):
if kind not in cls._valid_kinds:
mesg = "'kind' must be one of {}, {}, {}, or {}."
raise ValueError(mesg.format(*cls._valid_kinds))
else:
return kind
|
Validate the kind argument.
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/basis_functions/polynomials.py#L30-L36
| null |
class PolynomialBasis(basis_functions.BasisFunctionLike):
_valid_kinds = ['Polynomial', 'Chebyshev', 'Legendre', 'Laguerre', 'Hermite']
@staticmethod
def _basis_monomial_coefs(degree):
"""Return coefficients for a monomial of a given degree."""
return np.append(np.zeros(degree), 1)
@classmethod
def _basis_polynomial_factory(cls, kind):
"""Return a polynomial given some coefficients."""
valid_kind = cls._validate(kind)
basis_polynomial = getattr(np.polynomial, valid_kind)
return basis_polynomial
@classmethod
@classmethod
def derivatives_factory(cls, coef, domain, kind, **kwargs):
"""
Given some coefficients, return a the derivative of a certain kind of
orthogonal polynomial defined over a specific domain.
"""
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial(coef, domain).deriv()
@classmethod
def fit(cls, ts, xs, degree, domain, kind):
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial.fit(ts, xs, degree, domain)
@classmethod
def functions_factory(cls, coef, domain, kind, **kwargs):
"""
Given some coefficients, return a certain kind of orthogonal polynomial
defined over a specific domain.
"""
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial(coef, domain)
@classmethod
def roots(cls, degree, domain, kind):
"""Return optimal collocation nodes for some orthogonal polynomial."""
basis_coefs = cls._basis_monomial_coefs(degree)
basis_poly = cls.functions_factory(basis_coefs, domain, kind)
return basis_poly.roots()
|
davidrpugh/pyCollocation
|
pycollocation/basis_functions/polynomials.py
|
PolynomialBasis.derivatives_factory
|
python
|
def derivatives_factory(cls, coef, domain, kind, **kwargs):
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial(coef, domain).deriv()
|
Given some coefficients, return a the derivative of a certain kind of
orthogonal polynomial defined over a specific domain.
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/basis_functions/polynomials.py#L39-L46
| null |
class PolynomialBasis(basis_functions.BasisFunctionLike):
_valid_kinds = ['Polynomial', 'Chebyshev', 'Legendre', 'Laguerre', 'Hermite']
@staticmethod
def _basis_monomial_coefs(degree):
"""Return coefficients for a monomial of a given degree."""
return np.append(np.zeros(degree), 1)
@classmethod
def _basis_polynomial_factory(cls, kind):
"""Return a polynomial given some coefficients."""
valid_kind = cls._validate(kind)
basis_polynomial = getattr(np.polynomial, valid_kind)
return basis_polynomial
@classmethod
def _validate(cls, kind):
"""Validate the kind argument."""
if kind not in cls._valid_kinds:
mesg = "'kind' must be one of {}, {}, {}, or {}."
raise ValueError(mesg.format(*cls._valid_kinds))
else:
return kind
@classmethod
@classmethod
def fit(cls, ts, xs, degree, domain, kind):
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial.fit(ts, xs, degree, domain)
@classmethod
def functions_factory(cls, coef, domain, kind, **kwargs):
"""
Given some coefficients, return a certain kind of orthogonal polynomial
defined over a specific domain.
"""
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial(coef, domain)
@classmethod
def roots(cls, degree, domain, kind):
"""Return optimal collocation nodes for some orthogonal polynomial."""
basis_coefs = cls._basis_monomial_coefs(degree)
basis_poly = cls.functions_factory(basis_coefs, domain, kind)
return basis_poly.roots()
|
davidrpugh/pyCollocation
|
pycollocation/basis_functions/polynomials.py
|
PolynomialBasis.functions_factory
|
python
|
def functions_factory(cls, coef, domain, kind, **kwargs):
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial(coef, domain)
|
Given some coefficients, return a certain kind of orthogonal polynomial
defined over a specific domain.
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/basis_functions/polynomials.py#L54-L61
| null |
class PolynomialBasis(basis_functions.BasisFunctionLike):
_valid_kinds = ['Polynomial', 'Chebyshev', 'Legendre', 'Laguerre', 'Hermite']
@staticmethod
def _basis_monomial_coefs(degree):
"""Return coefficients for a monomial of a given degree."""
return np.append(np.zeros(degree), 1)
@classmethod
def _basis_polynomial_factory(cls, kind):
"""Return a polynomial given some coefficients."""
valid_kind = cls._validate(kind)
basis_polynomial = getattr(np.polynomial, valid_kind)
return basis_polynomial
@classmethod
def _validate(cls, kind):
"""Validate the kind argument."""
if kind not in cls._valid_kinds:
mesg = "'kind' must be one of {}, {}, {}, or {}."
raise ValueError(mesg.format(*cls._valid_kinds))
else:
return kind
@classmethod
def derivatives_factory(cls, coef, domain, kind, **kwargs):
"""
Given some coefficients, return a the derivative of a certain kind of
orthogonal polynomial defined over a specific domain.
"""
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial(coef, domain).deriv()
@classmethod
def fit(cls, ts, xs, degree, domain, kind):
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial.fit(ts, xs, degree, domain)
@classmethod
@classmethod
def roots(cls, degree, domain, kind):
"""Return optimal collocation nodes for some orthogonal polynomial."""
basis_coefs = cls._basis_monomial_coefs(degree)
basis_poly = cls.functions_factory(basis_coefs, domain, kind)
return basis_poly.roots()
|
davidrpugh/pyCollocation
|
pycollocation/basis_functions/polynomials.py
|
PolynomialBasis.roots
|
python
|
def roots(cls, degree, domain, kind):
basis_coefs = cls._basis_monomial_coefs(degree)
basis_poly = cls.functions_factory(basis_coefs, domain, kind)
return basis_poly.roots()
|
Return optimal collocation nodes for some orthogonal polynomial.
|
train
|
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/basis_functions/polynomials.py#L64-L68
| null |
class PolynomialBasis(basis_functions.BasisFunctionLike):
_valid_kinds = ['Polynomial', 'Chebyshev', 'Legendre', 'Laguerre', 'Hermite']
@staticmethod
def _basis_monomial_coefs(degree):
"""Return coefficients for a monomial of a given degree."""
return np.append(np.zeros(degree), 1)
@classmethod
def _basis_polynomial_factory(cls, kind):
"""Return a polynomial given some coefficients."""
valid_kind = cls._validate(kind)
basis_polynomial = getattr(np.polynomial, valid_kind)
return basis_polynomial
@classmethod
def _validate(cls, kind):
"""Validate the kind argument."""
if kind not in cls._valid_kinds:
mesg = "'kind' must be one of {}, {}, {}, or {}."
raise ValueError(mesg.format(*cls._valid_kinds))
else:
return kind
@classmethod
def derivatives_factory(cls, coef, domain, kind, **kwargs):
"""
Given some coefficients, return a the derivative of a certain kind of
orthogonal polynomial defined over a specific domain.
"""
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial(coef, domain).deriv()
@classmethod
def fit(cls, ts, xs, degree, domain, kind):
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial.fit(ts, xs, degree, domain)
@classmethod
def functions_factory(cls, coef, domain, kind, **kwargs):
"""
Given some coefficients, return a certain kind of orthogonal polynomial
defined over a specific domain.
"""
basis_polynomial = cls._basis_polynomial_factory(kind)
return basis_polynomial(coef, domain)
@classmethod
|
brunobord/meuhdb
|
meuhdb/core.py
|
autocommit
|
python
|
def autocommit(f):
"A decorator to commit to the storage if autocommit is set to True."
@wraps(f)
def wrapper(self, *args, **kwargs):
result = f(self, *args, **kwargs)
if self._meta.commit_ready():
self.commit()
return result
return wrapper
|
A decorator to commit to the storage if autocommit is set to True.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L18-L26
| null |
#-*- coding: utf-8 -*-
"""
MeuhDB, a database that says "meuh".
"""
from __future__ import unicode_literals
from copy import deepcopy
from functools import wraps
import os
from uuid import uuid4
import warnings
import six
from .backends import DEFAULT_BACKEND, BACKENDS
from .exceptions import BadValueError
def intersect(d1, d2):
"""Intersect dictionaries d1 and d2 by key *and* value."""
return dict((k, d1[k]) for k in d1 if k in d2 and d1[k] == d2[k])
class Meta(object):
"""Meta-information, not directly related to the database itself, but
the way it's being accessed.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
self.path = path
self.lazy_indexes = lazy_indexes
# Commits / Autocommit
self.autocommit = autocommit
self.autocommit_after = autocommit_after
# Counter
self.uses_counter = False
if self.autocommit_after is not None:
self.uses_counter = True
self.init_counter = int(self.autocommit_after)
self.counter = self.init_counter
if backend not in BACKENDS:
warnings.warn('{} backend not available, falling '
'back to standard json'.format(backend))
backend = "json"
self.backend = backend
@property
def serializer(self):
return BACKENDS[self.backend]['dumper']
@property
def deserializer(self):
return BACKENDS[self.backend]['loader']
def commit_ready(self):
if self.autocommit:
return True
if self.uses_counter:
self.counter -= 1
if self.counter:
return False
else:
# counter == 0 -> reset and commit
self.counter = self.init_counter
return True
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
intersect
|
python
|
def intersect(d1, d2):
return dict((k, d1[k]) for k in d1 if k in d2 and d1[k] == d2[k])
|
Intersect dictionaries d1 and d2 by key *and* value.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L29-L31
| null |
#-*- coding: utf-8 -*-
"""
MeuhDB, a database that says "meuh".
"""
from __future__ import unicode_literals
from copy import deepcopy
from functools import wraps
import os
from uuid import uuid4
import warnings
import six
from .backends import DEFAULT_BACKEND, BACKENDS
from .exceptions import BadValueError
def autocommit(f):
"A decorator to commit to the storage if autocommit is set to True."
@wraps(f)
def wrapper(self, *args, **kwargs):
result = f(self, *args, **kwargs)
if self._meta.commit_ready():
self.commit()
return result
return wrapper
class Meta(object):
"""Meta-information, not directly related to the database itself, but
the way it's being accessed.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
self.path = path
self.lazy_indexes = lazy_indexes
# Commits / Autocommit
self.autocommit = autocommit
self.autocommit_after = autocommit_after
# Counter
self.uses_counter = False
if self.autocommit_after is not None:
self.uses_counter = True
self.init_counter = int(self.autocommit_after)
self.counter = self.init_counter
if backend not in BACKENDS:
warnings.warn('{} backend not available, falling '
'back to standard json'.format(backend))
backend = "json"
self.backend = backend
@property
def serializer(self):
return BACKENDS[self.backend]['dumper']
@property
def deserializer(self):
return BACKENDS[self.backend]['loader']
def commit_ready(self):
if self.autocommit:
return True
if self.uses_counter:
self.counter -= 1
if self.counter:
return False
else:
# counter == 0 -> reset and commit
self.counter = self.init_counter
return True
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.set
|
python
|
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
|
Set value to the key store.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L157-L168
|
[
"def delete_from_index(self, key):\n \"Delete references from the index of the key old value(s).\"\n old_value = self.data[key]\n keys = set(old_value.keys()).intersection(self.indexes.keys())\n for index_name in keys:\n if old_value[index_name] in self.indexes[index_name]:\n del self.indexes[index_name][old_value[index_name]]\n",
"def update_index(self, key, value):\n \"Update the index with the new key/values.\"\n for k, v in value.items():\n if k in self.indexes:\n # A non-string index value switches it into a lazy one.\n if not isinstance(v, six.string_types):\n self.index_defs[k]['type'] = 'lazy'\n if v not in self.indexes[k]:\n self.indexes[k][v] = set([])\n self.indexes[k][v].add(key)\n"
] |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.insert
|
python
|
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
|
Insert value in the keystore. Return the UUID key.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L171-L175
| null |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.delete
|
python
|
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
|
Delete a `key` from the keystore.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L178-L182
|
[
"def delete_from_index(self, key):\n \"Delete references from the index of the key old value(s).\"\n old_value = self.data[key]\n keys = set(old_value.keys()).intersection(self.indexes.keys())\n for index_name in keys:\n if old_value[index_name] in self.indexes[index_name]:\n del self.indexes[index_name][old_value[index_name]]\n"
] |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.update
|
python
|
def update(self, key, value):
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
|
Update a `key` in the keystore.
If the key is non-existent, it's being created
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L185-L198
|
[
"def get(self, key):\n \"\"\"\n Return value of 'key' if it's in the database.\n Raise KeyError if not.\n \"\"\"\n return self.data[key]\n"
] |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.del_key
|
python
|
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
|
Delete the `key_to_delete` for the record found with `key`.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L208-L213
|
[
"def get(self, key):\n \"\"\"\n Return value of 'key' if it's in the database.\n Raise KeyError if not.\n \"\"\"\n return self.data[key]\n"
] |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.commit
|
python
|
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
|
Commit data to the storage.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L215-L236
|
[
"def serialize(self, obj):\n return self._meta.serializer(obj)\n"
] |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.keys_to_values
|
python
|
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
|
Return the items in the keystore with keys in `keys`.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L242-L244
| null |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.filter_keys
|
python
|
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
|
Return a set of keys filtered according to the given arguments.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L246-L261
|
[
"def simple_filter(self, key, value):\n \"Search keys whose values match with the searched values\"\n searched = {key: value}\n return set([k for k, v in self.data.items() if\n intersect(searched, v) == searched])\n"
] |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.simple_filter
|
python
|
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
|
Search keys whose values match with the searched values
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L263-L267
| null |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.filter
|
python
|
def filter(self, **kwargs):
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
|
Filter data according to the given arguments.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L269-L274
|
[
"def keys_to_values(self, keys):\n \"Return the items in the keystore with keys in `keys`.\"\n return dict((k, v) for k, v in self.data.items() if k in keys)\n",
"def filter_keys(self, **kwargs):\n \"Return a set of keys filtered according to the given arguments.\"\n self._used_index = False\n keys = set(self.data.keys())\n for key_filter, v_filter in kwargs.items():\n if key_filter in self.indexes:\n self._used_index = True\n if v_filter not in self.indexes[key_filter]:\n keys = set([])\n else:\n keys = keys.intersection(\n self.indexes[key_filter][v_filter])\n else:\n keys = keys.intersection(\n self.simple_filter(key_filter, v_filter))\n return keys\n"
] |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.delete_from_index
|
python
|
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
|
Delete references from the index of the key old value(s).
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L276-L282
| null |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.update_index
|
python
|
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
|
Update the index with the new key/values.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L284-L293
| null |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.create_index
|
python
|
def create_index(self, name, recreate=False, _type='default'):
if name not in self.indexes or recreate:
self.build_index(name, _type)
|
Create an index.
If recreate is True, recreate even if already there.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L296-L302
| null |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb.build_index
|
python
|
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
|
Build the index related to the `name`.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L305-L322
| null |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
    """
    Merge `value` (a dict) into the record stored under `key`.

    If the key does not exist yet the record is simply created.
    Raises BadValueError when `value` is not a dict.
    """
    if not isinstance(value, dict):
        # BUG FIX: the message used to claim "Values should be strings",
        # contradicting the isinstance(value, dict) check just above.
        raise BadValueError(
            'The value {} is incorrect.'
            ' Values should be dictionaries'.format(value))
    if key in self.data:
        merged = self.get(key)
        merged.update(value)
    else:
        merged = value
    # Re-store through set() so copies and indexes are handled in one place.
    self.set(key, merged)
@property
def lazy_indexes(self):
    "Return the names of the indexes whose definition type is 'lazy'."
    return {name for name, definition in self.raw['index_defs'].items()
            if definition['type'] == 'lazy'}
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Write the whole raw store to `self._meta.path` (no-op without a path)."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
# Work on a deep copy so the in-memory store stays untouched.
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes  # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
# Index values are sets in memory; turn them into lists so the
# backend serializer can encode them.
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
# File is opened in binary mode: try writing text first, fall back
# to bytes when the serializer output needs encoding (TypeError).
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
    "Return every record currently held in the keystore."
    return self.data
def keys_to_values(self, keys):
    "Return the {key: record} mapping restricted to the keys listed in `keys`."
    return {k: v for k, v in self.data.items() if k in keys}
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
# Diagnostic flag: records whether at least one filter hit an index.
self._used_index = False
# Start from every key and narrow it down filter by filter.
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
# Indexed field: either no record has this value at all...
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
# ...or the index directly yields the matching keys.
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
# Non-indexed field: fall back to a full scan via simple_filter.
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
    "Return the keys of every record whose `key` field equals `value` (full scan)."
    wanted = {key: value}
    return {k for k, v in self.data.items()
            if intersect(wanted, v) == wanted}
def filter(self, **kwargs):
    """
    Return the records matching every given keyword filter.
    """
    matching_keys = self.filter_keys(**kwargs)
    return self.keys_to_values(matching_keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def remove_index(self, idx_name):
    """
    Remove the index `idx_name` from the database, if present.

    Fix: the decorator was applied twice in the original text, which at
    best is redundant and at worst runs the autocommit bookkeeping twice
    per call; a single application is sufficient.
    """
    if idx_name in self.indexes:
        del self.indexes[idx_name]
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
brunobord/meuhdb
|
meuhdb/core.py
|
MeuhDb._clean_index
|
python
|
def _clean_index(self):
"Clean index values after loading."
for idx_name, idx_def in self.index_defs.items():
# Lazy indexes are not persisted with their values; rebuild from data.
if idx_def['type'] == 'lazy':
self.build_index(idx_name)
# Index values are serialized as lists; restore them to sets.
for index_name, values in self.indexes.items():
for value in values:
if not isinstance(values[value], set):
values[value] = set(values[value])
|
Clean index values after loading.
|
train
|
https://github.com/brunobord/meuhdb/blob/2ef2ea0b1065768d88f52bacf1b94b3d3ce3d9eb/meuhdb/core.py#L330-L338
| null |
class MeuhDb(object):
"""
MeuhDb is a key / JSON value store.
"""
def __init__(self,
path=None, autocommit=False, autocommit_after=None,
lazy_indexes=False,
backend=DEFAULT_BACKEND):
"""
Options:
* ``path``: Path to the DB filename,
* ``autocommit``: If set to True, will save data to the DB at every
'write' operation,
* ``autocommit_after``: A numeric value. If set, the database will be
committed every "n" write operations,
* ``lazy_indexes``: When set to True, when the DB is written to the
database, only the definition of the indexes is stored, not the index
values themselves. This means the DB is faster at writing times, but
will load slower, because we'll need to rebuild all indexes,
* ``backend``: Set which backend to use. Will default to the fastest
backend available, or the stdlib ``json`` module.
"""
self._meta = Meta(
path,
autocommit=autocommit, autocommit_after=autocommit_after,
lazy_indexes=lazy_indexes,
backend=backend)
self.raw = {}
self.raw['indexes'] = {}
self.raw['data'] = {}
self.raw['index_defs'] = {}
if path:
if os.path.exists(path):
try:
data = self.deserialize(open(path).read())
self.raw.update(data)
except ValueError:
pass
self._clean_index()
def serialize(self, obj):
return self._meta.serializer(obj)
def deserialize(self, obj):
return self._meta.deserializer(obj)
@property
def data(self):
"Return raw data."
return self.raw['data']
@property
def indexes(self):
"Return index data"
return self.raw['indexes']
@property
def index_defs(self):
"Return the index definitions"
return self.raw['index_defs']
def exists(self, key):
"Return True if key is in the keystore."
return key in self.data
def get(self, key):
"""
Return value of 'key' if it's in the database.
Raise KeyError if not.
"""
return self.data[key]
@autocommit
def set(self, key, value):
"Set value to the key store."
# if key already in data, update indexes
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
_value = deepcopy(value)
if key in self.data:
self.delete_from_index(key)
self.data[key] = _value
self.update_index(key, _value)
@autocommit
def insert(self, value):
"Insert value in the keystore. Return the UUID key."
key = str(uuid4())
self.set(key, value)
return key
@autocommit
def delete(self, key):
"Delete a `key` from the keystore."
if key in self.data:
self.delete_from_index(key)
del self.data[key]
@autocommit
def update(self, key, value):
"""Update a `key` in the keystore.
If the key is non-existent, it's being created
"""
if not isinstance(value, dict):
raise BadValueError(
'The value {} is incorrect.'
' Values should be strings'.format(value))
if key in self.data:
v = self.get(key)
v.update(value)
else:
v = value
self.set(key, v)
@property
def lazy_indexes(self):
return set([
idx_name for (idx_name, value) in self.raw['index_defs'].items()
if value['type'] == 'lazy'
])
@autocommit
def del_key(self, key, key_to_delete):
"Delete the `key_to_delete` for the record found with `key`."
v = self.get(key)
if key_to_delete in v:
del v[key_to_delete]
self.set(key, v)
def commit(self):
"Commit data to the storage."
if self._meta.path:
with open(self._meta.path, 'wb') as fd:
raw = deepcopy(self.raw)
# LAZY INDEX PROCESSING
# Save indexes only if not lazy
lazy_indexes = self.lazy_indexes # Keep this list safe
if not self._meta.lazy_indexes:
# Remove indexes if needed
for idx_name in lazy_indexes:
del raw['indexes'][idx_name]
for index_name, values in raw['indexes'].items():
for value, keys in values.items():
raw['indexes'][index_name][value] = list(keys)
# don't store indexes if not needed
if not raw['indexes'] or self._meta.lazy_indexes:
del raw['indexes']
try:
fd.write(six.u(self.serialize(raw)))
except TypeError:
fd.write(six.b(self.serialize(raw)))
def all(self):
"Retrieve the data from the keystore"
return self.data
def keys_to_values(self, keys):
"Return the items in the keystore with keys in `keys`."
return dict((k, v) for k, v in self.data.items() if k in keys)
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched])
def filter(self, **kwargs):
"""
Filter data according to the given arguments.
"""
keys = self.filter_keys(**kwargs)
return self.keys_to_values(keys)
def delete_from_index(self, key):
"Delete references from the index of the key old value(s)."
old_value = self.data[key]
keys = set(old_value.keys()).intersection(self.indexes.keys())
for index_name in keys:
if old_value[index_name] in self.indexes[index_name]:
del self.indexes[index_name][old_value[index_name]]
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key)
@autocommit
def create_index(self, name, recreate=False, _type='default'):
"""
Create an index.
If recreate is True, recreate even if already there.
"""
if name not in self.indexes or recreate:
self.build_index(name, _type)
@autocommit
def build_index(self, idx_name, _type='default'):
"Build the index related to the `name`."
indexes = {}
has_non_string_values = False
for key, item in self.data.items():
if idx_name in item:
value = item[idx_name]
# A non-string index value switches it into a lazy one.
if not isinstance(value, six.string_types):
has_non_string_values = True
if value not in indexes:
indexes[value] = set([])
indexes[value].add(key)
self.indexes[idx_name] = indexes
if self._meta.lazy_indexes or has_non_string_values:
# Every index is lazy
_type = 'lazy'
self.index_defs[idx_name] = {'type': _type}
@autocommit
def remove_index(self, idx_name):
"Remove an index from the database."
if idx_name in self.indexes:
del self.indexes[idx_name]
|
pasztorpisti/json-cfg
|
src/jsoncfg/tree_python.py
|
default_number_converter
|
python
|
def default_number_converter(number_str):
    """
    Convert the textual representation of a json number into its python
    equivalent: an int for integer literals, a float for everything else.
    """
    unsigned = number_str[1:] if number_str.startswith('-') else number_str
    if unsigned.isdigit():
        return int(number_str)
    # NOTE: float() also accepts 'nan'/'inf', which is wider than the json
    # standard allows (same caveat as the original FIXME).
    return float(number_str)
|
Converts the string representation of a json number into its python object equivalent, an
int, long, float or whatever type suits.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/tree_python.py#L61-L69
| null |
"""
Contains factories to be used with the ObjectBuilderParserListener in order to build
a json tree that consists of pure python objects. With these factories we can load
json string into python object hierarchies just like the python standard json.loads()
but our parser allows an extended syntax (unquoted keys, comments, trailing commas)
and this parser have other extras, for example you can provide your own dictionary
and list objects.
"""
from collections import OrderedDict
from .parser_listener import ObjectBuilderParams
class DefaultObjectCreator(object):
    """
    Factory producing json objects (dict-like containers) while the parser
    builds the python object hierarchy.
    """
    def __init__(self, dict_class=OrderedDict):
        """
        :param dict_class: Class instantiated for each json object node.
        Instances need at least __setitem__ and __contains__.
        """
        self.dict_class = dict_class
    def __call__(self, listener):
        """
        :param listener: The parser listener building the object hierarchy
        (e.g. the config parser reads listener.parser.line/column for
        per-node location info).
        :return: (container, insert_function) where insert_function(key, value)
        stores an item into the freshly created container.
        """
        container = self.dict_class()
        def insert(key, value):
            container[key] = value
        return container, insert
class DefaultArrayCreator(object):
    """
    Factory producing json arrays (list-like containers) while the parser
    builds the python object hierarchy.
    """
    def __init__(self, list_class=list):
        # Class instantiated for each json array node; needs append().
        self.list_class = list_class
    def __call__(self, listener):
        """
        :param listener: The parser listener building the object hierarchy.
        :return: (container, append_function) where append_function(item)
        appends an item to the freshly created container.
        """
        container = self.list_class()
        def append(item):
            container.append(item)
        return container, append
class DefaultStringToScalarConverter(object):
"""
A callable that converts the string representation of json scalars into python objects.
The JSONParser works only with quoted and non-quoted strings, it doesn't interpret different
types of json scalars like bool, null, number, string. It is the responsibility of this
callable to convert the strings (emitted by the parser) into their python equivalent.
"""
def __init__(self,
number_converter=default_number_converter,
scalar_const_literals=None):
"""
:param number_converter: This number converter will be called with every non-quoted
string that isn't present in the dictionary passed to the scalar_const_literals parameter.
:param scalar_const_literals: A dictionary that maps non-quoted string representation of
json scalars to any user supplied python objects.
If you don't supply this parameter then the default dictionary
that will be used is {'null': None, 'true': True, 'false': False}. Note that
you can use this parameter to easily define your own "constants" in the json file.
"""
self.number_converter = number_converter
if scalar_const_literals is None:
self.scalar_const_literals = {'null': None, 'true': True, 'false': False}
else:
self.scalar_const_literals = scalar_const_literals
_not_scalar_const_literal = object()
def __call__(self, listener, scalar_str, scalar_str_quoted):
"""
:return: After interpreting the string representation of the parsed scalar (scalar_str, and
scalar_str_quoted) you have to convert it into a python object that will be inserted in the
object hierarchy.
"""
if scalar_str_quoted:
return scalar_str
value = self.scalar_const_literals.get(scalar_str, self._not_scalar_const_literal)
if value is self._not_scalar_const_literal:
try:
value = self.number_converter(scalar_str)
except ValueError:
listener.error('Invalid json scalar: "%s"' % (scalar_str,))
return value
class PythonObjectBuilderParams(ObjectBuilderParams):
default_object_creator = DefaultObjectCreator()
default_array_creator = DefaultArrayCreator()
default_string_to_scalar_converter = DefaultStringToScalarConverter()
|
pasztorpisti/json-cfg
|
src/jsoncfg/text_encoding.py
|
load_utf_text_file
|
python
|
def load_utf_text_file(file_, default_encoding='UTF-8', use_utf8_strings=True):
"""
Load a text file and decode it as one of the UTF encodings.
:param file_: A path string or a file-like object with a read() method.
:param default_encoding: Encoding used when the file has no BOM prefix.
:param use_utf8_strings: Forwarded to decode_utf_text_buffer (python2 only).
:return: The decoded text (see decode_utf_text_buffer for the exact type).
"""
if isinstance(file_, my_basestring):
# A string argument is treated as a path; read its raw bytes.
with open(file_, 'rb') as f:
buf = f.read()
else:
# Otherwise assume an open file-like object.
buf = file_.read()
return decode_utf_text_buffer(buf, default_encoding, use_utf8_strings)
|
Loads the specified text file and tries to decode it using one of the UTF encodings.
:param file_: The path to the loadable text file or a file-like object with a read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
:return: A unicode object. In case of python2 it can optionally be an str object
containing utf-8 encoded text.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/text_encoding.py#L4-L20
|
[
"def decode_utf_text_buffer(buf, default_encoding='UTF-8', use_utf8_strings=True):\n \"\"\"\n :param buf: Binary file contents with optional BOM prefix.\n :param default_encoding: The encoding to be used if the buffer\n doesn't have a BOM prefix.\n :param use_utf8_strings: Used only in case of python2: You can choose utf-8\n str in-memory string representation in case of python. If use_utf8_strings is\n False or you are using python3 then the text buffer is automatically loaded as a\n unicode object.\n :return: A unicode object. In case of python2 it can optionally be an str object\n containing utf-8 encoded text.\n \"\"\"\n buf, encoding = detect_encoding_and_remove_bom(buf, default_encoding)\n if python2 and use_utf8_strings:\n if are_encoding_names_equivalent(encoding, 'UTF-8'):\n return buf\n return buf.decode(encoding).encode('UTF-8')\n return buf.decode(encoding)\n"
] |
from .compatibility import python2, my_basestring
def decode_utf_text_buffer(buf, default_encoding='UTF-8', use_utf8_strings=True):
"""
:param buf: Binary file contents with optional BOM prefix.
:param default_encoding: The encoding to be used if the buffer
doesn't have a BOM prefix.
:param use_utf8_strings: Used only in case of python2: You can choose utf-8
str in-memory string representation in case of python. If use_utf8_strings is
False or you are using python3 then the text buffer is automatically loaded as a
unicode object.
:return: A unicode object. In case of python2 it can optionally be an str object
containing utf-8 encoded text.
"""
buf, encoding = detect_encoding_and_remove_bom(buf, default_encoding)
if python2 and use_utf8_strings:
if are_encoding_names_equivalent(encoding, 'UTF-8'):
return buf
return buf.decode(encoding).encode('UTF-8')
return buf.decode(encoding)
def are_encoding_names_equivalent(encoding0, encoding1):
    "True when both names denote the same codec, ignoring case and dashes."
    def canonical(name):
        return name.replace('-', '').lower()
    return canonical(encoding0) == canonical(encoding1)
def detect_encoding_and_remove_bom(buf, default_encoding='UTF-8'):
"""
:param buf: Binary file contents with an optional BOM prefix.
:param default_encoding: The encoding to be used if the buffer
doesn't have a BOM prefix.
:return: (buf_without_bom_prefix, encoding)
"""
if not isinstance(buf, bytes):
raise TypeError('buf should be a bytes instance but it is a %s: ' % type(buf).__name__)
for bom, encoding in _byte_order_marks:
if buf.startswith(bom):
return buf[len(bom):], encoding
return buf, default_encoding
_byte_order_marks = (
(b'\xef\xbb\xbf', 'UTF-8'),
# It's important to check UTF-32-LE *before* UTF-16-LE.
(b'\xff\xfe\x00\x00', 'UTF-32-LE'),
(b'\x00\x00\xfe\xff', 'UTF-32-BE'),
(b'\xff\xfe', 'UTF-16-LE'),
(b'\xfe\xff', 'UTF-16-BE'),
)
|
pasztorpisti/json-cfg
|
src/jsoncfg/text_encoding.py
|
decode_utf_text_buffer
|
python
|
def decode_utf_text_buffer(buf, default_encoding='UTF-8', use_utf8_strings=True):
"""
Decode a binary buffer (optionally BOM-prefixed) into text.
On python2 with use_utf8_strings=True the result is a utf-8 encoded str,
otherwise a unicode/str object decoded with the detected encoding.
"""
buf, encoding = detect_encoding_and_remove_bom(buf, default_encoding)
if python2 and use_utf8_strings:
# Already utf-8: return the raw bytes untouched (py2 str).
if are_encoding_names_equivalent(encoding, 'UTF-8'):
return buf
# Transcode other UTF encodings to utf-8 encoded bytes.
return buf.decode(encoding).encode('UTF-8')
return buf.decode(encoding)
|
:param buf: Binary file contents with optional BOM prefix.
:param default_encoding: The encoding to be used if the buffer
doesn't have a BOM prefix.
:param use_utf8_strings: Used only in case of python2: You can choose utf-8
str in-memory string representation in case of python. If use_utf8_strings is
False or you are using python3 then the text buffer is automatically loaded as a
unicode object.
:return: A unicode object. In case of python2 it can optionally be an str object
containing utf-8 encoded text.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/text_encoding.py#L23-L40
|
[
"def detect_encoding_and_remove_bom(buf, default_encoding='UTF-8'):\n \"\"\"\n :param buf: Binary file contents with an optional BOM prefix.\n :param default_encoding: The encoding to be used if the buffer\n doesn't have a BOM prefix.\n :return: (buf_without_bom_prefix, encoding)\n \"\"\"\n if not isinstance(buf, bytes):\n raise TypeError('buf should be a bytes instance but it is a %s: ' % type(buf).__name__)\n for bom, encoding in _byte_order_marks:\n if buf.startswith(bom):\n return buf[len(bom):], encoding\n return buf, default_encoding\n"
] |
from .compatibility import python2, my_basestring
def load_utf_text_file(file_, default_encoding='UTF-8', use_utf8_strings=True):
"""
Loads the specified text file and tries to decode it using one of the UTF encodings.
:param file_: The path to the loadable text file or a file-like object with a read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
:return: A unicode object. In case of python2 it can optionally be an str object
containing utf-8 encoded text.
"""
if isinstance(file_, my_basestring):
with open(file_, 'rb') as f:
buf = f.read()
else:
buf = file_.read()
return decode_utf_text_buffer(buf, default_encoding, use_utf8_strings)
def are_encoding_names_equivalent(encoding0, encoding1):
encoding0 = encoding0.replace('-', '').lower()
encoding1 = encoding1.replace('-', '').lower()
return encoding0 == encoding1
def detect_encoding_and_remove_bom(buf, default_encoding='UTF-8'):
"""
:param buf: Binary file contents with an optional BOM prefix.
:param default_encoding: The encoding to be used if the buffer
doesn't have a BOM prefix.
:return: (buf_without_bom_prefix, encoding)
"""
if not isinstance(buf, bytes):
raise TypeError('buf should be a bytes instance but it is a %s: ' % type(buf).__name__)
for bom, encoding in _byte_order_marks:
if buf.startswith(bom):
return buf[len(bom):], encoding
return buf, default_encoding
_byte_order_marks = (
(b'\xef\xbb\xbf', 'UTF-8'),
# It's important to check UTF-32-LE *before* UTF-16-LE.
(b'\xff\xfe\x00\x00', 'UTF-32-LE'),
(b'\x00\x00\xfe\xff', 'UTF-32-BE'),
(b'\xff\xfe', 'UTF-16-LE'),
(b'\xfe\xff', 'UTF-16-BE'),
)
|
pasztorpisti/json-cfg
|
src/jsoncfg/text_encoding.py
|
detect_encoding_and_remove_bom
|
python
|
def detect_encoding_and_remove_bom(buf, default_encoding='UTF-8'):
    """
    Strip a recognised UTF byte-order mark from `buf`, if any.

    :param buf: Binary file contents with an optional BOM prefix.
    :param default_encoding: Encoding assumed when no BOM is found.
    :return: (buf_without_bom_prefix, encoding)
    """
    if not isinstance(buf, bytes):
        raise TypeError('buf should be a bytes instance but it is a %s: ' % type(buf).__name__)
    for marker, encoding_name in _byte_order_marks:
        if buf[:len(marker)] == marker:
            return buf[len(marker):], encoding_name
    return buf, default_encoding
|
:param buf: Binary file contents with an optional BOM prefix.
:param default_encoding: The encoding to be used if the buffer
doesn't have a BOM prefix.
:return: (buf_without_bom_prefix, encoding)
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/text_encoding.py#L49-L61
| null |
from .compatibility import python2, my_basestring
def load_utf_text_file(file_, default_encoding='UTF-8', use_utf8_strings=True):
"""
Loads the specified text file and tries to decode it using one of the UTF encodings.
:param file_: The path to the loadable text file or a file-like object with a read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
:return: A unicode object. In case of python2 it can optionally be an str object
containing utf-8 encoded text.
"""
if isinstance(file_, my_basestring):
with open(file_, 'rb') as f:
buf = f.read()
else:
buf = file_.read()
return decode_utf_text_buffer(buf, default_encoding, use_utf8_strings)
def decode_utf_text_buffer(buf, default_encoding='UTF-8', use_utf8_strings=True):
"""
:param buf: Binary file contents with optional BOM prefix.
:param default_encoding: The encoding to be used if the buffer
doesn't have a BOM prefix.
:param use_utf8_strings: Used only in case of python2: You can choose utf-8
str in-memory string representation in case of python. If use_utf8_strings is
False or you are using python3 then the text buffer is automatically loaded as a
unicode object.
:return: A unicode object. In case of python2 it can optionally be an str object
containing utf-8 encoded text.
"""
buf, encoding = detect_encoding_and_remove_bom(buf, default_encoding)
if python2 and use_utf8_strings:
if are_encoding_names_equivalent(encoding, 'UTF-8'):
return buf
return buf.decode(encoding).encode('UTF-8')
return buf.decode(encoding)
def are_encoding_names_equivalent(encoding0, encoding1):
encoding0 = encoding0.replace('-', '').lower()
encoding1 = encoding1.replace('-', '').lower()
return encoding0 == encoding1
_byte_order_marks = (
(b'\xef\xbb\xbf', 'UTF-8'),
# It's important to check UTF-32-LE *before* UTF-16-LE.
(b'\xff\xfe\x00\x00', 'UTF-32-LE'),
(b'\x00\x00\xfe\xff', 'UTF-32-BE'),
(b'\xff\xfe', 'UTF-16-LE'),
(b'\xfe\xff', 'UTF-16-BE'),
)
|
pasztorpisti/json-cfg
|
src/jsoncfg/config_classes.py
|
_process_value_fetcher_call_args
|
python
|
def _process_value_fetcher_call_args(args):
    """
    Split the positional args of a value-fetcher call into the optional
    default value and the trailing JSONValueMapper instances.

    :param args: Positional call arguments: an optional default value
    followed by zero or more JSONValueMapper instances.
    :return: (default_value, mappers) where default_value is _undefined when
    no default was supplied and mappers may be an empty tuple.
    """
    if not args:
        return _undefined, ()
    if isinstance(args[0], JSONValueMapper):
        default, mappers = _undefined, args
    else:
        default, mappers = args[0], args[1:]
    # Everything after the default must be a mapper.
    for mapper in mappers:
        if not isinstance(mapper, JSONValueMapper):
            raise TypeError('%r isn\'t a JSONValueMapper instance!' % (mapper,))
    return default, mappers
|
This function processes the incoming varargs of ValueNotFoundNode.__call__() and
_ConfigNode.__call__().
:param args: A list or tuple containing positional function call arguments. The optional
arguments we expect are the following: An optional default value followed by zero or more
JSONValueMapper instances.
:return: (default_value, list_or_tuple_of_JSONValueMapper_instances)
The default_value is _undefined if it is not present and the second item of the tuple is
an empty tuple/list if there are not JSONValueMapper instances.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/config_classes.py#L110-L135
| null |
import numbers
from collections import OrderedDict, namedtuple
from .compatibility import my_basestring
from .exceptions import JSONConfigException
_undefined = object()
class JSONConfigQueryError(JSONConfigException):
"""
The base class of every exceptions thrown by this library during config queries.
"""
def __init__(self, config_node, message):
"""
:param config_node: An instance of one of the subclasses of _ConfigNode.
"""
self.config_node = config_node
self.line, self.column = node_location(config_node)
message += ' [line=%s;col=%s]' % (self.line, self.column)
super(JSONConfigQueryError, self).__init__(message)
class JSONConfigValueMapperError(JSONConfigQueryError):
"""
This is raised when someone fetches a value by specifying the "mapper" parameter
and the mapper function raises an exception. That exception is converted into this one.
"""
def __init__(self, config_node, mapper_exception):
"""
:param config_node: An instance of one of the subclasses of _ConfigNode.
:param mapper_exception: The exception instance that was raised during conversion.
It can be anything...
"""
super(JSONConfigValueMapperError, self).__init__(config_node,
'Error converting json value: ' +
str(mapper_exception))
self.mapper_exception = mapper_exception
class JSONConfigValueNotFoundError(JSONConfigQueryError):
"""
Raised when the user tries to fetch a value that doesn't exist in the config.
"""
def __init__(self, value_not_found):
"""
:param value_not_found: A ValueNotFoundNode instance. Let's say that you query the
config.servers[1].ip_address() value from the config but the config.servers array
has only one item. In this case a JSONConfigValueNotFoundError is raised and
value_not_found._parent_config_node is set to config.servers (that is the last existing
component from our query path) and self.relative_path will be '[1].ip_address'.
This way the error location points to the config.servers node and the error message
says that you wanted to query it with the '[1].ip_address' relative_path that doesn't
exist.
:type value_not_found: ValueNotFoundNode
"""
self.value_not_found = value_not_found
path = []
for component in value_not_found._missing_query_path:
if isinstance(component, numbers.Integral):
path.append('[%s]' % component)
else:
path.append('.' + component)
self.relative_path = ''.join(path)
# TODO: improve the error message: it is possible to do so based on the info we have
message = 'Required config node not found. Missing query path: %s'\
' (relative to error location)' % self.relative_path
super(JSONConfigValueNotFoundError, self).__init__(value_not_found._parent_config_node,
message)
class JSONConfigNodeTypeError(JSONConfigQueryError):
"""
This error is raised when you try to handle a config node by assuming its type
to be something else than its actual type. For example you are trying to iterate
over the key-value pairs of a value that is not json object.
"""
def __init__(self, config_node, expected_type, error_message=None):
"""
:param config_node: An instance of one of the subclasses of _ConfigNode.
:param expected_type: The expected type or a tuple/list of expected types.
"""
found_type_name = config_node.__class__.__name__
if not isinstance(expected_type, (list, tuple)):
expected_type = (expected_type,)
expected_names = [t.__name__ for t in expected_type]
message = 'Expected a %s but found %s.' % (' or '.join(expected_names), found_type_name)
if error_message is not None:
message += ' %s' % (error_message,)
super(JSONConfigNodeTypeError, self).__init__(config_node, message)
class JSONConfigIndexError(JSONConfigQueryError):
"""
This is raised when you try to index into an array node and the index is out of range. Indexing
into a different kind of node (object, scalar) doesn't raise this.
"""
def __init__(self, config_json_array, index):
self.index = index
message = 'Index (%s) is out of range [0, %s)' % (index, len(config_json_array))
super(JSONConfigIndexError, self).__init__(config_json_array, message)
class JSONValueMapper(object):
def __call__(self, json_value):
raise NotImplementedError()
class ValueNotFoundNode(object):
    """
    Placeholder returned when a config query path leads to a non-existing
    entry. Any further attribute/index access fabricates another
    ValueNotFoundNode, so the error can be reported lazily, only when the
    value is actually fetched with a call.
    """
    def __init__(self, parent_config_node, missing_query_path):
        """
        If the user issues a query like config.servers[2].ip_address but the
        config.servers array has fewer than three items, then the existing
        part of the query path is config.servers and the missing part is
        [2].ip_address.

        :param parent_config_node: The last *existing* config node on the
            query path issued by the user.
        :param missing_query_path: The non-existing suffix of the query path,
            relative to parent_config_node.
        """
        self._parent_config_node = parent_config_node
        self._missing_query_path = missing_query_path

    def __call__(self, *args):
        """
        Mirrors ConfigNode.__call__(): accepts an optional default value
        followed by zero or more JSONValueMapper instances. Since this node
        wraps no json value the mappers are ignored; the default is returned
        when provided, otherwise the missing required value is reported by
        raising JSONConfigValueNotFoundError.
        """
        default, _ = _process_value_fetcher_call_args(args)
        if default is not _undefined:
            return default
        raise JSONConfigValueNotFoundError(self)

    def __getattr__(self, item):
        return self.__getitem__(item)

    def __getitem__(self, item):
        # Extend the missing path; the result stays a not-found node.
        return ValueNotFoundNode(self._parent_config_node,
                                 self._missing_query_path + [item])

    def __len__(self):
        raise JSONConfigValueNotFoundError(self)

    def __iter__(self):
        raise JSONConfigValueNotFoundError(self)
class ConfigNode(object):
    """
    Common base of the wrapper classes that build up the config object
    hierarchy around the parsed json objects/arrays/scalars.

    Only private ('_'-prefixed) members are allowed here and in subclasses:
    json keys are accessed with the dot (member) operator, so public member
    names could collide with keys that appear in config files.
    """
    def __init__(self, line, column):
        """
        :param line: Zero based line number. (Add 1 for human readable
            error reporting).
        :param column: Zero based column number. (Add 1 for human readable
            error reporting).
        """
        super(ConfigNode, self).__init__()
        self._line = line
        self._column = column

    def __call__(self, *args):
        """
        Fetches the wrapped json value. Accepts an optional default value
        followed by zero or more JSONValueMapper instances. Since a wrapped
        json value is always present here, the default is ignored; the
        mappers are applied to the value left-to-right before returning it.
        Any exception raised by a mapper is wrapped in
        JSONConfigValueMapperError.
        """
        _, mappers = _process_value_fetcher_call_args(args)
        result = self._fetch_unwrapped_value()
        try:
            for value_mapper in mappers:
                result = value_mapper(result)
        except Exception as e:
            raise JSONConfigValueMapperError(self, e)
        return result

    def _fetch_unwrapped_value(self):
        # Subclasses return the raw python value (dict/list/scalar).
        raise NotImplementedError()
class ConfigJSONScalar(ConfigNode):
    """
    Wrapper around a scalar json value (string, number, bool, null).
    Every container-style access (attribute lookup, indexing, len, iteration,
    membership test) raises a type error describing the misuse, so the error
    message can point at the exact location of the scalar in the config file.
    """
    def __init__(self, value, line, column):
        # value: the raw python scalar; line/column: zero based location.
        super(ConfigJSONScalar, self).__init__(line, column)
        self.value = value
    def __getattr__(self, item):
        # Attribute access means the caller treated this scalar as an object.
        raise JSONConfigNodeTypeError(
            self,
            ConfigJSONObject,
            'You are trying to get an item from a scalar as if it was an object. item=%s' % (item,)
        )
    def __getitem__(self, item):
        # Indexing a scalar is always an error; the message depends on
        # whether the caller used an integer (array-style) or a string
        # (object-style) index.
        if not isinstance(item, (my_basestring, numbers.Integral)):
            raise TypeError('You are allowed to index only with string or integer.')
        if isinstance(item, numbers.Integral):
            raise JSONConfigNodeTypeError(
                self,
                ConfigJSONArray,
                'You are trying to index into a scalar as if it was an array. index=%s' % (item,)
            )
        raise JSONConfigNodeTypeError(
            self,
            ConfigJSONObject,
            'You are trying to get an item from a scalar as if it was an object. item=%s' % (item,)
        )
    def __contains__(self, item):
        raise JSONConfigNodeTypeError(
            self,
            ConfigJSONObject,
            'You are trying to access the __contains__ magic method of a scalar config object.'
        )
    def __len__(self):
        raise JSONConfigNodeTypeError(
            self,
            (ConfigJSONObject, ConfigJSONArray),
            'You are trying to access the __len__ of a scalar config object.'
        )
    def __iter__(self):
        raise JSONConfigNodeTypeError(
            self,
            (ConfigJSONObject, ConfigJSONArray),
            'You are trying to iterate a scalar value.'
        )
    def __repr__(self):
        return '%s(value=%r, line=%r, column=%r)' % (self.__class__.__name__,
                                                     self.value, self._line, self._column)
    def _fetch_unwrapped_value(self):
        # The wrapped scalar is returned as-is.
        return self.value
class ConfigJSONObject(ConfigNode):
    """
    Wrapper around a json object (dict). Keys are kept in insertion order.
    Missing keys don't raise immediately: lookup returns a ValueNotFoundNode
    so the error surfaces only when the value is actually fetched.
    """
    def __init__(self, line, column):
        super(ConfigJSONObject, self).__init__(line, column)
        self._dict = OrderedDict()

    def __getattr__(self, item):
        # config.key is sugar for config['key'].
        return self.__getitem__(item)

    def __getitem__(self, item):
        if isinstance(item, numbers.Integral):
            raise JSONConfigNodeTypeError(
                self,
                ConfigJSONArray,
                'You are trying to index into an object as if it was an array. index=%s' % (item,)
            )
        if not isinstance(item, my_basestring):
            raise TypeError('You are allowed to index only with string or integer.')
        try:
            return self._dict[item]
        except KeyError:
            return ValueNotFoundNode(self, [item])

    def __contains__(self, item):
        return item in self._dict

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        # Iterates (key, child_node) pairs.
        return iter(self._dict.items())

    def __repr__(self):
        return '%s(len=%r, line=%r, column=%r)' % (self.__class__.__name__,
                                                   len(self), self._line, self._column)

    def _fetch_unwrapped_value(self):
        return {key: child._fetch_unwrapped_value() for key, child in self._dict.items()}

    def _insert(self, key, value):
        # Used by the json listener while building the node hierarchy.
        self._dict[key] = value
class ConfigJSONArray(ConfigNode):
    """
    Wrapper around a json array (list). Supports integer indexing (including
    negative indexes), iteration and len(); out-of-range indexes raise
    JSONConfigIndexError with the node's file location.
    """
    def __init__(self, line, column):
        super(ConfigJSONArray, self).__init__(line, column)
        self._list = []

    def __getattr__(self, item):
        # Attribute access means the caller treated this array as an object.
        raise JSONConfigNodeTypeError(
            self,
            ConfigJSONObject,
            'You are trying to get an item from an array as if it was an object. item=%s' % (item,)
        )

    def __getitem__(self, item):
        if isinstance(item, numbers.Integral):
            # Normalize negative indexes the same way lists do.
            if item < 0:
                item += len(self._list)
            if 0 <= item < len(self._list):
                return self._list[item]
            raise JSONConfigIndexError(self, item)
        if not isinstance(item, my_basestring):
            raise TypeError('You are allowed to index only with string or integer.')
        raise JSONConfigNodeTypeError(
            self,
            ConfigJSONObject,
            'You are trying to get an item from an array as if it was an object. item=%s' % (item,)
        )

    def __contains__(self, item):
        raise JSONConfigNodeTypeError(
            self,
            ConfigJSONObject,
            'You are trying to access the __contains__ magic method of an array.'
        )

    def __len__(self):
        return len(self._list)

    def __iter__(self):
        return iter(self._list)

    def __repr__(self):
        return '%s(len=%r, line=%r, column=%r)' % (self.__class__.__name__,
                                                   len(self), self._line, self._column)

    def _fetch_unwrapped_value(self):
        return [child._fetch_unwrapped_value() for child in self._list]

    def _append(self, item):
        # Used by the json listener while building the node hierarchy.
        self._list.append(item)
_NodeLocation = namedtuple('NodeLocation', 'line column')


def node_location(config_node):
    """ Returns the location of this node in the file as a tuple (line, column).
    Both line and column are 1 based.
    NOTE(review): ConfigNode.__init__ documents its line/column as zero based;
    confirm where the 1-based adjustment happens. """
    # ConfigNode and ValueNotFoundNode are disjoint types, so guard order
    # among them doesn't matter.
    if isinstance(config_node, ValueNotFoundNode):
        raise JSONConfigValueNotFoundError(config_node)
    if not isinstance(config_node, ConfigNode):
        raise TypeError('Expected a config node but received a %s instance.' %
                        type(config_node).__name__)
    return _NodeLocation(config_node._line, config_node._column)
def node_exists(config_node):
    """ Returns True if the specified config node
    refers to an existing config entry (i.e. it is not a not-found
    placeholder). """
    return isinstance(config_node, ConfigNode)
def node_is_object(config_node):
    """ Returns True if the specified config node refers
    to an existing config entry that is a json object (dict). """
    return isinstance(config_node, ConfigJSONObject)
def node_is_array(config_node):
    """ Returns True if the specified config node refers
    to an existing config entry that is a json array (list). """
    return isinstance(config_node, ConfigJSONArray)
def node_is_scalar(config_node):
    """ Returns True if the specified config node refers to an existing config
    entry that isn't a json object (dict) or array (list) but something else
    (string, number, bool, null). """
    return isinstance(config_node, ConfigJSONScalar)
def _guarantee_node_class(config_node, node_class):
    """
    Returns config_node if it is an instance of node_class; otherwise raises
    an error describing why it isn't: a not-found node raises
    JSONConfigValueNotFoundError, a config node of the wrong kind raises
    JSONConfigNodeTypeError, and anything else raises TypeError.
    """
    if isinstance(config_node, node_class):
        return config_node
    if isinstance(config_node, ValueNotFoundNode):
        raise JSONConfigValueNotFoundError(config_node)
    if not isinstance(config_node, ConfigNode):
        raise TypeError('Expected a %s or %s instance but received %s.' % (
            ConfigNode.__name__, ValueNotFoundNode.__name__, config_node.__class__.__name__))
    raise JSONConfigNodeTypeError(config_node, node_class)
def ensure_exists(config_node):
    """ Returns config_node if it refers to an existing config entry,
    otherwise raises (JSONConfigValueNotFoundError for a not-found node,
    TypeError for a non-node argument). """
    return _guarantee_node_class(config_node, ConfigNode)
def expect_object(config_node):
    """ Returns config_node if it is a json object (dict) node,
    otherwise raises. """
    return _guarantee_node_class(config_node, ConfigJSONObject)
def expect_array(config_node):
    """ Returns config_node if it is a json array (list) node,
    otherwise raises. """
    return _guarantee_node_class(config_node, ConfigJSONArray)
def expect_scalar(config_node):
    """ Returns config_node if it is a scalar (non-container) node,
    otherwise raises. """
    return _guarantee_node_class(config_node, ConfigJSONScalar)
|
pasztorpisti/json-cfg
|
src/jsoncfg/config_classes.py
|
node_location
|
python
|
def node_location(config_node):
if isinstance(config_node, ConfigNode):
return _NodeLocation(config_node._line, config_node._column)
if isinstance(config_node, ValueNotFoundNode):
raise JSONConfigValueNotFoundError(config_node)
raise TypeError('Expected a config node but received a %s instance.' %
type(config_node).__name__)
|
Returns the location of this node in the file as a tuple (line, column).
Both line and column are 1 based.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/config_classes.py#L375-L383
| null |
import numbers
from collections import OrderedDict, namedtuple
from .compatibility import my_basestring
from .exceptions import JSONConfigException
_undefined = object()
class JSONConfigQueryError(JSONConfigException):
"""
The base class of every exceptions thrown by this library during config queries.
"""
def __init__(self, config_node, message):
"""
:param config_node: An instance of one of the subclasses of _ConfigNode.
"""
self.config_node = config_node
self.line, self.column = node_location(config_node)
message += ' [line=%s;col=%s]' % (self.line, self.column)
super(JSONConfigQueryError, self).__init__(message)
class JSONConfigValueMapperError(JSONConfigQueryError):
"""
This is raised when someone fetches a value by specifying the "mapper" parameter
and the mapper function raises an exception. That exception is converted into this one.
"""
def __init__(self, config_node, mapper_exception):
"""
:param config_node: An instance of one of the subclasses of _ConfigNode.
:param mapper_exception: The exception instance that was raised during conversion.
It can be anything...
"""
super(JSONConfigValueMapperError, self).__init__(config_node,
'Error converting json value: ' +
str(mapper_exception))
self.mapper_exception = mapper_exception
class JSONConfigValueNotFoundError(JSONConfigQueryError):
"""
Raised when the user tries to fetch a value that doesn't exist in the config.
"""
def __init__(self, value_not_found):
"""
:param value_not_found: A ValueNotFoundNode instance. Let's say that you query the
config.servers[1].ip_address() value from the config but the config.servers array
has only one item. In this case a JSONConfigValueNotFoundError is raised and
value_not_found._parent_config_node is set to config.servers (that is the last existing
component from our query path) and self.relative_path will be '[1].ip_address'.
This way the error location points to the config.servers node and the error message
says that you wanted to query it with the '[1].ip_address' relative_path that doesn't
exist.
:type value_not_found: ValueNotFoundNode
"""
self.value_not_found = value_not_found
path = []
for component in value_not_found._missing_query_path:
if isinstance(component, numbers.Integral):
path.append('[%s]' % component)
else:
path.append('.' + component)
self.relative_path = ''.join(path)
# TODO: improve the error message: it is possible to do so based on the info we have
message = 'Required config node not found. Missing query path: %s'\
' (relative to error location)' % self.relative_path
super(JSONConfigValueNotFoundError, self).__init__(value_not_found._parent_config_node,
message)
class JSONConfigNodeTypeError(JSONConfigQueryError):
"""
This error is raised when you try to handle a config node by assuming its type
to be something else than its actual type. For example you are trying to iterate
over the key-value pairs of a value that is not json object.
"""
def __init__(self, config_node, expected_type, error_message=None):
"""
:param config_node: An instance of one of the subclasses of _ConfigNode.
:param expected_type: The expected type or a tuple/list of expected types.
"""
found_type_name = config_node.__class__.__name__
if not isinstance(expected_type, (list, tuple)):
expected_type = (expected_type,)
expected_names = [t.__name__ for t in expected_type]
message = 'Expected a %s but found %s.' % (' or '.join(expected_names), found_type_name)
if error_message is not None:
message += ' %s' % (error_message,)
super(JSONConfigNodeTypeError, self).__init__(config_node, message)
class JSONConfigIndexError(JSONConfigQueryError):
"""
This is raised when you try to index into an array node and the index is out of range. Indexing
into a different kind of node (object, scalar) doesn't raise this.
"""
def __init__(self, config_json_array, index):
self.index = index
message = 'Index (%s) is out of range [0, %s)' % (index, len(config_json_array))
super(JSONConfigIndexError, self).__init__(config_json_array, message)
class JSONValueMapper(object):
def __call__(self, json_value):
raise NotImplementedError()
def _process_value_fetcher_call_args(args):
    """
    Processes the incoming varargs of ValueNotFoundNode.__call__() and
    _ConfigNode.__call__().

    :param args: Positional call arguments: an optional default value
        followed by zero or more JSONValueMapper instances.
    :return: (default_value, mappers) where default_value is the sentinel
        _undefined when no default was given, and mappers is a (possibly
        empty) tuple/list of JSONValueMapper instances.
    :raises TypeError: If anything after the default is not a
        JSONValueMapper instance.
    """
    if not args:
        return _undefined, ()
    # The first argument is the default value unless it is itself a mapper.
    if isinstance(args[0], JSONValueMapper):
        default, mappers = _undefined, args
    else:
        default, mappers = args[0], args[1:]
    for candidate in mappers:
        if not isinstance(candidate, JSONValueMapper):
            raise TypeError('%r isn\'t a JSONValueMapper instance!' % (candidate,))
    return default, mappers
class ValueNotFoundNode(object):
def __init__(self, parent_config_node, missing_query_path):
"""
If the user issues a config query like config.servers[2].ip_address but there is only
one server in the config (so config.servers[2] doesn't exist) then the existing part
of the query path is config.servers and the missing part is [2].ip_address. In this case
parent_config_node will be the last node of the existing part, in this case the servers
array, and the missing_query_path is [2].ip_address.
:param parent_config_node: The last existing config_node on the query path issued
by the user. missing_query_path is the non-existing part of the query path and it
is relative to the parent_config_node.
:param missing_query_path: The non-existing part (suffix) of the query path issued
by the user. This is relative to parent_config_node.
"""
self._parent_config_node = parent_config_node
self._missing_query_path = missing_query_path
def __call__(self, *args):
"""
This function expects the exact same parameters as _ConfigNode.__call__():
An optional default value followed by zero or more JSONValueMapper instances.
Since this is a not-found-node we know that this wrapper object doesn't contain any
json value so the mapper arguments are ignored.
If a default value is provided then we return it otherwise we raise an exception since
the user tries to fetch a required value that isn't in the config file.
"""
default, _ = _process_value_fetcher_call_args(args)
if default is _undefined:
raise JSONConfigValueNotFoundError(self)
return default
def __getattr__(self, item):
return self.__getitem__(item)
def __getitem__(self, item):
return ValueNotFoundNode(self._parent_config_node, self._missing_query_path + [item])
def __len__(self):
raise JSONConfigValueNotFoundError(self)
def __iter__(self):
raise JSONConfigValueNotFoundError(self)
class ConfigNode(object):
"""
Base class for the actual classes whose instances build up the config
object hierarchy wrapping the actual json objects/arrays/scalars.
Note that this class and its subclasses should have only private members
with names that start with '_' because the keys in the json config
can be accessed using the member operator (dot) and the members of the
config node class instances should not conflict with the keys in the
config files.
"""
def __init__(self, line, column):
"""
:param line: Zero based line number. (Add 1 for human readable error reporting).
:param column: Zero based column number. (Add 1 for human readable error reporting).
"""
super(ConfigNode, self).__init__()
self._line = line
self._column = column
def __call__(self, *args):
"""
This function will fetch the wrapped json value from this wrapper config node.
We expect the following optional arguments:
An optional default value followed by zero or more JSONValueMapper instances.
Since this is not a not-found-node we know that there is a wrapped json value so the
default value is ignored. If we have JSONValueMapper instances then we apply them to
the wrapped json value in left-to-right order before returning the json value.
"""
_, mappers = _process_value_fetcher_call_args(args)
value = self._fetch_unwrapped_value()
try:
for mapper in mappers:
value = mapper(value)
except Exception as e:
raise JSONConfigValueMapperError(self, e)
return value
def _fetch_unwrapped_value(self):
raise NotImplementedError()
class ConfigJSONScalar(ConfigNode):
def __init__(self, value, line, column):
super(ConfigJSONScalar, self).__init__(line, column)
self.value = value
def __getattr__(self, item):
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to get an item from a scalar as if it was an object. item=%s' % (item,)
)
def __getitem__(self, item):
if not isinstance(item, (my_basestring, numbers.Integral)):
raise TypeError('You are allowed to index only with string or integer.')
if isinstance(item, numbers.Integral):
raise JSONConfigNodeTypeError(
self,
ConfigJSONArray,
'You are trying to index into a scalar as if it was an array. index=%s' % (item,)
)
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to get an item from a scalar as if it was an object. item=%s' % (item,)
)
def __contains__(self, item):
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to access the __contains__ magic method of a scalar config object.'
)
def __len__(self):
raise JSONConfigNodeTypeError(
self,
(ConfigJSONObject, ConfigJSONArray),
'You are trying to access the __len__ of a scalar config object.'
)
def __iter__(self):
raise JSONConfigNodeTypeError(
self,
(ConfigJSONObject, ConfigJSONArray),
'You are trying to iterate a scalar value.'
)
def __repr__(self):
return '%s(value=%r, line=%r, column=%r)' % (self.__class__.__name__,
self.value, self._line, self._column)
def _fetch_unwrapped_value(self):
return self.value
class ConfigJSONObject(ConfigNode):
def __init__(self, line, column):
super(ConfigJSONObject, self).__init__(line, column)
self._dict = OrderedDict()
def __getattr__(self, item):
return self.__getitem__(item)
def __getitem__(self, item):
if isinstance(item, numbers.Integral):
raise JSONConfigNodeTypeError(
self,
ConfigJSONArray,
'You are trying to index into an object as if it was an array. index=%s' % (item,)
)
if not isinstance(item, my_basestring):
raise TypeError('You are allowed to index only with string or integer.')
if item in self._dict:
return self._dict[item]
return ValueNotFoundNode(self, [item])
def __contains__(self, item):
return item in self._dict
def __len__(self):
return len(self._dict)
def __iter__(self):
return iter(self._dict.items())
def __repr__(self):
return '%s(len=%r, line=%r, column=%r)' % (self.__class__.__name__,
len(self), self._line, self._column)
def _fetch_unwrapped_value(self):
return dict((key, node._fetch_unwrapped_value()) for key, node in self._dict.items())
def _insert(self, key, value):
self._dict[key] = value
class ConfigJSONArray(ConfigNode):
def __init__(self, line, column):
super(ConfigJSONArray, self).__init__(line, column)
self._list = []
def __getattr__(self, item):
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to get an item from an array as if it was an object. item=%s' % (item,)
)
def __getitem__(self, item):
if isinstance(item, numbers.Integral):
if item < 0:
item += len(self._list)
if 0 <= item < len(self._list):
return self._list[item]
raise JSONConfigIndexError(self, item)
if not isinstance(item, my_basestring):
raise TypeError('You are allowed to index only with string or integer.')
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to get an item from an array as if it was an object. item=%s' % (item,)
)
def __contains__(self, item):
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to access the __contains__ magic method of an array.'
)
def __len__(self):
return len(self._list)
def __iter__(self):
return iter(self._list)
def __repr__(self):
return '%s(len=%r, line=%r, column=%r)' % (self.__class__.__name__,
len(self), self._line, self._column)
def _fetch_unwrapped_value(self):
return [node._fetch_unwrapped_value() for node in self._list]
def _append(self, item):
self._list.append(item)
_NodeLocation = namedtuple('NodeLocation', 'line column')
def node_exists(config_node):
""" Returns True if the specified config node
refers to an existing config entry. """
return isinstance(config_node, ConfigNode)
def node_is_object(config_node):
""" Returns True if the specified config node refers
to an existing config entry that is a json object (dict). """
return isinstance(config_node, ConfigJSONObject)
def node_is_array(config_node):
""" Returns True if the specified config node refers
to an existing config entry that is a json array (list). """
return isinstance(config_node, ConfigJSONArray)
def node_is_scalar(config_node):
""" Returns True if the specified config node refers to an existing config
entry that isn't a json object (dict) or array (list) but something else. """
return isinstance(config_node, ConfigJSONScalar)
def _guarantee_node_class(config_node, node_class):
if isinstance(config_node, node_class):
return config_node
if isinstance(config_node, ValueNotFoundNode):
raise JSONConfigValueNotFoundError(config_node)
if isinstance(config_node, ConfigNode):
raise JSONConfigNodeTypeError(config_node, node_class)
raise TypeError('Expected a %s or %s instance but received %s.' % (
ConfigNode.__name__, ValueNotFoundNode.__name__, config_node.__class__.__name__))
def ensure_exists(config_node):
return _guarantee_node_class(config_node, ConfigNode)
def expect_object(config_node):
return _guarantee_node_class(config_node, ConfigJSONObject)
def expect_array(config_node):
return _guarantee_node_class(config_node, ConfigJSONArray)
def expect_scalar(config_node):
return _guarantee_node_class(config_node, ConfigJSONScalar)
|
pasztorpisti/json-cfg
|
src/jsoncfg/parser.py
|
TextParser.column
|
python
|
def column(self):
for i in my_xrange(self._column_query_pos, self.pos):
if self.text[i] == '\t':
self._column += self.tab_size
self._column -= self._column % self.tab_size
else:
self._column += 1
self._column_query_pos = self.pos
return self._column
|
Returns the zero based column number based on the
current position of the parser.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/parser.py#L36-L46
| null |
class TextParser(object):
"""
A base class for parsers. It handles the position in the parsed text and
tracks the current line/column number.
"""
def __init__(self, tab_size=4):
super(TextParser, self).__init__()
self.tab_size = tab_size
self.text = None
self.pos = 0
self.end = 0
self.line = 0
self.prev_newline_char = None
self._column = 0
self._column_query_pos = 0
@property
def init_text_parser(self, text):
assert self.text is None
self.text = text
self.end = len(text)
def error(self, message):
""" Raises an exception with the given message and with the current position of
the parser in the parsed json string. """
raise JSONConfigParserException(self, message)
def skip_chars(self, target_pos, is_char_skippable_func):
assert self.pos <= target_pos <= self.end
target_pos = min(target_pos, self.end)
for self.pos in my_xrange(self.pos, target_pos):
c = self.text[self.pos]
if not is_char_skippable_func(c):
break
if c in '\r\n':
if self.prev_newline_char is not None and self.prev_newline_char != c:
# this is the second char of a CRLF or LFCR
self.prev_newline_char = None
else:
self.prev_newline_char = c
self.line += 1
self._column_query_pos = self.pos + 1
self._column = 0
else:
self.prev_newline_char = None
else:
self.pos = target_pos
def skip_to(self, target_pos):
"""
Moves the pointer to target_pos (if the current position is less than target_pos)
and keeps track the current line/column.
"""
self.skip_chars(target_pos, lambda c: True)
def skip_char(self):
""" Skips a single character. """
self.skip_to(self.pos + 1)
def peek(self, offset=0):
""" Looking forward in the input text without actually stepping the current position.
returns None if the current position is at the end of the input. """
pos = self.pos + offset
if pos >= self.end:
return None
return self.text[pos]
def expect(self, c):
"""
If the current position doesn't hold the specified c character then it raises an
exception, otherwise it skips the specified character (moves the current position forward).
"""
if self.peek() != c:
self.error('Expected "%c"' % (c,))
self.skip_char()
|
pasztorpisti/json-cfg
|
src/jsoncfg/parser.py
|
TextParser.peek
|
python
|
def peek(self, offset=0):
pos = self.pos + offset
if pos >= self.end:
return None
return self.text[pos]
|
Looking forward in the input text without actually stepping the current position.
returns None if the current position is at the end of the input.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/parser.py#L90-L96
| null |
class TextParser(object):
"""
A base class for parsers. It handles the position in the parsed text and
tracks the current line/column number.
"""
def __init__(self, tab_size=4):
super(TextParser, self).__init__()
self.tab_size = tab_size
self.text = None
self.pos = 0
self.end = 0
self.line = 0
self.prev_newline_char = None
self._column = 0
self._column_query_pos = 0
@property
def column(self):
""" Returns the zero based column number based on the
current position of the parser. """
for i in my_xrange(self._column_query_pos, self.pos):
if self.text[i] == '\t':
self._column += self.tab_size
self._column -= self._column % self.tab_size
else:
self._column += 1
self._column_query_pos = self.pos
return self._column
def init_text_parser(self, text):
assert self.text is None
self.text = text
self.end = len(text)
def error(self, message):
""" Raises an exception with the given message and with the current position of
the parser in the parsed json string. """
raise JSONConfigParserException(self, message)
def skip_chars(self, target_pos, is_char_skippable_func):
assert self.pos <= target_pos <= self.end
target_pos = min(target_pos, self.end)
for self.pos in my_xrange(self.pos, target_pos):
c = self.text[self.pos]
if not is_char_skippable_func(c):
break
if c in '\r\n':
if self.prev_newline_char is not None and self.prev_newline_char != c:
# this is the second char of a CRLF or LFCR
self.prev_newline_char = None
else:
self.prev_newline_char = c
self.line += 1
self._column_query_pos = self.pos + 1
self._column = 0
else:
self.prev_newline_char = None
else:
self.pos = target_pos
def skip_to(self, target_pos):
"""
Moves the pointer to target_pos (if the current position is less than target_pos)
and keeps track the current line/column.
"""
self.skip_chars(target_pos, lambda c: True)
def skip_char(self):
""" Skips a single character. """
self.skip_to(self.pos + 1)
def expect(self, c):
"""
If the current position doesn't hold the specified c character then it raises an
exception, otherwise it skips the specified character (moves the current position forward).
"""
if self.peek() != c:
self.error('Expected "%c"' % (c,))
self.skip_char()
|
pasztorpisti/json-cfg
|
src/jsoncfg/parser.py
|
TextParser.expect
|
python
|
def expect(self, c):
if self.peek() != c:
self.error('Expected "%c"' % (c,))
self.skip_char()
|
If the current position doesn't hold the specified c character then it raises an
exception, otherwise it skips the specified character (moves the current position forward).
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/parser.py#L98-L105
|
[
"def peek(self, offset=0):\n \"\"\" Looking forward in the input text without actually stepping the current position.\n returns None if the current position is at the end of the input. \"\"\"\n pos = self.pos + offset\n if pos >= self.end:\n return None\n return self.text[pos]\n"
] |
class TextParser(object):
"""
A base class for parsers. It handles the position in the parsed text and
tracks the current line/column number.
"""
def __init__(self, tab_size=4):
super(TextParser, self).__init__()
self.tab_size = tab_size
self.text = None
self.pos = 0
self.end = 0
self.line = 0
self.prev_newline_char = None
self._column = 0
self._column_query_pos = 0
@property
def column(self):
""" Returns the zero based column number based on the
current position of the parser. """
for i in my_xrange(self._column_query_pos, self.pos):
if self.text[i] == '\t':
self._column += self.tab_size
self._column -= self._column % self.tab_size
else:
self._column += 1
self._column_query_pos = self.pos
return self._column
def init_text_parser(self, text):
assert self.text is None
self.text = text
self.end = len(text)
def error(self, message):
""" Raises an exception with the given message and with the current position of
the parser in the parsed json string. """
raise JSONConfigParserException(self, message)
def skip_chars(self, target_pos, is_char_skippable_func):
assert self.pos <= target_pos <= self.end
target_pos = min(target_pos, self.end)
for self.pos in my_xrange(self.pos, target_pos):
c = self.text[self.pos]
if not is_char_skippable_func(c):
break
if c in '\r\n':
if self.prev_newline_char is not None and self.prev_newline_char != c:
# this is the second char of a CRLF or LFCR
self.prev_newline_char = None
else:
self.prev_newline_char = c
self.line += 1
self._column_query_pos = self.pos + 1
self._column = 0
else:
self.prev_newline_char = None
else:
self.pos = target_pos
def skip_to(self, target_pos):
"""
Moves the pointer to target_pos (if the current position is less than target_pos)
and keeps track the current line/column.
"""
self.skip_chars(target_pos, lambda c: True)
def skip_char(self):
""" Skips a single character. """
self.skip_to(self.pos + 1)
def peek(self, offset=0):
""" Looking forward in the input text without actually stepping the current position.
returns None if the current position is at the end of the input. """
pos = self.pos + offset
if pos >= self.end:
return None
return self.text[pos]
|
pasztorpisti/json-cfg
|
src/jsoncfg/parser.py
|
JSONParser.parse
|
python
|
def parse(self, json_text, listener):
listener.begin_parsing(self)
try:
self.init_text_parser(json_text)
self.listener = listener
c = self._skip_spaces_and_peek()
if c == '{':
if self.params.root_is_array:
self.error('The root of the json is expected to be an array!')
self._parse_object()
elif c == '[':
if not self.params.root_is_array:
self.error('The root of the json is expected to be an object!')
self._parse_array()
else:
self.error('The json string should start with "%s"' % (
'[' if self.params.root_is_array else '{'))
if self._skip_spaces_and_peek() is not None:
self.error('Garbage detected after the parsed json!')
finally:
listener.end_parsing()
|
Parses the specified json_text and emits parser events to the listener.
If root_is_array then the root element of the json has to be an array/list,
otherwise the expected root is a json object/dict.
In case of python2 the json_text can be either an utf8 encoded string
or a unicode object and the fired parser events will use the same format.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/parser.py#L147-L177
|
[
"def init_text_parser(self, text):\n assert self.text is None\n self.text = text\n self.end = len(text)\n",
"def error(self, message):\n \"\"\" Raises an exception with the given message and with the current position of\n the parser in the parsed json string. \"\"\"\n raise JSONConfigParserException(self, message)\n",
"def _skip_spaces_and_peek(self):\n \"\"\" Skips all spaces and comments.\n :return: The first character that follows the skipped spaces and comments or\n None if the end of the json string has been reached.\n \"\"\"\n while 1:\n # skipping spaces\n self.skip_chars(self.end, lambda x: x in self.spaces)\n c = self.peek()\n if not self.params.allow_comments:\n return c\n if c != '/':\n return c\n d = self.peek(1)\n if d == '/':\n self.skip_to(self.pos + 2)\n self._skip_singleline_comment()\n elif d == '*':\n self.skip_to(self.pos + 2)\n self._skip_multiline_comment()\n else:\n return c\n",
"def _parse_object(self):\n assert self.peek() == '{'\n self.listener.begin_object()\n self.skip_char()\n first_item = True\n while 1:\n c = self._skip_spaces_and_peek()\n if c == '}':\n self.skip_char()\n self.listener.end_object()\n break\n\n if not first_item:\n self.expect(',')\n\n c = self._skip_spaces_and_peek()\n if c == '}':\n if not self.params.allow_trailing_commas:\n self.error('Trailing commas aren\\'t enabled for this parser.')\n self.skip_char()\n self.listener.end_object()\n break\n\n key, key_quoted, pos_after_key = self._parse_and_return_string(\n self.params.allow_unquoted_keys)\n self.listener.begin_object_item(key, key_quoted)\n # We step self.pos and self.line only after a successful call to the listener\n # because in case of an exception that is raised from the listener we want the\n # line/column number to point to the beginning of the parsed string.\n self.skip_to(pos_after_key)\n\n c = self._skip_spaces_and_peek()\n if c != ':':\n self.error('Expected \":\"')\n self.skip_char()\n\n self._parse_value()\n\n first_item = False\n",
"def _parse_array(self):\n assert self.peek() == '['\n self.listener.begin_array()\n self.skip_char()\n first_item = True\n while 1:\n c = self._skip_spaces_and_peek()\n if c == ']':\n self.skip_char()\n self.listener.end_array()\n break\n\n if not first_item:\n self.expect(',')\n\n c = self._skip_spaces_and_peek()\n if c == ']':\n if not self.params.allow_trailing_commas:\n self.error('Trailing commas aren\\'t enabled for this parser.')\n self.skip_char()\n self.listener.end_array()\n break\n\n self._parse_value()\n first_item = False\n",
"def begin_parsing(self, parser):\n self.parser = parser\n",
"def end_parsing(self):\n self.parser = None\n"
] |
class JSONParser(TextParser):
"""
A simple json parser that works with a fixed sized input buffer (without input streaming) but
this should not be a problem in case of config files that usually have a small limited size.
This parser emits events similarly to a SAX XML parser. The user of this class can implement
several different kind of event listeners. We will work with a listener that builds a json
object hierarchy but later we could implement for example a json validator listener...
"""
spaces = set(' \t\r\n')
special_chars = set('{}[]",:/*')
spaces_and_special_chars = spaces | special_chars
def __init__(self, params=JSONParserParams()):
super(JSONParser, self).__init__(tab_size=params.tab_size)
self.params = params
self.listener = None
def _skip_spaces_and_peek(self):
""" Skips all spaces and comments.
:return: The first character that follows the skipped spaces and comments or
None if the end of the json string has been reached.
"""
while 1:
# skipping spaces
self.skip_chars(self.end, lambda x: x in self.spaces)
c = self.peek()
if not self.params.allow_comments:
return c
if c != '/':
return c
d = self.peek(1)
if d == '/':
self.skip_to(self.pos + 2)
self._skip_singleline_comment()
elif d == '*':
self.skip_to(self.pos + 2)
self._skip_multiline_comment()
else:
return c
def _skip_singleline_comment(self):
for pos in my_xrange(self.pos, self.end):
if self.text[pos] in '\r\n':
self.skip_to(pos + 1)
break
else:
self.skip_to(self.end)
def _skip_multiline_comment(self):
for pos in my_xrange(self.pos, self.end-1):
if self.text[pos] == '*' and self.text[pos+1] == '/':
self.skip_to(pos + 2)
return
self.error('Multiline comment isn\'t closed.')
def _parse_object(self):
assert self.peek() == '{'
self.listener.begin_object()
self.skip_char()
first_item = True
while 1:
c = self._skip_spaces_and_peek()
if c == '}':
self.skip_char()
self.listener.end_object()
break
if not first_item:
self.expect(',')
c = self._skip_spaces_and_peek()
if c == '}':
if not self.params.allow_trailing_commas:
self.error('Trailing commas aren\'t enabled for this parser.')
self.skip_char()
self.listener.end_object()
break
key, key_quoted, pos_after_key = self._parse_and_return_string(
self.params.allow_unquoted_keys)
self.listener.begin_object_item(key, key_quoted)
# We step self.pos and self.line only after a successful call to the listener
# because in case of an exception that is raised from the listener we want the
# line/column number to point to the beginning of the parsed string.
self.skip_to(pos_after_key)
c = self._skip_spaces_and_peek()
if c != ':':
self.error('Expected ":"')
self.skip_char()
self._parse_value()
first_item = False
def _parse_array(self):
assert self.peek() == '['
self.listener.begin_array()
self.skip_char()
first_item = True
while 1:
c = self._skip_spaces_and_peek()
if c == ']':
self.skip_char()
self.listener.end_array()
break
if not first_item:
self.expect(',')
c = self._skip_spaces_and_peek()
if c == ']':
if not self.params.allow_trailing_commas:
self.error('Trailing commas aren\'t enabled for this parser.')
self.skip_char()
self.listener.end_array()
break
self._parse_value()
first_item = False
def _parse_value(self):
c = self._skip_spaces_and_peek()
if c == '{':
self._parse_object()
elif c == '[':
self._parse_array()
else:
self._parse_scalar()
def _parse_scalar(self):
scalar_str, scalar_str_quoted, pos_after_scalar = self._parse_and_return_string(True)
self.listener.scalar(scalar_str, scalar_str_quoted)
self.skip_to(pos_after_scalar)
def _parse_and_return_string(self, allow_unquoted):
c = self._skip_spaces_and_peek()
quoted = c == '"'
if not quoted and not allow_unquoted:
self.error('Unquoted keys arn\'t allowed.')
if quoted:
return self._parse_and_return_quoted_string()
return self._parse_and_return_unquoted_string()
def _parse_and_return_unquoted_string(self):
"""
Parses a string that has no quotation marks so it doesn't
contain any special characters and we don't have to interpret
any escape sequences.
:return: (string, quoted=False, end_of_string_pos)
"""
begin = self.pos
for end in my_xrange(self.pos, self.end):
if self.text[end] in self.spaces_and_special_chars:
break
else:
end = self.end
if begin == end:
self.error('Expected a scalar here.')
return self.text[begin:end], False, end
def _parse_and_return_quoted_string(self):
"""
Parses a string that has quotation marks so it may contain
special characters and escape sequences.
:return: (unescaped_string, quoted=True, end_of_string_pos)
"""
result = []
pos = self.pos + 1
segment_begin = pos
my_chr = my_unichr if isinstance(self.text, my_unicode) else utf8chr
while pos < self.end:
c = self.text[pos]
if c < ' ' and c != '\t':
self.skip_to(pos)
self.error('Encountered a control character that isn\'t allowed in quoted strings.')
elif c == '"':
if segment_begin < pos:
result.append(self.text[segment_begin:pos])
pos += 1
return ''.join(result), True, pos
elif c == '\\':
if segment_begin < pos:
result.append(self.text[segment_begin:pos])
pos += 1
if pos >= self.end:
break
c = self.text[pos]
if c == 'u':
code_point, pos = self._handle_unicode_escape(pos)
result.append(my_chr(code_point))
else:
char, pos = self._handle_escape(pos, c)
result.append(char)
segment_begin = pos
else:
pos += 1
self.error('Reached the end of stream while parsing quoted string.')
def _handle_unicode_escape(self, pos):
if self.end - pos < 5:
self.error('Reached the end of stream while parsing quoted string.')
pos += 1
try:
code_point = int(self.text[pos:pos+4], 16)
except ValueError:
self.skip_to(pos - 2)
self.error('Error decoding unicode escape sequence.')
else:
pos += 4
if 0xd800 <= code_point < 0xdc00 and self.end-pos >= 6 and\
self.text[pos] == '\\' and self.text[pos+1] == 'u':
try:
low_surrogate = int(self.text[pos+2:pos+6], 16)
except ValueError:
self.skip_to(pos)
self.error('Error decoding unicode escape sequence.')
else:
if 0xdc00 <= low_surrogate < 0xe000:
pos += 6
code_point = 0x10000 + (((code_point - 0xd800) << 10) |
(low_surrogate - 0xdc00))
return code_point, pos
def _handle_escape(self, pos, c):
char = {
'\\': '\\',
'/': '/',
'"': '"',
'b': '\b',
'f': '\f',
't': '\t',
'r': '\r',
'n': '\n',
}.get(c)
if char is None:
self.skip_to(pos - 1)
self.error('Quoted string contains an invalid escape sequence.')
return char, pos + 1
|
pasztorpisti/json-cfg
|
src/jsoncfg/parser.py
|
JSONParser._skip_spaces_and_peek
|
python
|
def _skip_spaces_and_peek(self):
while 1:
# skipping spaces
self.skip_chars(self.end, lambda x: x in self.spaces)
c = self.peek()
if not self.params.allow_comments:
return c
if c != '/':
return c
d = self.peek(1)
if d == '/':
self.skip_to(self.pos + 2)
self._skip_singleline_comment()
elif d == '*':
self.skip_to(self.pos + 2)
self._skip_multiline_comment()
else:
return c
|
Skips all spaces and comments.
:return: The first character that follows the skipped spaces and comments or
None if the end of the json string has been reached.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/parser.py#L179-L200
|
[
"def skip_chars(self, target_pos, is_char_skippable_func):\n assert self.pos <= target_pos <= self.end\n target_pos = min(target_pos, self.end)\n for self.pos in my_xrange(self.pos, target_pos):\n c = self.text[self.pos]\n if not is_char_skippable_func(c):\n break\n if c in '\\r\\n':\n if self.prev_newline_char is not None and self.prev_newline_char != c:\n # this is the second char of a CRLF or LFCR\n self.prev_newline_char = None\n else:\n self.prev_newline_char = c\n self.line += 1\n self._column_query_pos = self.pos + 1\n self._column = 0\n else:\n self.prev_newline_char = None\n",
"def skip_to(self, target_pos):\n \"\"\"\n Moves the pointer to target_pos (if the current position is less than target_pos)\n and keeps track the current line/column.\n \"\"\"\n self.skip_chars(target_pos, lambda c: True)\n",
"def peek(self, offset=0):\n \"\"\" Looking forward in the input text without actually stepping the current position.\n returns None if the current position is at the end of the input. \"\"\"\n pos = self.pos + offset\n if pos >= self.end:\n return None\n return self.text[pos]\n",
"def _skip_singleline_comment(self):\n for pos in my_xrange(self.pos, self.end):\n if self.text[pos] in '\\r\\n':\n self.skip_to(pos + 1)\n break\n",
"def _skip_multiline_comment(self):\n for pos in my_xrange(self.pos, self.end-1):\n if self.text[pos] == '*' and self.text[pos+1] == '/':\n self.skip_to(pos + 2)\n return\n self.error('Multiline comment isn\\'t closed.')\n"
] |
class JSONParser(TextParser):
"""
A simple json parser that works with a fixed sized input buffer (without input streaming) but
this should not be a problem in case of config files that usually have a small limited size.
This parser emits events similarly to a SAX XML parser. The user of this class can implement
several different kind of event listeners. We will work with a listener that builds a json
object hierarchy but later we could implement for example a json validator listener...
"""
spaces = set(' \t\r\n')
special_chars = set('{}[]",:/*')
spaces_and_special_chars = spaces | special_chars
def __init__(self, params=JSONParserParams()):
super(JSONParser, self).__init__(tab_size=params.tab_size)
self.params = params
self.listener = None
def parse(self, json_text, listener):
"""
Parses the specified json_text and emits parser events to the listener.
If root_is_array then the root element of the json has to be an array/list,
otherwise the expected root is a json object/dict.
In case of python2 the json_text can be either an utf8 encoded string
or a unicode object and the fired parser events will use the same format.
"""
listener.begin_parsing(self)
try:
self.init_text_parser(json_text)
self.listener = listener
c = self._skip_spaces_and_peek()
if c == '{':
if self.params.root_is_array:
self.error('The root of the json is expected to be an array!')
self._parse_object()
elif c == '[':
if not self.params.root_is_array:
self.error('The root of the json is expected to be an object!')
self._parse_array()
else:
self.error('The json string should start with "%s"' % (
'[' if self.params.root_is_array else '{'))
if self._skip_spaces_and_peek() is not None:
self.error('Garbage detected after the parsed json!')
finally:
listener.end_parsing()
def _skip_singleline_comment(self):
for pos in my_xrange(self.pos, self.end):
if self.text[pos] in '\r\n':
self.skip_to(pos + 1)
break
else:
self.skip_to(self.end)
def _skip_multiline_comment(self):
for pos in my_xrange(self.pos, self.end-1):
if self.text[pos] == '*' and self.text[pos+1] == '/':
self.skip_to(pos + 2)
return
self.error('Multiline comment isn\'t closed.')
def _parse_object(self):
assert self.peek() == '{'
self.listener.begin_object()
self.skip_char()
first_item = True
while 1:
c = self._skip_spaces_and_peek()
if c == '}':
self.skip_char()
self.listener.end_object()
break
if not first_item:
self.expect(',')
c = self._skip_spaces_and_peek()
if c == '}':
if not self.params.allow_trailing_commas:
self.error('Trailing commas aren\'t enabled for this parser.')
self.skip_char()
self.listener.end_object()
break
key, key_quoted, pos_after_key = self._parse_and_return_string(
self.params.allow_unquoted_keys)
self.listener.begin_object_item(key, key_quoted)
# We step self.pos and self.line only after a successful call to the listener
# because in case of an exception that is raised from the listener we want the
# line/column number to point to the beginning of the parsed string.
self.skip_to(pos_after_key)
c = self._skip_spaces_and_peek()
if c != ':':
self.error('Expected ":"')
self.skip_char()
self._parse_value()
first_item = False
def _parse_array(self):
assert self.peek() == '['
self.listener.begin_array()
self.skip_char()
first_item = True
while 1:
c = self._skip_spaces_and_peek()
if c == ']':
self.skip_char()
self.listener.end_array()
break
if not first_item:
self.expect(',')
c = self._skip_spaces_and_peek()
if c == ']':
if not self.params.allow_trailing_commas:
self.error('Trailing commas aren\'t enabled for this parser.')
self.skip_char()
self.listener.end_array()
break
self._parse_value()
first_item = False
def _parse_value(self):
c = self._skip_spaces_and_peek()
if c == '{':
self._parse_object()
elif c == '[':
self._parse_array()
else:
self._parse_scalar()
def _parse_scalar(self):
scalar_str, scalar_str_quoted, pos_after_scalar = self._parse_and_return_string(True)
self.listener.scalar(scalar_str, scalar_str_quoted)
self.skip_to(pos_after_scalar)
def _parse_and_return_string(self, allow_unquoted):
c = self._skip_spaces_and_peek()
quoted = c == '"'
if not quoted and not allow_unquoted:
self.error('Unquoted keys arn\'t allowed.')
if quoted:
return self._parse_and_return_quoted_string()
return self._parse_and_return_unquoted_string()
def _parse_and_return_unquoted_string(self):
"""
Parses a string that has no quotation marks so it doesn't
contain any special characters and we don't have to interpret
any escape sequences.
:return: (string, quoted=False, end_of_string_pos)
"""
begin = self.pos
for end in my_xrange(self.pos, self.end):
if self.text[end] in self.spaces_and_special_chars:
break
else:
end = self.end
if begin == end:
self.error('Expected a scalar here.')
return self.text[begin:end], False, end
def _parse_and_return_quoted_string(self):
"""
Parses a string that has quotation marks so it may contain
special characters and escape sequences.
:return: (unescaped_string, quoted=True, end_of_string_pos)
"""
result = []
pos = self.pos + 1
segment_begin = pos
my_chr = my_unichr if isinstance(self.text, my_unicode) else utf8chr
while pos < self.end:
c = self.text[pos]
if c < ' ' and c != '\t':
self.skip_to(pos)
self.error('Encountered a control character that isn\'t allowed in quoted strings.')
elif c == '"':
if segment_begin < pos:
result.append(self.text[segment_begin:pos])
pos += 1
return ''.join(result), True, pos
elif c == '\\':
if segment_begin < pos:
result.append(self.text[segment_begin:pos])
pos += 1
if pos >= self.end:
break
c = self.text[pos]
if c == 'u':
code_point, pos = self._handle_unicode_escape(pos)
result.append(my_chr(code_point))
else:
char, pos = self._handle_escape(pos, c)
result.append(char)
segment_begin = pos
else:
pos += 1
self.error('Reached the end of stream while parsing quoted string.')
def _handle_unicode_escape(self, pos):
if self.end - pos < 5:
self.error('Reached the end of stream while parsing quoted string.')
pos += 1
try:
code_point = int(self.text[pos:pos+4], 16)
except ValueError:
self.skip_to(pos - 2)
self.error('Error decoding unicode escape sequence.')
else:
pos += 4
if 0xd800 <= code_point < 0xdc00 and self.end-pos >= 6 and\
self.text[pos] == '\\' and self.text[pos+1] == 'u':
try:
low_surrogate = int(self.text[pos+2:pos+6], 16)
except ValueError:
self.skip_to(pos)
self.error('Error decoding unicode escape sequence.')
else:
if 0xdc00 <= low_surrogate < 0xe000:
pos += 6
code_point = 0x10000 + (((code_point - 0xd800) << 10) |
(low_surrogate - 0xdc00))
return code_point, pos
def _handle_escape(self, pos, c):
char = {
'\\': '\\',
'/': '/',
'"': '"',
'b': '\b',
'f': '\f',
't': '\t',
'r': '\r',
'n': '\n',
}.get(c)
if char is None:
self.skip_to(pos - 1)
self.error('Quoted string contains an invalid escape sequence.')
return char, pos + 1
|
pasztorpisti/json-cfg
|
src/jsoncfg/parser.py
|
JSONParser._parse_and_return_unquoted_string
|
python
|
def _parse_and_return_unquoted_string(self):
begin = self.pos
for end in my_xrange(self.pos, self.end):
if self.text[end] in self.spaces_and_special_chars:
break
else:
end = self.end
if begin == end:
self.error('Expected a scalar here.')
return self.text[begin:end], False, end
|
Parses a string that has no quotation marks so it doesn't
contain any special characters and we don't have to interpret
any escape sequences.
:return: (string, quoted=False, end_of_string_pos)
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/parser.py#L307-L322
| null |
class JSONParser(TextParser):
"""
A simple json parser that works with a fixed sized input buffer (without input streaming) but
this should not be a problem in case of config files that usually have a small limited size.
This parser emits events similarly to a SAX XML parser. The user of this class can implement
several different kind of event listeners. We will work with a listener that builds a json
object hierarchy but later we could implement for example a json validator listener...
"""
spaces = set(' \t\r\n')
special_chars = set('{}[]",:/*')
spaces_and_special_chars = spaces | special_chars
def __init__(self, params=JSONParserParams()):
super(JSONParser, self).__init__(tab_size=params.tab_size)
self.params = params
self.listener = None
def parse(self, json_text, listener):
"""
Parses the specified json_text and emits parser events to the listener.
If root_is_array then the root element of the json has to be an array/list,
otherwise the expected root is a json object/dict.
In case of python2 the json_text can be either an utf8 encoded string
or a unicode object and the fired parser events will use the same format.
"""
listener.begin_parsing(self)
try:
self.init_text_parser(json_text)
self.listener = listener
c = self._skip_spaces_and_peek()
if c == '{':
if self.params.root_is_array:
self.error('The root of the json is expected to be an array!')
self._parse_object()
elif c == '[':
if not self.params.root_is_array:
self.error('The root of the json is expected to be an object!')
self._parse_array()
else:
self.error('The json string should start with "%s"' % (
'[' if self.params.root_is_array else '{'))
if self._skip_spaces_and_peek() is not None:
self.error('Garbage detected after the parsed json!')
finally:
listener.end_parsing()
def _skip_spaces_and_peek(self):
""" Skips all spaces and comments.
:return: The first character that follows the skipped spaces and comments or
None if the end of the json string has been reached.
"""
while 1:
# skipping spaces
self.skip_chars(self.end, lambda x: x in self.spaces)
c = self.peek()
if not self.params.allow_comments:
return c
if c != '/':
return c
d = self.peek(1)
if d == '/':
self.skip_to(self.pos + 2)
self._skip_singleline_comment()
elif d == '*':
self.skip_to(self.pos + 2)
self._skip_multiline_comment()
else:
return c
def _skip_singleline_comment(self):
for pos in my_xrange(self.pos, self.end):
if self.text[pos] in '\r\n':
self.skip_to(pos + 1)
break
else:
self.skip_to(self.end)
def _skip_multiline_comment(self):
for pos in my_xrange(self.pos, self.end-1):
if self.text[pos] == '*' and self.text[pos+1] == '/':
self.skip_to(pos + 2)
return
self.error('Multiline comment isn\'t closed.')
def _parse_object(self):
assert self.peek() == '{'
self.listener.begin_object()
self.skip_char()
first_item = True
while 1:
c = self._skip_spaces_and_peek()
if c == '}':
self.skip_char()
self.listener.end_object()
break
if not first_item:
self.expect(',')
c = self._skip_spaces_and_peek()
if c == '}':
if not self.params.allow_trailing_commas:
self.error('Trailing commas aren\'t enabled for this parser.')
self.skip_char()
self.listener.end_object()
break
key, key_quoted, pos_after_key = self._parse_and_return_string(
self.params.allow_unquoted_keys)
self.listener.begin_object_item(key, key_quoted)
# We step self.pos and self.line only after a successful call to the listener
# because in case of an exception that is raised from the listener we want the
# line/column number to point to the beginning of the parsed string.
self.skip_to(pos_after_key)
c = self._skip_spaces_and_peek()
if c != ':':
self.error('Expected ":"')
self.skip_char()
self._parse_value()
first_item = False
def _parse_array(self):
assert self.peek() == '['
self.listener.begin_array()
self.skip_char()
first_item = True
while 1:
c = self._skip_spaces_and_peek()
if c == ']':
self.skip_char()
self.listener.end_array()
break
if not first_item:
self.expect(',')
c = self._skip_spaces_and_peek()
if c == ']':
if not self.params.allow_trailing_commas:
self.error('Trailing commas aren\'t enabled for this parser.')
self.skip_char()
self.listener.end_array()
break
self._parse_value()
first_item = False
def _parse_value(self):
c = self._skip_spaces_and_peek()
if c == '{':
self._parse_object()
elif c == '[':
self._parse_array()
else:
self._parse_scalar()
def _parse_scalar(self):
scalar_str, scalar_str_quoted, pos_after_scalar = self._parse_and_return_string(True)
self.listener.scalar(scalar_str, scalar_str_quoted)
self.skip_to(pos_after_scalar)
def _parse_and_return_string(self, allow_unquoted):
c = self._skip_spaces_and_peek()
quoted = c == '"'
if not quoted and not allow_unquoted:
self.error('Unquoted keys arn\'t allowed.')
if quoted:
return self._parse_and_return_quoted_string()
return self._parse_and_return_unquoted_string()
def _parse_and_return_quoted_string(self):
"""
Parses a string that has quotation marks so it may contain
special characters and escape sequences.
:return: (unescaped_string, quoted=True, end_of_string_pos)
"""
result = []
pos = self.pos + 1
segment_begin = pos
my_chr = my_unichr if isinstance(self.text, my_unicode) else utf8chr
while pos < self.end:
c = self.text[pos]
if c < ' ' and c != '\t':
self.skip_to(pos)
self.error('Encountered a control character that isn\'t allowed in quoted strings.')
elif c == '"':
if segment_begin < pos:
result.append(self.text[segment_begin:pos])
pos += 1
return ''.join(result), True, pos
elif c == '\\':
if segment_begin < pos:
result.append(self.text[segment_begin:pos])
pos += 1
if pos >= self.end:
break
c = self.text[pos]
if c == 'u':
code_point, pos = self._handle_unicode_escape(pos)
result.append(my_chr(code_point))
else:
char, pos = self._handle_escape(pos, c)
result.append(char)
segment_begin = pos
else:
pos += 1
self.error('Reached the end of stream while parsing quoted string.')
def _handle_unicode_escape(self, pos):
if self.end - pos < 5:
self.error('Reached the end of stream while parsing quoted string.')
pos += 1
try:
code_point = int(self.text[pos:pos+4], 16)
except ValueError:
self.skip_to(pos - 2)
self.error('Error decoding unicode escape sequence.')
else:
pos += 4
if 0xd800 <= code_point < 0xdc00 and self.end-pos >= 6 and\
self.text[pos] == '\\' and self.text[pos+1] == 'u':
try:
low_surrogate = int(self.text[pos+2:pos+6], 16)
except ValueError:
self.skip_to(pos)
self.error('Error decoding unicode escape sequence.')
else:
if 0xdc00 <= low_surrogate < 0xe000:
pos += 6
code_point = 0x10000 + (((code_point - 0xd800) << 10) |
(low_surrogate - 0xdc00))
return code_point, pos
def _handle_escape(self, pos, c):
char = {
'\\': '\\',
'/': '/',
'"': '"',
'b': '\b',
'f': '\f',
't': '\t',
'r': '\r',
'n': '\n',
}.get(c)
if char is None:
self.skip_to(pos - 1)
self.error('Quoted string contains an invalid escape sequence.')
return char, pos + 1
|
pasztorpisti/json-cfg
|
src/jsoncfg/parser.py
|
JSONParser._parse_and_return_quoted_string
|
python
|
def _parse_and_return_quoted_string(self):
result = []
pos = self.pos + 1
segment_begin = pos
my_chr = my_unichr if isinstance(self.text, my_unicode) else utf8chr
while pos < self.end:
c = self.text[pos]
if c < ' ' and c != '\t':
self.skip_to(pos)
self.error('Encountered a control character that isn\'t allowed in quoted strings.')
elif c == '"':
if segment_begin < pos:
result.append(self.text[segment_begin:pos])
pos += 1
return ''.join(result), True, pos
elif c == '\\':
if segment_begin < pos:
result.append(self.text[segment_begin:pos])
pos += 1
if pos >= self.end:
break
c = self.text[pos]
if c == 'u':
code_point, pos = self._handle_unicode_escape(pos)
result.append(my_chr(code_point))
else:
char, pos = self._handle_escape(pos, c)
result.append(char)
segment_begin = pos
else:
pos += 1
self.error('Reached the end of stream while parsing quoted string.')
|
Parses a string that has quotation marks so it may contain
special characters and escape sequences.
:return: (unescaped_string, quoted=True, end_of_string_pos)
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/parser.py#L324-L360
| null |
class JSONParser(TextParser):
"""
A simple json parser that works with a fixed sized input buffer (without input streaming) but
this should not be a problem in case of config files that usually have a small limited size.
This parser emits events similarly to a SAX XML parser. The user of this class can implement
several different kind of event listeners. We will work with a listener that builds a json
object hierarchy but later we could implement for example a json validator listener...
"""
spaces = set(' \t\r\n')
special_chars = set('{}[]",:/*')
spaces_and_special_chars = spaces | special_chars
def __init__(self, params=JSONParserParams()):
super(JSONParser, self).__init__(tab_size=params.tab_size)
self.params = params
self.listener = None
def parse(self, json_text, listener):
"""
Parses the specified json_text and emits parser events to the listener.
If root_is_array then the root element of the json has to be an array/list,
otherwise the expected root is a json object/dict.
In case of python2 the json_text can be either an utf8 encoded string
or a unicode object and the fired parser events will use the same format.
"""
listener.begin_parsing(self)
try:
self.init_text_parser(json_text)
self.listener = listener
c = self._skip_spaces_and_peek()
if c == '{':
if self.params.root_is_array:
self.error('The root of the json is expected to be an array!')
self._parse_object()
elif c == '[':
if not self.params.root_is_array:
self.error('The root of the json is expected to be an object!')
self._parse_array()
else:
self.error('The json string should start with "%s"' % (
'[' if self.params.root_is_array else '{'))
if self._skip_spaces_and_peek() is not None:
self.error('Garbage detected after the parsed json!')
finally:
listener.end_parsing()
def _skip_spaces_and_peek(self):
""" Skips all spaces and comments.
:return: The first character that follows the skipped spaces and comments or
None if the end of the json string has been reached.
"""
while 1:
# skipping spaces
self.skip_chars(self.end, lambda x: x in self.spaces)
c = self.peek()
if not self.params.allow_comments:
return c
if c != '/':
return c
d = self.peek(1)
if d == '/':
self.skip_to(self.pos + 2)
self._skip_singleline_comment()
elif d == '*':
self.skip_to(self.pos + 2)
self._skip_multiline_comment()
else:
return c
def _skip_singleline_comment(self):
for pos in my_xrange(self.pos, self.end):
if self.text[pos] in '\r\n':
self.skip_to(pos + 1)
break
else:
self.skip_to(self.end)
def _skip_multiline_comment(self):
for pos in my_xrange(self.pos, self.end-1):
if self.text[pos] == '*' and self.text[pos+1] == '/':
self.skip_to(pos + 2)
return
self.error('Multiline comment isn\'t closed.')
def _parse_object(self):
assert self.peek() == '{'
self.listener.begin_object()
self.skip_char()
first_item = True
while 1:
c = self._skip_spaces_and_peek()
if c == '}':
self.skip_char()
self.listener.end_object()
break
if not first_item:
self.expect(',')
c = self._skip_spaces_and_peek()
if c == '}':
if not self.params.allow_trailing_commas:
self.error('Trailing commas aren\'t enabled for this parser.')
self.skip_char()
self.listener.end_object()
break
key, key_quoted, pos_after_key = self._parse_and_return_string(
self.params.allow_unquoted_keys)
self.listener.begin_object_item(key, key_quoted)
# We step self.pos and self.line only after a successful call to the listener
# because in case of an exception that is raised from the listener we want the
# line/column number to point to the beginning of the parsed string.
self.skip_to(pos_after_key)
c = self._skip_spaces_and_peek()
if c != ':':
self.error('Expected ":"')
self.skip_char()
self._parse_value()
first_item = False
def _parse_array(self):
assert self.peek() == '['
self.listener.begin_array()
self.skip_char()
first_item = True
while 1:
c = self._skip_spaces_and_peek()
if c == ']':
self.skip_char()
self.listener.end_array()
break
if not first_item:
self.expect(',')
c = self._skip_spaces_and_peek()
if c == ']':
if not self.params.allow_trailing_commas:
self.error('Trailing commas aren\'t enabled for this parser.')
self.skip_char()
self.listener.end_array()
break
self._parse_value()
first_item = False
def _parse_value(self):
c = self._skip_spaces_and_peek()
if c == '{':
self._parse_object()
elif c == '[':
self._parse_array()
else:
self._parse_scalar()
def _parse_scalar(self):
scalar_str, scalar_str_quoted, pos_after_scalar = self._parse_and_return_string(True)
self.listener.scalar(scalar_str, scalar_str_quoted)
self.skip_to(pos_after_scalar)
def _parse_and_return_string(self, allow_unquoted):
c = self._skip_spaces_and_peek()
quoted = c == '"'
if not quoted and not allow_unquoted:
self.error('Unquoted keys arn\'t allowed.')
if quoted:
return self._parse_and_return_quoted_string()
return self._parse_and_return_unquoted_string()
def _parse_and_return_unquoted_string(self):
"""
Parses a string that has no quotation marks so it doesn't
contain any special characters and we don't have to interpret
any escape sequences.
:return: (string, quoted=False, end_of_string_pos)
"""
begin = self.pos
for end in my_xrange(self.pos, self.end):
if self.text[end] in self.spaces_and_special_chars:
break
else:
end = self.end
if begin == end:
self.error('Expected a scalar here.')
return self.text[begin:end], False, end
def _handle_unicode_escape(self, pos):
if self.end - pos < 5:
self.error('Reached the end of stream while parsing quoted string.')
pos += 1
try:
code_point = int(self.text[pos:pos+4], 16)
except ValueError:
self.skip_to(pos - 2)
self.error('Error decoding unicode escape sequence.')
else:
pos += 4
if 0xd800 <= code_point < 0xdc00 and self.end-pos >= 6 and\
self.text[pos] == '\\' and self.text[pos+1] == 'u':
try:
low_surrogate = int(self.text[pos+2:pos+6], 16)
except ValueError:
self.skip_to(pos)
self.error('Error decoding unicode escape sequence.')
else:
if 0xdc00 <= low_surrogate < 0xe000:
pos += 6
code_point = 0x10000 + (((code_point - 0xd800) << 10) |
(low_surrogate - 0xdc00))
return code_point, pos
def _handle_escape(self, pos, c):
char = {
'\\': '\\',
'/': '/',
'"': '"',
'b': '\b',
'f': '\f',
't': '\t',
'r': '\r',
'n': '\n',
}.get(c)
if char is None:
self.skip_to(pos - 1)
self.error('Quoted string contains an invalid escape sequence.')
return char, pos + 1
|
pasztorpisti/json-cfg
|
src/jsoncfg/functions.py
|
loads
|
python
|
def loads(s,
parser_params=JSONParserParams(),
object_builder_params=PythonObjectBuilderParams()):
parser = JSONParser(parser_params)
listener = ObjectBuilderParserListener(object_builder_params)
parser.parse(s, listener)
return listener.result
|
Loads a json string as a python object hierarchy just like the standard json.loads(). Unlike
the standard json.loads() this function uses OrderedDict instances to represent json objects
but the class of the dictionary to be used is configurable.
:param s: The json string to load.
:params parser_params: Parser parameters.
:type parser_params: JSONParserParams
:param object_builder_params: Parameters to the ObjectBuilderParserListener, these parameters
are mostly factories to create the python object hierarchy while parsing.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/functions.py#L11-L27
|
[
"def parse(self, json_text, listener):\n \"\"\"\n Parses the specified json_text and emits parser events to the listener.\n If root_is_array then the root element of the json has to be an array/list,\n otherwise the expected root is a json object/dict.\n\n In case of python2 the json_text can be either an utf8 encoded string\n or a unicode object and the fired parser events will use the same format.\n \"\"\"\n listener.begin_parsing(self)\n try:\n self.init_text_parser(json_text)\n self.listener = listener\n\n c = self._skip_spaces_and_peek()\n if c == '{':\n if self.params.root_is_array:\n self.error('The root of the json is expected to be an array!')\n self._parse_object()\n elif c == '[':\n if not self.params.root_is_array:\n self.error('The root of the json is expected to be an object!')\n self._parse_array()\n else:\n self.error('The json string should start with \"%s\"' % (\n '[' if self.params.root_is_array else '{'))\n\n if self._skip_spaces_and_peek() is not None:\n self.error('Garbage detected after the parsed json!')\n finally:\n listener.end_parsing()\n"
] |
"""
Contains the load functions that we use as the public interface of this whole library.
"""
from .parser import JSONParserParams, JSONParser
from .parser_listener import ObjectBuilderParserListener
from .tree_python import PythonObjectBuilderParams, DefaultStringToScalarConverter
from .tree_config import ConfigObjectBuilderParams
from .text_encoding import load_utf_text_file
def loads_config(s,
parser_params=JSONParserParams(),
string_to_scalar_converter=DefaultStringToScalarConverter()):
"""
Works similar to the loads() function but this one returns a json object hierarchy
that wraps all json objects, arrays and scalars to provide a nice config query syntax.
For example:
my_config = loads_config(json_string)
ip_address = my_config.servers.reverse_proxy.ip_address()
port = my_config.servers.reverse_proxy.port(80)
Note that the raw unwrapped values can be fetched with the __call__ operator.
This operator has the following signature: __call__(default=None, mapper=None).
Fetching a value without specifying a default value means that the value is required
and it has to be in the config. If it isn't there then a JSONConfigValueNotFoundError
is raised. The optional mapper parameter can be a function that receives the unwrapped
value and it can return something that may be based on the input parameter. You can
also use this mapper parameter to pass a function that performs checking on the
value and raises an exception (eg. ValueError) on error.
If you specify a default value and the required config value is not present then
default is returned. In this case mapper isn't called with the default value.
"""
parser = JSONParser(parser_params)
object_builder_params = ConfigObjectBuilderParams(string_to_scalar_converter=string_to_scalar_converter)
listener = ObjectBuilderParserListener(object_builder_params)
parser.parse(s, listener)
return listener.result
def load(file_, *args, **kwargs):
"""
Does exactly the same as loads() but instead of a json string this function
receives the path to a file containing the json string or a file like object with a read()
method.
:param file_: Filename or a file like object with read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
Defaults to UTF-8.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
"""
json_str = load_utf_text_file(
file_,
default_encoding=kwargs.pop('default_encoding', 'UTF-8'),
use_utf8_strings=kwargs.pop('use_utf8_strings', True),
)
return loads(json_str, *args, **kwargs)
def load_config(file_, *args, **kwargs):
"""
Does exactly the same as loads_config() but instead of a json string this function
receives the path to a file containing the json string or a file like object with a read()
method.
:param file_: Filename or a file like object with read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
Defaults to UTF-8.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
"""
json_str = load_utf_text_file(
file_,
default_encoding=kwargs.pop('default_encoding', 'UTF-8'),
use_utf8_strings=kwargs.pop('use_utf8_strings', True),
)
return loads_config(json_str, *args, **kwargs)
|
pasztorpisti/json-cfg
|
src/jsoncfg/functions.py
|
loads_config
|
python
|
def loads_config(s,
parser_params=JSONParserParams(),
string_to_scalar_converter=DefaultStringToScalarConverter()):
parser = JSONParser(parser_params)
object_builder_params = ConfigObjectBuilderParams(string_to_scalar_converter=string_to_scalar_converter)
listener = ObjectBuilderParserListener(object_builder_params)
parser.parse(s, listener)
return listener.result
|
Works similar to the loads() function but this one returns a json object hierarchy
that wraps all json objects, arrays and scalars to provide a nice config query syntax.
For example:
my_config = loads_config(json_string)
ip_address = my_config.servers.reverse_proxy.ip_address()
port = my_config.servers.reverse_proxy.port(80)
Note that the raw unwrapped values can be fetched with the __call__ operator.
This operator has the following signature: __call__(default=None, mapper=None).
Fetching a value without specifying a default value means that the value is required
and it has to be in the config. If it isn't there then a JSONConfigValueNotFoundError
is raised. The optional mapper parameter can be a function that receives the unwrapped
value and it can return something that may be based on the input parameter. You can
also use this mapper parameter to pass a function that performs checking on the
value and raises an exception (eg. ValueError) on error.
If you specify a default value and the required config value is not present then
default is returned. In this case mapper isn't called with the default value.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/functions.py#L30-L57
|
[
"def parse(self, json_text, listener):\n \"\"\"\n Parses the specified json_text and emits parser events to the listener.\n If root_is_array then the root element of the json has to be an array/list,\n otherwise the expected root is a json object/dict.\n\n In case of python2 the json_text can be either an utf8 encoded string\n or a unicode object and the fired parser events will use the same format.\n \"\"\"\n listener.begin_parsing(self)\n try:\n self.init_text_parser(json_text)\n self.listener = listener\n\n c = self._skip_spaces_and_peek()\n if c == '{':\n if self.params.root_is_array:\n self.error('The root of the json is expected to be an array!')\n self._parse_object()\n elif c == '[':\n if not self.params.root_is_array:\n self.error('The root of the json is expected to be an object!')\n self._parse_array()\n else:\n self.error('The json string should start with \"%s\"' % (\n '[' if self.params.root_is_array else '{'))\n\n if self._skip_spaces_and_peek() is not None:\n self.error('Garbage detected after the parsed json!')\n finally:\n listener.end_parsing()\n"
] |
"""
Contains the load functions that we use as the public interface of this whole library.
"""
from .parser import JSONParserParams, JSONParser
from .parser_listener import ObjectBuilderParserListener
from .tree_python import PythonObjectBuilderParams, DefaultStringToScalarConverter
from .tree_config import ConfigObjectBuilderParams
from .text_encoding import load_utf_text_file
def loads(s,
parser_params=JSONParserParams(),
object_builder_params=PythonObjectBuilderParams()):
"""
Loads a json string as a python object hierarchy just like the standard json.loads(). Unlike
the standard json.loads() this function uses OrderedDict instances to represent json objects
but the class of the dictionary to be used is configurable.
:param s: The json string to load.
:params parser_params: Parser parameters.
:type parser_params: JSONParserParams
:param object_builder_params: Parameters to the ObjectBuilderParserListener, these parameters
are mostly factories to create the python object hierarchy while parsing.
"""
parser = JSONParser(parser_params)
listener = ObjectBuilderParserListener(object_builder_params)
parser.parse(s, listener)
return listener.result
def load(file_, *args, **kwargs):
"""
Does exactly the same as loads() but instead of a json string this function
receives the path to a file containing the json string or a file like object with a read()
method.
:param file_: Filename or a file like object with read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
Defaults to UTF-8.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
"""
json_str = load_utf_text_file(
file_,
default_encoding=kwargs.pop('default_encoding', 'UTF-8'),
use_utf8_strings=kwargs.pop('use_utf8_strings', True),
)
return loads(json_str, *args, **kwargs)
def load_config(file_, *args, **kwargs):
"""
Does exactly the same as loads_config() but instead of a json string this function
receives the path to a file containing the json string or a file like object with a read()
method.
:param file_: Filename or a file like object with read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
Defaults to UTF-8.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
"""
json_str = load_utf_text_file(
file_,
default_encoding=kwargs.pop('default_encoding', 'UTF-8'),
use_utf8_strings=kwargs.pop('use_utf8_strings', True),
)
return loads_config(json_str, *args, **kwargs)
|
pasztorpisti/json-cfg
|
src/jsoncfg/functions.py
|
load
|
python
|
def load(file_, *args, **kwargs):
json_str = load_utf_text_file(
file_,
default_encoding=kwargs.pop('default_encoding', 'UTF-8'),
use_utf8_strings=kwargs.pop('use_utf8_strings', True),
)
return loads(json_str, *args, **kwargs)
|
Does exactly the same as loads() but instead of a json string this function
receives the path to a file containing the json string or a file like object with a read()
method.
:param file_: Filename or a file like object with read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
Defaults to UTF-8.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/functions.py#L60-L77
|
[
"def loads(s,\n parser_params=JSONParserParams(),\n object_builder_params=PythonObjectBuilderParams()):\n \"\"\"\n Loads a json string as a python object hierarchy just like the standard json.loads(). Unlike\n the standard json.loads() this function uses OrderedDict instances to represent json objects\n but the class of the dictionary to be used is configurable.\n :param s: The json string to load.\n :params parser_params: Parser parameters.\n :type parser_params: JSONParserParams\n :param object_builder_params: Parameters to the ObjectBuilderParserListener, these parameters\n are mostly factories to create the python object hierarchy while parsing.\n \"\"\"\n parser = JSONParser(parser_params)\n listener = ObjectBuilderParserListener(object_builder_params)\n parser.parse(s, listener)\n return listener.result\n",
"def load_utf_text_file(file_, default_encoding='UTF-8', use_utf8_strings=True):\n \"\"\"\n Loads the specified text file and tries to decode it using one of the UTF encodings.\n :param file_: The path to the loadable text file or a file-like object with a read() method.\n :param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.\n :param use_utf8_strings: Ignored in case of python3, in case of python2 the default\n value of this is True. True means that the loaded json string should be handled as a utf-8\n encoded str instead of a unicode object.\n :return: A unicode object. In case of python2 it can optionally be an str object\n containing utf-8 encoded text.\n \"\"\"\n if isinstance(file_, my_basestring):\n with open(file_, 'rb') as f:\n buf = f.read()\n else:\n buf = file_.read()\n return decode_utf_text_buffer(buf, default_encoding, use_utf8_strings)\n"
] |
"""
Contains the load functions that we use as the public interface of this whole library.
"""
from .parser import JSONParserParams, JSONParser
from .parser_listener import ObjectBuilderParserListener
from .tree_python import PythonObjectBuilderParams, DefaultStringToScalarConverter
from .tree_config import ConfigObjectBuilderParams
from .text_encoding import load_utf_text_file
def loads(s,
parser_params=JSONParserParams(),
object_builder_params=PythonObjectBuilderParams()):
"""
Loads a json string as a python object hierarchy just like the standard json.loads(). Unlike
the standard json.loads() this function uses OrderedDict instances to represent json objects
but the class of the dictionary to be used is configurable.
:param s: The json string to load.
:params parser_params: Parser parameters.
:type parser_params: JSONParserParams
:param object_builder_params: Parameters to the ObjectBuilderParserListener, these parameters
are mostly factories to create the python object hierarchy while parsing.
"""
parser = JSONParser(parser_params)
listener = ObjectBuilderParserListener(object_builder_params)
parser.parse(s, listener)
return listener.result
def loads_config(s,
parser_params=JSONParserParams(),
string_to_scalar_converter=DefaultStringToScalarConverter()):
"""
Works similar to the loads() function but this one returns a json object hierarchy
that wraps all json objects, arrays and scalars to provide a nice config query syntax.
For example:
my_config = loads_config(json_string)
ip_address = my_config.servers.reverse_proxy.ip_address()
port = my_config.servers.reverse_proxy.port(80)
Note that the raw unwrapped values can be fetched with the __call__ operator.
This operator has the following signature: __call__(default=None, mapper=None).
Fetching a value without specifying a default value means that the value is required
and it has to be in the config. If it isn't there then a JSONConfigValueNotFoundError
is raised. The optional mapper parameter can be a function that receives the unwrapped
value and it can return something that may be based on the input parameter. You can
also use this mapper parameter to pass a function that performs checking on the
value and raises an exception (eg. ValueError) on error.
If you specify a default value and the required config value is not present then
default is returned. In this case mapper isn't called with the default value.
"""
parser = JSONParser(parser_params)
object_builder_params = ConfigObjectBuilderParams(string_to_scalar_converter=string_to_scalar_converter)
listener = ObjectBuilderParserListener(object_builder_params)
parser.parse(s, listener)
return listener.result
def load_config(file_, *args, **kwargs):
"""
Does exactly the same as loads_config() but instead of a json string this function
receives the path to a file containing the json string or a file like object with a read()
method.
:param file_: Filename or a file like object with read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
Defaults to UTF-8.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
"""
json_str = load_utf_text_file(
file_,
default_encoding=kwargs.pop('default_encoding', 'UTF-8'),
use_utf8_strings=kwargs.pop('use_utf8_strings', True),
)
return loads_config(json_str, *args, **kwargs)
|
pasztorpisti/json-cfg
|
src/jsoncfg/functions.py
|
load_config
|
python
|
def load_config(file_, *args, **kwargs):
json_str = load_utf_text_file(
file_,
default_encoding=kwargs.pop('default_encoding', 'UTF-8'),
use_utf8_strings=kwargs.pop('use_utf8_strings', True),
)
return loads_config(json_str, *args, **kwargs)
|
Does exactly the same as loads_config() but instead of a json string this function
receives the path to a file containing the json string or a file like object with a read()
method.
:param file_: Filename or a file like object with read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
Defaults to UTF-8.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
|
train
|
https://github.com/pasztorpisti/json-cfg/blob/4627b14a92521ef8a39bbedaa7af8d380d406d07/src/jsoncfg/functions.py#L80-L97
|
[
"def loads_config(s,\n parser_params=JSONParserParams(),\n string_to_scalar_converter=DefaultStringToScalarConverter()):\n \"\"\"\n Works similar to the loads() function but this one returns a json object hierarchy\n that wraps all json objects, arrays and scalars to provide a nice config query syntax.\n For example:\n my_config = loads_config(json_string)\n ip_address = my_config.servers.reverse_proxy.ip_address()\n port = my_config.servers.reverse_proxy.port(80)\n\n Note that the raw unwrapped values can be fetched with the __call__ operator.\n This operator has the following signature: __call__(default=None, mapper=None).\n Fetching a value without specifying a default value means that the value is required\n and it has to be in the config. If it isn't there then a JSONConfigValueNotFoundError\n is raised. The optional mapper parameter can be a function that receives the unwrapped\n value and it can return something that may be based on the input parameter. You can\n also use this mapper parameter to pass a function that performs checking on the\n value and raises an exception (eg. ValueError) on error.\n\n If you specify a default value and the required config value is not present then\n default is returned. In this case mapper isn't called with the default value.\n \"\"\"\n parser = JSONParser(parser_params)\n object_builder_params = ConfigObjectBuilderParams(string_to_scalar_converter=string_to_scalar_converter)\n listener = ObjectBuilderParserListener(object_builder_params)\n parser.parse(s, listener)\n return listener.result\n",
"def load_utf_text_file(file_, default_encoding='UTF-8', use_utf8_strings=True):\n \"\"\"\n Loads the specified text file and tries to decode it using one of the UTF encodings.\n :param file_: The path to the loadable text file or a file-like object with a read() method.\n :param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.\n :param use_utf8_strings: Ignored in case of python3, in case of python2 the default\n value of this is True. True means that the loaded json string should be handled as a utf-8\n encoded str instead of a unicode object.\n :return: A unicode object. In case of python2 it can optionally be an str object\n containing utf-8 encoded text.\n \"\"\"\n if isinstance(file_, my_basestring):\n with open(file_, 'rb') as f:\n buf = f.read()\n else:\n buf = file_.read()\n return decode_utf_text_buffer(buf, default_encoding, use_utf8_strings)\n"
] |
"""
Contains the load functions that we use as the public interface of this whole library.
"""
from .parser import JSONParserParams, JSONParser
from .parser_listener import ObjectBuilderParserListener
from .tree_python import PythonObjectBuilderParams, DefaultStringToScalarConverter
from .tree_config import ConfigObjectBuilderParams
from .text_encoding import load_utf_text_file
def loads(s,
parser_params=JSONParserParams(),
object_builder_params=PythonObjectBuilderParams()):
"""
Loads a json string as a python object hierarchy just like the standard json.loads(). Unlike
the standard json.loads() this function uses OrderedDict instances to represent json objects
but the class of the dictionary to be used is configurable.
:param s: The json string to load.
:params parser_params: Parser parameters.
:type parser_params: JSONParserParams
:param object_builder_params: Parameters to the ObjectBuilderParserListener, these parameters
are mostly factories to create the python object hierarchy while parsing.
"""
parser = JSONParser(parser_params)
listener = ObjectBuilderParserListener(object_builder_params)
parser.parse(s, listener)
return listener.result
def loads_config(s,
parser_params=JSONParserParams(),
string_to_scalar_converter=DefaultStringToScalarConverter()):
"""
Works similar to the loads() function but this one returns a json object hierarchy
that wraps all json objects, arrays and scalars to provide a nice config query syntax.
For example:
my_config = loads_config(json_string)
ip_address = my_config.servers.reverse_proxy.ip_address()
port = my_config.servers.reverse_proxy.port(80)
Note that the raw unwrapped values can be fetched with the __call__ operator.
This operator has the following signature: __call__(default=None, mapper=None).
Fetching a value without specifying a default value means that the value is required
and it has to be in the config. If it isn't there then a JSONConfigValueNotFoundError
is raised. The optional mapper parameter can be a function that receives the unwrapped
value and it can return something that may be based on the input parameter. You can
also use this mapper parameter to pass a function that performs checking on the
value and raises an exception (eg. ValueError) on error.
If you specify a default value and the required config value is not present then
default is returned. In this case mapper isn't called with the default value.
"""
parser = JSONParser(parser_params)
object_builder_params = ConfigObjectBuilderParams(string_to_scalar_converter=string_to_scalar_converter)
listener = ObjectBuilderParserListener(object_builder_params)
parser.parse(s, listener)
return listener.result
def load(file_, *args, **kwargs):
"""
Does exactly the same as loads() but instead of a json string this function
receives the path to a file containing the json string or a file like object with a read()
method.
:param file_: Filename or a file like object with read() method.
:param default_encoding: The encoding to be used if the file doesn't have a BOM prefix.
Defaults to UTF-8.
:param use_utf8_strings: Ignored in case of python3, in case of python2 the default
value of this is True. True means that the loaded json string should be handled as a utf-8
encoded str instead of a unicode object.
"""
json_str = load_utf_text_file(
file_,
default_encoding=kwargs.pop('default_encoding', 'UTF-8'),
use_utf8_strings=kwargs.pop('use_utf8_strings', True),
)
return loads(json_str, *args, **kwargs)
|
Rediker-Software/doac
|
doac/handlers/bearer.py
|
BearerHandler.access_token
|
python
|
def access_token(self, value, request):
if self.validate(value, request) is not None:
return None
access_token = AccessToken.objects.for_token(value)
return access_token
|
Try to get the `AccessToken` associated with the provided token.
*The provided value must pass `BearerHandler.validate()`*
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/handlers/bearer.py#L9-L21
|
[
"def validate(self, value, request):\n \"\"\"\n Try to get the `AccessToken` associated with the given token.\n\n The return value is determined based n a few things:\n\n - If no token is provided (`value` is None), a 400 response will be returned.\n - If an invalid token is provided, a 401 response will be returned.\n - If the token provided is valid, `None` will be returned.\n \"\"\"\n\n from django.http import HttpResponseBadRequest\n from doac.http import HttpResponseUnauthorized\n\n if not value:\n response = HttpResponseBadRequest()\n response[\"WWW-Authenticate\"] = request_error_header(CredentialsNotProvided)\n\n return response\n\n try:\n access_token = AccessToken.objects.for_token(value)\n except AccessToken.DoesNotExist:\n response = HttpResponseUnauthorized()\n response[\"WWW-Authenticate\"] = request_error_header(InvalidToken)\n\n return response\n\n return None\n"
] |
class BearerHandler:
def access_token(self, value, request):
"""
Try to get the `AccessToken` associated with the provided token.
*The provided value must pass `BearerHandler.validate()`*
"""
if self.validate(value, request) is not None:
return None
access_token = AccessToken.objects.for_token(value)
return access_token
def authenticate(self, value, request):
"""
Try to get a user associated with the provided token.
*The provided value must pass `BearerHandler.validate()`*
"""
if self.validate(value, request) is not None:
return None
access_token = AccessToken.objects.for_token(value)
return access_token.user
def validate(self, value, request):
"""
Try to get the `AccessToken` associated with the given token.
The return value is determined based n a few things:
- If no token is provided (`value` is None), a 400 response will be returned.
- If an invalid token is provided, a 401 response will be returned.
- If the token provided is valid, `None` will be returned.
"""
from django.http import HttpResponseBadRequest
from doac.http import HttpResponseUnauthorized
if not value:
response = HttpResponseBadRequest()
response["WWW-Authenticate"] = request_error_header(CredentialsNotProvided)
return response
try:
access_token = AccessToken.objects.for_token(value)
except AccessToken.DoesNotExist:
response = HttpResponseUnauthorized()
response["WWW-Authenticate"] = request_error_header(InvalidToken)
return response
return None
|
Rediker-Software/doac
|
doac/handlers/bearer.py
|
BearerHandler.validate
|
python
|
def validate(self, value, request):
from django.http import HttpResponseBadRequest
from doac.http import HttpResponseUnauthorized
if not value:
response = HttpResponseBadRequest()
response["WWW-Authenticate"] = request_error_header(CredentialsNotProvided)
return response
try:
access_token = AccessToken.objects.for_token(value)
except AccessToken.DoesNotExist:
response = HttpResponseUnauthorized()
response["WWW-Authenticate"] = request_error_header(InvalidToken)
return response
return None
|
Try to get the `AccessToken` associated with the given token.
The return value is determined based n a few things:
- If no token is provided (`value` is None), a 400 response will be returned.
- If an invalid token is provided, a 401 response will be returned.
- If the token provided is valid, `None` will be returned.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/handlers/bearer.py#L37-L65
|
[
"def request_error_header(exception):\n \"\"\"\n Generates the error header for a request using a Bearer token based on a given OAuth exception.\n \"\"\"\n\n from .conf import options\n\n header = \"Bearer realm=\\\"%s\\\"\" % (options.realm, )\n\n if hasattr(exception, \"error\"):\n header = header + \", error=\\\"%s\\\"\" % (exception.error, )\n\n if hasattr(exception, \"reason\"):\n header = header + \", error_description=\\\"%s\\\"\" % (exception.reason, )\n\n return header\n"
] |
class BearerHandler:
def access_token(self, value, request):
"""
Try to get the `AccessToken` associated with the provided token.
*The provided value must pass `BearerHandler.validate()`*
"""
if self.validate(value, request) is not None:
return None
access_token = AccessToken.objects.for_token(value)
return access_token
def authenticate(self, value, request):
"""
Try to get a user associated with the provided token.
*The provided value must pass `BearerHandler.validate()`*
"""
if self.validate(value, request) is not None:
return None
access_token = AccessToken.objects.for_token(value)
return access_token.user
def validate(self, value, request):
"""
Try to get the `AccessToken` associated with the given token.
The return value is determined based n a few things:
- If no token is provided (`value` is None), a 400 response will be returned.
- If an invalid token is provided, a 401 response will be returned.
- If the token provided is valid, `None` will be returned.
"""
from django.http import HttpResponseBadRequest
from doac.http import HttpResponseUnauthorized
if not value:
response = HttpResponseBadRequest()
response["WWW-Authenticate"] = request_error_header(CredentialsNotProvided)
return response
try:
access_token = AccessToken.objects.for_token(value)
except AccessToken.DoesNotExist:
response = HttpResponseUnauthorized()
response["WWW-Authenticate"] = request_error_header(InvalidToken)
return response
return None
|
Rediker-Software/doac
|
doac/utils.py
|
prune_old_authorization_codes
|
python
|
def prune_old_authorization_codes():
from .compat import now
from .models import AuthorizationCode
AuthorizationCode.objects.with_expiration_before(now()).delete()
|
Removes all unused and expired authorization codes from the database.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/utils.py#L1-L9
| null |
def prune_old_authorization_codes():
"""
Removes all unused and expired authorization codes from the database.
"""
from .compat import now
from .models import AuthorizationCode
AuthorizationCode.objects.with_expiration_before(now()).delete()
def get_handler(handler_name):
"""
Imports the module for a DOAC handler based on the string representation of the module path that is provided.
"""
from .conf import options
handlers = options.handlers
for handler in handlers:
handler_path = handler.split(".")
name = handler_path[-2]
if handler_name == name:
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
return getattr(handler_module, handler_path[-1])()
return None
def request_error_header(exception):
"""
Generates the error header for a request using a Bearer token based on a given OAuth exception.
"""
from .conf import options
header = "Bearer realm=\"%s\"" % (options.realm, )
if hasattr(exception, "error"):
header = header + ", error=\"%s\"" % (exception.error, )
if hasattr(exception, "reason"):
header = header + ", error_description=\"%s\"" % (exception.reason, )
return header
def total_seconds(delta):
"""
Get the total seconds that a `datetime.timedelta` object covers. Used for returning the total
time until a token expires during the handshake process.
"""
return delta.days * 86400 + delta.seconds
|
Rediker-Software/doac
|
doac/utils.py
|
get_handler
|
python
|
def get_handler(handler_name):
from .conf import options
handlers = options.handlers
for handler in handlers:
handler_path = handler.split(".")
name = handler_path[-2]
if handler_name == name:
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
return getattr(handler_module, handler_path[-1])()
return None
|
Imports the module for a DOAC handler based on the string representation of the module path that is provided.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/utils.py#L12-L30
| null |
def prune_old_authorization_codes():
"""
Removes all unused and expired authorization codes from the database.
"""
from .compat import now
from .models import AuthorizationCode
AuthorizationCode.objects.with_expiration_before(now()).delete()
def get_handler(handler_name):
"""
Imports the module for a DOAC handler based on the string representation of the module path that is provided.
"""
from .conf import options
handlers = options.handlers
for handler in handlers:
handler_path = handler.split(".")
name = handler_path[-2]
if handler_name == name:
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
return getattr(handler_module, handler_path[-1])()
return None
def request_error_header(exception):
"""
Generates the error header for a request using a Bearer token based on a given OAuth exception.
"""
from .conf import options
header = "Bearer realm=\"%s\"" % (options.realm, )
if hasattr(exception, "error"):
header = header + ", error=\"%s\"" % (exception.error, )
if hasattr(exception, "reason"):
header = header + ", error_description=\"%s\"" % (exception.reason, )
return header
def total_seconds(delta):
"""
Get the total seconds that a `datetime.timedelta` object covers. Used for returning the total
time until a token expires during the handshake process.
"""
return delta.days * 86400 + delta.seconds
|
Rediker-Software/doac
|
doac/utils.py
|
request_error_header
|
python
|
def request_error_header(exception):
from .conf import options
header = "Bearer realm=\"%s\"" % (options.realm, )
if hasattr(exception, "error"):
header = header + ", error=\"%s\"" % (exception.error, )
if hasattr(exception, "reason"):
header = header + ", error_description=\"%s\"" % (exception.reason, )
return header
|
Generates the error header for a request using a Bearer token based on a given OAuth exception.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/utils.py#L33-L48
| null |
def prune_old_authorization_codes():
"""
Removes all unused and expired authorization codes from the database.
"""
from .compat import now
from .models import AuthorizationCode
AuthorizationCode.objects.with_expiration_before(now()).delete()
def get_handler(handler_name):
"""
Imports the module for a DOAC handler based on the string representation of the module path that is provided.
"""
from .conf import options
handlers = options.handlers
for handler in handlers:
handler_path = handler.split(".")
name = handler_path[-2]
if handler_name == name:
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
return getattr(handler_module, handler_path[-1])()
return None
def request_error_header(exception):
"""
Generates the error header for a request using a Bearer token based on a given OAuth exception.
"""
from .conf import options
header = "Bearer realm=\"%s\"" % (options.realm, )
if hasattr(exception, "error"):
header = header + ", error=\"%s\"" % (exception.error, )
if hasattr(exception, "reason"):
header = header + ", error_description=\"%s\"" % (exception.reason, )
return header
def total_seconds(delta):
"""
Get the total seconds that a `datetime.timedelta` object covers. Used for returning the total
time until a token expires during the handshake process.
"""
return delta.days * 86400 + delta.seconds
|
Rediker-Software/doac
|
doac/models.py
|
AuthorizationToken.revoke_tokens
|
python
|
def revoke_tokens(self):
self.is_active = False
self.save()
self.refresh_token.revoke_tokens()
|
Revoke the authorization token and all tokens that were generated using it.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/models.py#L145-L153
|
[
"def save(self, *args, **kwargs):\n from .compat import now\n\n if not self.token:\n self.token = self.generate_token()\n\n if not self.expires_at:\n self.expires_at = now() + options.auth_token[\"expires\"]\n\n super(AuthorizationToken, self).save(*args, **kwargs)\n"
] |
class AuthorizationToken(models.Model):
user = models.ForeignKey(user_model, related_name="authorization_tokens")
client = models.ForeignKey("Client", related_name="authorization_tokens")
token = models.CharField(
max_length=options.auth_token["length"],
blank=True,
help_text=AUTO_GENERATION_HELP_TEXT,
)
scope = models.ManyToManyField("Scope", related_name="authorization_tokens")
created_at = models.DateTimeField(auto_now_add=True)
expires_at = models.DateTimeField()
is_active = models.BooleanField(default=True)
objects = managers.AuthorizationTokenManager()
def __unicode__(self):
return self.token
def generate_refresh_token(self):
if self.is_active:
try:
temp = self.refresh_token
return None
except RefreshToken.DoesNotExist:
self.refresh_token = RefreshToken()
self.refresh_token.client = self.client
self.refresh_token.user = self.user
self.refresh_token.save()
self.refresh_token.scope = self.scope.all()
self.refresh_token.save()
self.is_active = False
self.save()
return self.refresh_token
return None
def generate_token(self):
from .compat import get_random_string
return get_random_string(options.auth_token["length"])
def save(self, *args, **kwargs):
from .compat import now
if not self.token:
self.token = self.generate_token()
if not self.expires_at:
self.expires_at = now() + options.auth_token["expires"]
super(AuthorizationToken, self).save(*args, **kwargs)
|
Rediker-Software/doac
|
doac/models.py
|
RefreshToken.revoke_tokens
|
python
|
def revoke_tokens(self):
self.is_active = False
self.save()
for access_token in self.access_tokens.all():
access_token.revoke()
|
Revokes the refresh token and all access tokens that were generated using it.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/models.py#L242-L251
|
[
"def save(self, *args, **kwargs):\n from .compat import now\n\n if not self.token:\n self.token = self.generate_token()\n\n if not self.expires_at:\n self.expires_at = now() + options.refresh_token[\"expires\"]\n\n super(RefreshToken, self).save(*args, **kwargs)\n"
] |
class RefreshToken(models.Model):
user = models.ForeignKey(user_model, related_name="refresh_tokens")
client = models.ForeignKey("Client", related_name="refresh_tokens")
authorization_token = models.OneToOneField("AuthorizationToken", related_name="refresh_token")
token = models.CharField(
max_length=options.refresh_token["length"],
blank=True,
help_text=AUTO_GENERATION_HELP_TEXT,
)
scope = models.ManyToManyField("Scope", related_name="refresh_tokens")
created_at = models.DateTimeField(auto_now_add=True)
expires_at = models.DateTimeField(blank=True, help_text=AUTO_GENERATION_HELP_TEXT)
is_active = models.BooleanField(default=True)
objects = managers.RefreshTokenManager()
def __unicode__(self):
return self.token
def generate_access_token(self):
access_token = AccessToken(client=self.client, user=self.user, refresh_token=self)
access_token.save()
access_token.scope = self.scope.all()
access_token.save()
return access_token
def generate_token(self):
from .compat import get_random_string
return get_random_string(options.refresh_token["length"])
def save(self, *args, **kwargs):
from .compat import now
if not self.token:
self.token = self.generate_token()
if not self.expires_at:
self.expires_at = now() + options.refresh_token["expires"]
super(RefreshToken, self).save(*args, **kwargs)
|
Rediker-Software/doac
|
doac/middleware.py
|
AuthenticationMiddleware.process_request
|
python
|
def process_request(self, request):
request.auth_type = None
http_authorization = request.META.get("HTTP_AUTHORIZATION", None)
if not http_authorization:
return
auth = http_authorization.split()
self.auth_type = auth[0].lower()
self.auth_value = " ".join(auth[1:]).strip()
request.auth_type = self.auth_type
self.validate_auth_type()
if not self.handler_name:
raise Exception("There is no handler defined for this authentication type.")
self.load_handler()
response = self.handler.validate(self.auth_value, request)
if response is not None:
return response
request.access_token = self.handler.access_token(self.auth_value, request)
request.user = self.handler.authenticate(self.auth_value, request)
|
Try to authenticate the user based on any given tokens that have been provided
to the request object. This will try to detect the authentication type and assign
the detected User object to the `request.user` variable, similar to the standard
Django authentication.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/middleware.py#L8-L43
|
[
"def load_handler(self):\n \"\"\"\n Load the detected handler.\n \"\"\"\n\n handler_path = self.handler_name.split(\".\")\n\n handler_module = __import__(\".\".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))\n self.handler = getattr(handler_module, handler_path[-1])()\n",
"def validate_auth_type(self):\n \"\"\"\n Validate the detected authorization type against the list of handlers. This will return the full\n module path to the detected handler.\n \"\"\"\n\n for handler in HANDLERS:\n handler_type = handler.split(\".\")[-2]\n\n if handler_type == self.auth_type:\n self.handler_name = handler\n\n return\n\n self.handler_name = None\n"
] |
class AuthenticationMiddleware:
def process_request(self, request):
"""
Try to authenticate the user based on any given tokens that have been provided
to the request object. This will try to detect the authentication type and assign
the detected User object to the `request.user` variable, similar to the standard
Django authentication.
"""
request.auth_type = None
http_authorization = request.META.get("HTTP_AUTHORIZATION", None)
if not http_authorization:
return
auth = http_authorization.split()
self.auth_type = auth[0].lower()
self.auth_value = " ".join(auth[1:]).strip()
request.auth_type = self.auth_type
self.validate_auth_type()
if not self.handler_name:
raise Exception("There is no handler defined for this authentication type.")
self.load_handler()
response = self.handler.validate(self.auth_value, request)
if response is not None:
return response
request.access_token = self.handler.access_token(self.auth_value, request)
request.user = self.handler.authenticate(self.auth_value, request)
def load_handler(self):
"""
Load the detected handler.
"""
handler_path = self.handler_name.split(".")
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
self.handler = getattr(handler_module, handler_path[-1])()
def validate_auth_type(self):
"""
Validate the detected authorization type against the list of handlers. This will return the full
module path to the detected handler.
"""
for handler in HANDLERS:
handler_type = handler.split(".")[-2]
if handler_type == self.auth_type:
self.handler_name = handler
return
self.handler_name = None
|
Rediker-Software/doac
|
doac/middleware.py
|
AuthenticationMiddleware.load_handler
|
python
|
def load_handler(self):
handler_path = self.handler_name.split(".")
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
self.handler = getattr(handler_module, handler_path[-1])()
|
Load the detected handler.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/middleware.py#L45-L53
| null |
class AuthenticationMiddleware:
def process_request(self, request):
"""
Try to authenticate the user based on any given tokens that have been provided
to the request object. This will try to detect the authentication type and assign
the detected User object to the `request.user` variable, similar to the standard
Django authentication.
"""
request.auth_type = None
http_authorization = request.META.get("HTTP_AUTHORIZATION", None)
if not http_authorization:
return
auth = http_authorization.split()
self.auth_type = auth[0].lower()
self.auth_value = " ".join(auth[1:]).strip()
request.auth_type = self.auth_type
self.validate_auth_type()
if not self.handler_name:
raise Exception("There is no handler defined for this authentication type.")
self.load_handler()
response = self.handler.validate(self.auth_value, request)
if response is not None:
return response
request.access_token = self.handler.access_token(self.auth_value, request)
request.user = self.handler.authenticate(self.auth_value, request)
def load_handler(self):
"""
Load the detected handler.
"""
handler_path = self.handler_name.split(".")
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
self.handler = getattr(handler_module, handler_path[-1])()
def validate_auth_type(self):
"""
Validate the detected authorization type against the list of handlers. This will return the full
module path to the detected handler.
"""
for handler in HANDLERS:
handler_type = handler.split(".")[-2]
if handler_type == self.auth_type:
self.handler_name = handler
return
self.handler_name = None
|
Rediker-Software/doac
|
doac/middleware.py
|
AuthenticationMiddleware.validate_auth_type
|
python
|
def validate_auth_type(self):
for handler in HANDLERS:
handler_type = handler.split(".")[-2]
if handler_type == self.auth_type:
self.handler_name = handler
return
self.handler_name = None
|
Validate the detected authorization type against the list of handlers. This will return the full
module path to the detected handler.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/middleware.py#L55-L69
| null |
class AuthenticationMiddleware:
def process_request(self, request):
"""
Try to authenticate the user based on any given tokens that have been provided
to the request object. This will try to detect the authentication type and assign
the detected User object to the `request.user` variable, similar to the standard
Django authentication.
"""
request.auth_type = None
http_authorization = request.META.get("HTTP_AUTHORIZATION", None)
if not http_authorization:
return
auth = http_authorization.split()
self.auth_type = auth[0].lower()
self.auth_value = " ".join(auth[1:]).strip()
request.auth_type = self.auth_type
self.validate_auth_type()
if not self.handler_name:
raise Exception("There is no handler defined for this authentication type.")
self.load_handler()
response = self.handler.validate(self.auth_value, request)
if response is not None:
return response
request.access_token = self.handler.access_token(self.auth_value, request)
request.user = self.handler.authenticate(self.auth_value, request)
def load_handler(self):
"""
Load the detected handler.
"""
handler_path = self.handler_name.split(".")
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
self.handler = getattr(handler_module, handler_path[-1])()
def validate_auth_type(self):
"""
Validate the detected authorization type against the list of handlers. This will return the full
module path to the detected handler.
"""
for handler in HANDLERS:
handler_type = handler.split(".")[-2]
if handler_type == self.auth_type:
self.handler_name = handler
return
self.handler_name = None
|
Rediker-Software/doac
|
doac/contrib/rest_framework/authentication.py
|
DoacAuthentication.authenticate
|
python
|
def authenticate(self, request):
from doac.middleware import AuthenticationMiddleware
try:
response = AuthenticationMiddleware().process_request(request)
except:
raise exceptions.AuthenticationFailed("Invalid handler")
if not hasattr(request, "user") or not request.user.is_authenticated():
return None
if not hasattr(request, "access_token"):
raise exceptions.AuthenticationFailed("Access token was not valid")
return request.user, request.access_token
|
Send the request through the authentication middleware that
is provided with DOAC and grab the user and token from it.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/contrib/rest_framework/authentication.py#L6-L25
|
[
"def process_request(self, request):\n \"\"\"\n Try to authenticate the user based on any given tokens that have been provided\n to the request object. This will try to detect the authentication type and assign\n the detected User object to the `request.user` variable, similar to the standard\n Django authentication.\n \"\"\"\n\n request.auth_type = None\n\n http_authorization = request.META.get(\"HTTP_AUTHORIZATION\", None)\n\n if not http_authorization:\n return\n\n auth = http_authorization.split()\n\n self.auth_type = auth[0].lower()\n self.auth_value = \" \".join(auth[1:]).strip()\n\n request.auth_type = self.auth_type\n\n self.validate_auth_type()\n\n if not self.handler_name:\n raise Exception(\"There is no handler defined for this authentication type.\")\n\n self.load_handler()\n\n response = self.handler.validate(self.auth_value, request)\n\n if response is not None:\n return response\n\n request.access_token = self.handler.access_token(self.auth_value, request)\n request.user = self.handler.authenticate(self.auth_value, request)\n"
] |
class DoacAuthentication(authentication.BaseAuthentication):
def authenticate_header(self, request):
"""
DOAC specifies the realm as Bearer by default.
"""
from doac.conf import options
return 'Bearer realm="%s"' % options.realm
|
Rediker-Software/doac
|
doac/decorators.py
|
scope_required
|
python
|
def scope_required(*scopes):
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
from django.http import HttpResponseBadRequest, HttpResponseForbidden
from .exceptions.base import InvalidRequest, InsufficientScope
from .models import Scope
from .utils import request_error_header
try:
if not hasattr(request, "access_token"):
raise CredentialsNotProvided()
access_token = request.access_token
for scope_name in scopes:
try:
scope = access_token.scope.for_short_name(scope_name)
except Scope.DoesNotExist:
raise ScopeNotEnough()
except InvalidRequest as e:
response = HttpResponseBadRequest()
response["WWW-Authenticate"] = request_error_header(e)
return response
except InsufficientScope as e:
response = HttpResponseForbidden()
response["WWW-Authenticate"] = request_error_header(e)
return response
return view_func(request, *args, **kwargs)
return _wrapped_view
if scopes and hasattr(scopes[0], "__call__"):
func = scopes[0]
scopes = scopes[1:]
return decorator(func)
return decorator
|
Test for specific scopes that the access token has been authenticated for before
processing the request and eventual response.
The scopes that are passed in determine how the decorator will respond to incoming
requests:
- If no scopes are passed in the arguments, the decorator will test for any available
scopes and determine the response based on that.
- If specific scopes are passed, the access token will be checked to make sure it has
all of the scopes that were requested.
This decorator will change the response if the access toke does not have the scope:
- If an invalid scope is requested (one that does not exist), all requests will be
denied, as no access tokens will be able to fulfill the scope request and the
request will be denied.
- If the access token does not have one of the requested scopes, the request will be
denied and the user will be returned one of two responses:
- A 400 response (Bad Request) will be returned if an unauthenticated user tries to
access the resource.
- A 403 response (Forbidden) will be returned if an authenticated user ties to access
the resource but does not have the correct scope.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/decorators.py#L7-L77
|
[
"def decorator(view_func):\n\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n from django.http import HttpResponseBadRequest, HttpResponseForbidden\n from .exceptions.base import InvalidRequest, InsufficientScope\n from .models import Scope\n from .utils import request_error_header\n\n try:\n if not hasattr(request, \"access_token\"):\n raise CredentialsNotProvided()\n\n access_token = request.access_token\n\n for scope_name in scopes:\n try:\n scope = access_token.scope.for_short_name(scope_name)\n except Scope.DoesNotExist:\n raise ScopeNotEnough()\n except InvalidRequest as e:\n response = HttpResponseBadRequest()\n response[\"WWW-Authenticate\"] = request_error_header(e)\n\n return response\n except InsufficientScope as e:\n response = HttpResponseForbidden()\n response[\"WWW-Authenticate\"] = request_error_header(e)\n\n return response\n\n return view_func(request, *args, **kwargs)\n\n return _wrapped_view\n"
] |
from django.utils.decorators import available_attrs
from functools import wraps
from .exceptions.invalid_request import CredentialsNotProvided
from .exceptions.insufficient_scope import ScopeNotEnough
def scope_required(*scopes):
"""
Test for specific scopes that the access token has been authenticated for before
processing the request and eventual response.
The scopes that are passed in determine how the decorator will respond to incoming
requests:
- If no scopes are passed in the arguments, the decorator will test for any available
scopes and determine the response based on that.
- If specific scopes are passed, the access token will be checked to make sure it has
all of the scopes that were requested.
This decorator will change the response if the access toke does not have the scope:
- If an invalid scope is requested (one that does not exist), all requests will be
denied, as no access tokens will be able to fulfill the scope request and the
request will be denied.
- If the access token does not have one of the requested scopes, the request will be
denied and the user will be returned one of two responses:
- A 400 response (Bad Request) will be returned if an unauthenticated user tries to
access the resource.
- A 403 response (Forbidden) will be returned if an authenticated user ties to access
the resource but does not have the correct scope.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
from django.http import HttpResponseBadRequest, HttpResponseForbidden
from .exceptions.base import InvalidRequest, InsufficientScope
from .models import Scope
from .utils import request_error_header
try:
if not hasattr(request, "access_token"):
raise CredentialsNotProvided()
access_token = request.access_token
for scope_name in scopes:
try:
scope = access_token.scope.for_short_name(scope_name)
except Scope.DoesNotExist:
raise ScopeNotEnough()
except InvalidRequest as e:
response = HttpResponseBadRequest()
response["WWW-Authenticate"] = request_error_header(e)
return response
except InsufficientScope as e:
response = HttpResponseForbidden()
response["WWW-Authenticate"] = request_error_header(e)
return response
return view_func(request, *args, **kwargs)
return _wrapped_view
if scopes and hasattr(scopes[0], "__call__"):
func = scopes[0]
scopes = scopes[1:]
return decorator(func)
return decorator
|
Rediker-Software/doac
|
doac/views.py
|
OAuthView.handle_exception
|
python
|
def handle_exception(self, exception):
can_redirect = getattr(exception, "can_redirect", True)
redirect_uri = getattr(self, "redirect_uri", None)
if can_redirect and redirect_uri:
return self.redirect_exception(exception)
else:
return self.render_exception(exception)
|
Handle a unspecified exception and return the correct method that should be used
for handling it.
If the exception has the `can_redirect` property set to False, it is
rendered to the browser. Otherwise, it will be redirected to the location
provided in the `RedirectUri` object that is associated with the request.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/views.py#L22-L38
| null |
class OAuthView(View):
"""
All views must subclass this class.
This provides common methods which are needed for validation and processing OAuth
requests and responses.
"""
def redirect_exception(self, exception):
"""
Build the query string for the exception and return a redirect to the
redirect uri that was associated with the request.
"""
from django.http import QueryDict, HttpResponseRedirect
query = QueryDict("").copy()
query["error"] = exception.error
query["error_description"] = exception.reason
query["state"] = self.state
return HttpResponseRedirect(self.redirect_uri.url + "?" + query.urlencode())
def render_exception(self, exception):
"""
Return a 401 response with the body being the reason for the exception.
"""
from .http import HttpResponseUnauthorized
return HttpResponseUnauthorized(exception.reason)
def render_exception_js(self, exception):
"""
Return a response with the body containing a JSON-formatter version of the exception.
"""
from .http import JsonResponse
response = {}
response["error"] = exception.error
response["error_description"] = exception.reason
return JsonResponse(response, status=getattr(exception, 'code', 400))
def verify_dictionary(self, dict, *args):
"""
Based on a provided `dict`, validate all of the contents of that dictionary that are
provided.
For each argument provided that isn't the dictionary, this will set the raw value of
that key as the instance variable of the same name. It will then call the verification
function named `verify_[argument]` to verify the data.
"""
for arg in args:
setattr(self, arg, dict.get(arg, None))
if hasattr(self, "verify_" + arg):
func = getattr(self, "verify_" + arg)
func()
def verify_client_id(self):
"""
Verify a provided client id against the database and set the `Client` object that is
associated with it to `self.client`.
TODO: Document all of the thrown exceptions.
"""
from .models import Client
from .exceptions.invalid_client import ClientDoesNotExist
from .exceptions.invalid_request import ClientNotProvided
if self.client_id:
try:
self.client = Client.objects.for_id(self.client_id)
# Catching also ValueError for the case when client_id doesn't contain an integer.
except (Client.DoesNotExist, ValueError):
raise ClientDoesNotExist()
else:
raise ClientNotProvided()
def verify_redirect_uri(self):
from urlparse import urlparse
from .models import RedirectUri
from .exceptions.invalid_request import RedirectUriDoesNotValidate, RedirectUriNotProvided
PARSE_MATCH_ATTRIBUTES = ("scheme", "hostname", "port", )
if self.redirect_uri:
client_host = self.client.access_host
client_parse = urlparse(client_host)
redirect_parse = urlparse(self.redirect_uri)
for attribute in PARSE_MATCH_ATTRIBUTES:
client_attribute = getattr(client_parse, attribute)
redirect_attribute = getattr(redirect_parse, attribute)
if not client_attribute == redirect_attribute:
raise RedirectUriDoesNotValidate()
try:
self.redirect_uri = RedirectUri.objects.with_client(self.client).for_url(self.redirect_uri)
except RedirectUri.DoesNotExist:
raise RedirectUriDoesNotValidate()
else:
raise RedirectUriNotProvided()
|
Rediker-Software/doac
|
doac/views.py
|
OAuthView.redirect_exception
|
python
|
def redirect_exception(self, exception):
from django.http import QueryDict, HttpResponseRedirect
query = QueryDict("").copy()
query["error"] = exception.error
query["error_description"] = exception.reason
query["state"] = self.state
return HttpResponseRedirect(self.redirect_uri.url + "?" + query.urlencode())
|
Build the query string for the exception and return a redirect to the
redirect uri that was associated with the request.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/views.py#L40-L53
| null |
class OAuthView(View):
"""
All views must subclass this class.
This provides common methods which are needed for validation and processing OAuth
requests and responses.
"""
def handle_exception(self, exception):
"""
Handle a unspecified exception and return the correct method that should be used
for handling it.
If the exception has the `can_redirect` property set to False, it is
rendered to the browser. Otherwise, it will be redirected to the location
provided in the `RedirectUri` object that is associated with the request.
"""
can_redirect = getattr(exception, "can_redirect", True)
redirect_uri = getattr(self, "redirect_uri", None)
if can_redirect and redirect_uri:
return self.redirect_exception(exception)
else:
return self.render_exception(exception)
def render_exception(self, exception):
"""
Return a 401 response with the body being the reason for the exception.
"""
from .http import HttpResponseUnauthorized
return HttpResponseUnauthorized(exception.reason)
def render_exception_js(self, exception):
"""
Return a response with the body containing a JSON-formatter version of the exception.
"""
from .http import JsonResponse
response = {}
response["error"] = exception.error
response["error_description"] = exception.reason
return JsonResponse(response, status=getattr(exception, 'code', 400))
def verify_dictionary(self, dict, *args):
"""
Based on a provided `dict`, validate all of the contents of that dictionary that are
provided.
For each argument provided that isn't the dictionary, this will set the raw value of
that key as the instance variable of the same name. It will then call the verification
function named `verify_[argument]` to verify the data.
"""
for arg in args:
setattr(self, arg, dict.get(arg, None))
if hasattr(self, "verify_" + arg):
func = getattr(self, "verify_" + arg)
func()
def verify_client_id(self):
"""
Verify a provided client id against the database and set the `Client` object that is
associated with it to `self.client`.
TODO: Document all of the thrown exceptions.
"""
from .models import Client
from .exceptions.invalid_client import ClientDoesNotExist
from .exceptions.invalid_request import ClientNotProvided
if self.client_id:
try:
self.client = Client.objects.for_id(self.client_id)
# Catching also ValueError for the case when client_id doesn't contain an integer.
except (Client.DoesNotExist, ValueError):
raise ClientDoesNotExist()
else:
raise ClientNotProvided()
def verify_redirect_uri(self):
from urlparse import urlparse
from .models import RedirectUri
from .exceptions.invalid_request import RedirectUriDoesNotValidate, RedirectUriNotProvided
PARSE_MATCH_ATTRIBUTES = ("scheme", "hostname", "port", )
if self.redirect_uri:
client_host = self.client.access_host
client_parse = urlparse(client_host)
redirect_parse = urlparse(self.redirect_uri)
for attribute in PARSE_MATCH_ATTRIBUTES:
client_attribute = getattr(client_parse, attribute)
redirect_attribute = getattr(redirect_parse, attribute)
if not client_attribute == redirect_attribute:
raise RedirectUriDoesNotValidate()
try:
self.redirect_uri = RedirectUri.objects.with_client(self.client).for_url(self.redirect_uri)
except RedirectUri.DoesNotExist:
raise RedirectUriDoesNotValidate()
else:
raise RedirectUriNotProvided()
|
Rediker-Software/doac
|
doac/views.py
|
OAuthView.render_exception_js
|
python
|
def render_exception_js(self, exception):
from .http import JsonResponse
response = {}
response["error"] = exception.error
response["error_description"] = exception.reason
return JsonResponse(response, status=getattr(exception, 'code', 400))
|
Return a response with the body containing a JSON-formatter version of the exception.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/views.py#L64-L75
| null |
class OAuthView(View):
"""
All views must subclass this class.
This provides common methods which are needed for validation and processing OAuth
requests and responses.
"""
def handle_exception(self, exception):
"""
Handle a unspecified exception and return the correct method that should be used
for handling it.
If the exception has the `can_redirect` property set to False, it is
rendered to the browser. Otherwise, it will be redirected to the location
provided in the `RedirectUri` object that is associated with the request.
"""
can_redirect = getattr(exception, "can_redirect", True)
redirect_uri = getattr(self, "redirect_uri", None)
if can_redirect and redirect_uri:
return self.redirect_exception(exception)
else:
return self.render_exception(exception)
def redirect_exception(self, exception):
"""
Build the query string for the exception and return a redirect to the
redirect uri that was associated with the request.
"""
from django.http import QueryDict, HttpResponseRedirect
query = QueryDict("").copy()
query["error"] = exception.error
query["error_description"] = exception.reason
query["state"] = self.state
return HttpResponseRedirect(self.redirect_uri.url + "?" + query.urlencode())
def render_exception(self, exception):
"""
Return a 401 response with the body being the reason for the exception.
"""
from .http import HttpResponseUnauthorized
return HttpResponseUnauthorized(exception.reason)
def verify_dictionary(self, dict, *args):
"""
Based on a provided `dict`, validate all of the contents of that dictionary that are
provided.
For each argument provided that isn't the dictionary, this will set the raw value of
that key as the instance variable of the same name. It will then call the verification
function named `verify_[argument]` to verify the data.
"""
for arg in args:
setattr(self, arg, dict.get(arg, None))
if hasattr(self, "verify_" + arg):
func = getattr(self, "verify_" + arg)
func()
def verify_client_id(self):
    """
    Look up ``self.client_id`` in the database and attach the matching
    ``Client`` instance to ``self.client``.

    Raises ``ClientNotProvided`` when no id was supplied, and
    ``ClientDoesNotExist`` when no client matches (or the id is not an
    integer).
    """
    from .models import Client
    from .exceptions.invalid_client import ClientDoesNotExist
    from .exceptions.invalid_request import ClientNotProvided

    if not self.client_id:
        raise ClientNotProvided()
    try:
        self.client = Client.objects.for_id(self.client_id)
    except (Client.DoesNotExist, ValueError):
        # ValueError covers a client_id that does not contain an integer.
        raise ClientDoesNotExist()
def verify_redirect_uri(self):
    """
    Validate ``self.redirect_uri`` against the client's registered
    ``access_host`` and replace it with the matching ``RedirectUri``
    model instance.

    The scheme, hostname and port of the supplied uri must all match the
    client's host; otherwise ``RedirectUriDoesNotValidate`` is raised.
    ``RedirectUriNotProvided`` is raised when no uri was supplied.
    """
    from urlparse import urlparse
    from .models import RedirectUri
    from .exceptions.invalid_request import RedirectUriDoesNotValidate, RedirectUriNotProvided

    if not self.redirect_uri:
        raise RedirectUriNotProvided()

    expected = urlparse(self.client.access_host)
    supplied = urlparse(self.redirect_uri)
    for part in ("scheme", "hostname", "port"):
        if getattr(expected, part) != getattr(supplied, part):
            raise RedirectUriDoesNotValidate()

    try:
        self.redirect_uri = RedirectUri.objects.with_client(self.client).for_url(self.redirect_uri)
    except RedirectUri.DoesNotExist:
        raise RedirectUriDoesNotValidate()
|
Rediker-Software/doac
|
doac/views.py
|
OAuthView.verify_dictionary
|
python
|
def verify_dictionary(self, dict, *args):
for arg in args:
setattr(self, arg, dict.get(arg, None))
if hasattr(self, "verify_" + arg):
func = getattr(self, "verify_" + arg)
func()
|
Based on a provided `dict`, validate all of the contents of that dictionary that are
provided.
For each argument provided that isn't the dictionary, this will set the raw value of
that key as the instance variable of the same name. It will then call the verification
function named `verify_[argument]` to verify the data.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/views.py#L77-L92
| null |
class OAuthView(View):
"""
All views must subclass this class.
This provides common methods which are needed for validation and processing OAuth
requests and responses.
"""
def handle_exception(self, exception):
"""
Handle a unspecified exception and return the correct method that should be used
for handling it.
If the exception has the `can_redirect` property set to False, it is
rendered to the browser. Otherwise, it will be redirected to the location
provided in the `RedirectUri` object that is associated with the request.
"""
can_redirect = getattr(exception, "can_redirect", True)
redirect_uri = getattr(self, "redirect_uri", None)
if can_redirect and redirect_uri:
return self.redirect_exception(exception)
else:
return self.render_exception(exception)
def redirect_exception(self, exception):
"""
Build the query string for the exception and return a redirect to the
redirect uri that was associated with the request.
"""
from django.http import QueryDict, HttpResponseRedirect
query = QueryDict("").copy()
query["error"] = exception.error
query["error_description"] = exception.reason
query["state"] = self.state
return HttpResponseRedirect(self.redirect_uri.url + "?" + query.urlencode())
def render_exception(self, exception):
"""
Return a 401 response with the body being the reason for the exception.
"""
from .http import HttpResponseUnauthorized
return HttpResponseUnauthorized(exception.reason)
def render_exception_js(self, exception):
"""
Return a response with the body containing a JSON-formatter version of the exception.
"""
from .http import JsonResponse
response = {}
response["error"] = exception.error
response["error_description"] = exception.reason
return JsonResponse(response, status=getattr(exception, 'code', 400))
def verify_client_id(self):
"""
Verify a provided client id against the database and set the `Client` object that is
associated with it to `self.client`.
TODO: Document all of the thrown exceptions.
"""
from .models import Client
from .exceptions.invalid_client import ClientDoesNotExist
from .exceptions.invalid_request import ClientNotProvided
if self.client_id:
try:
self.client = Client.objects.for_id(self.client_id)
# Catching also ValueError for the case when client_id doesn't contain an integer.
except (Client.DoesNotExist, ValueError):
raise ClientDoesNotExist()
else:
raise ClientNotProvided()
def verify_redirect_uri(self):
from urlparse import urlparse
from .models import RedirectUri
from .exceptions.invalid_request import RedirectUriDoesNotValidate, RedirectUriNotProvided
PARSE_MATCH_ATTRIBUTES = ("scheme", "hostname", "port", )
if self.redirect_uri:
client_host = self.client.access_host
client_parse = urlparse(client_host)
redirect_parse = urlparse(self.redirect_uri)
for attribute in PARSE_MATCH_ATTRIBUTES:
client_attribute = getattr(client_parse, attribute)
redirect_attribute = getattr(redirect_parse, attribute)
if not client_attribute == redirect_attribute:
raise RedirectUriDoesNotValidate()
try:
self.redirect_uri = RedirectUri.objects.with_client(self.client).for_url(self.redirect_uri)
except RedirectUri.DoesNotExist:
raise RedirectUriDoesNotValidate()
else:
raise RedirectUriNotProvided()
|
Rediker-Software/doac
|
doac/views.py
|
OAuthView.verify_client_id
|
python
|
def verify_client_id(self):
from .models import Client
from .exceptions.invalid_client import ClientDoesNotExist
from .exceptions.invalid_request import ClientNotProvided
if self.client_id:
try:
self.client = Client.objects.for_id(self.client_id)
# Catching also ValueError for the case when client_id doesn't contain an integer.
except (Client.DoesNotExist, ValueError):
raise ClientDoesNotExist()
else:
raise ClientNotProvided()
|
Verify a provided client id against the database and set the `Client` object that is
associated with it to `self.client`.
TODO: Document all of the thrown exceptions.
|
train
|
https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/views.py#L94-L113
| null |
class OAuthView(View):
"""
All views must subclass this class.
This provides common methods which are needed for validation and processing OAuth
requests and responses.
"""
def handle_exception(self, exception):
"""
Handle a unspecified exception and return the correct method that should be used
for handling it.
If the exception has the `can_redirect` property set to False, it is
rendered to the browser. Otherwise, it will be redirected to the location
provided in the `RedirectUri` object that is associated with the request.
"""
can_redirect = getattr(exception, "can_redirect", True)
redirect_uri = getattr(self, "redirect_uri", None)
if can_redirect and redirect_uri:
return self.redirect_exception(exception)
else:
return self.render_exception(exception)
def redirect_exception(self, exception):
"""
Build the query string for the exception and return a redirect to the
redirect uri that was associated with the request.
"""
from django.http import QueryDict, HttpResponseRedirect
query = QueryDict("").copy()
query["error"] = exception.error
query["error_description"] = exception.reason
query["state"] = self.state
return HttpResponseRedirect(self.redirect_uri.url + "?" + query.urlencode())
def render_exception(self, exception):
"""
Return a 401 response with the body being the reason for the exception.
"""
from .http import HttpResponseUnauthorized
return HttpResponseUnauthorized(exception.reason)
def render_exception_js(self, exception):
"""
Return a response with the body containing a JSON-formatter version of the exception.
"""
from .http import JsonResponse
response = {}
response["error"] = exception.error
response["error_description"] = exception.reason
return JsonResponse(response, status=getattr(exception, 'code', 400))
def verify_dictionary(self, dict, *args):
"""
Based on a provided `dict`, validate all of the contents of that dictionary that are
provided.
For each argument provided that isn't the dictionary, this will set the raw value of
that key as the instance variable of the same name. It will then call the verification
function named `verify_[argument]` to verify the data.
"""
for arg in args:
setattr(self, arg, dict.get(arg, None))
if hasattr(self, "verify_" + arg):
func = getattr(self, "verify_" + arg)
func()
def verify_redirect_uri(self):
from urlparse import urlparse
from .models import RedirectUri
from .exceptions.invalid_request import RedirectUriDoesNotValidate, RedirectUriNotProvided
PARSE_MATCH_ATTRIBUTES = ("scheme", "hostname", "port", )
if self.redirect_uri:
client_host = self.client.access_host
client_parse = urlparse(client_host)
redirect_parse = urlparse(self.redirect_uri)
for attribute in PARSE_MATCH_ATTRIBUTES:
client_attribute = getattr(client_parse, attribute)
redirect_attribute = getattr(redirect_parse, attribute)
if not client_attribute == redirect_attribute:
raise RedirectUriDoesNotValidate()
try:
self.redirect_uri = RedirectUri.objects.with_client(self.client).for_url(self.redirect_uri)
except RedirectUri.DoesNotExist:
raise RedirectUriDoesNotValidate()
else:
raise RedirectUriNotProvided()
|
raphaelgyory/django-rest-messaging
|
rest_messaging/compat.py
|
compat_serializer_check_is_valid
|
python
|
def compat_serializer_check_is_valid(serializer):
    """
    Validate ``serializer`` across DRF versions, raising
    ``serializers.ValidationError`` when the data is invalid.

    See http://www.django-rest-framework.org/topics/3.0-announcement/#using-is_validraise_exceptiontrue
    """
    if DRFVLIST[0] >= 3:
        serializer.is_valid(raise_exception=True)
    elif not serializer.is_valid():
        # Bug fix: the exception was previously instantiated but never
        # raised, so invalid data was silently accepted on DRF 2.
        raise serializers.ValidationError('The serializer raises a validation error')
|
http://www.django-rest-framework.org/topics/3.0-announcement/#using-is_validraise_exceptiontrue
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/compat.py#L23-L29
| null |
# coding=utf8
# -*- coding: utf8 -*-
# vim: set fileencoding=utf8 :
from __future__ import unicode_literals
from django.conf import settings
from rest_framework import VERSION, serializers
from rest_framework.response import Response
from rest_messaging.pagination import MessagePagination
DRFVLIST = [int(x) for x in VERSION.split(".")]
def compat_serializer_method_field(method_name=None):
""" method_name changed in DRF > 3. See http://www.django-rest-framework.org/topics/3.0-announcement/#optional-argument-to-serializermethodfield. """
if DRFVLIST[0] >= 3:
return serializers.SerializerMethodField()
else:
return serializers.SerializerMethodField(method_name=method_name)
def compat_thread_serializer_set():
""" We create the Thread manually and must assign it to the serializer. DRF 3 uses serializer.instance while DRF 2 uses serializer.object """
if DRFVLIST[0] >= 3:
return "instance"
else:
return "object"
def compat_serializer_attr(serializer, obj):
    """
    Work around DRF 3.1, which does not make dynamically added attributes
    available on ``obj`` inside the serializer: re-fetch the matching
    object from ``serializer.instance`` by id.

    NOTE(review): under DRF 3.1 this returns ``None`` when no instance
    with a matching id is found — presumably callers guard against that;
    confirm before relying on it.
    """
    if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:
        for candidate in serializer.instance:
            if candidate.id == obj.id:
                return candidate
        return None
    return obj
def compat_get_request_data(request):
""" http://www.django-rest-framework.org/topics/3.0-announcement/#request-objects """
if DRFVLIST[0] >= 3:
return request.data
else:
return request.DATA
def compat_perform_update(instance, serializer):
""" Verbatim copy of compat_perform_update mixin for DRF 2.4 compatibility. """
if DRFVLIST[0] == 2:
serializer.save()
def compat_get_paginated_response(view, page):
""" get_paginated_response is unknown to DRF 3.0 """
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
from rest_messaging.serializers import ComplexMessageSerializer # circular import
serializer = ComplexMessageSerializer(page, many=True)
return view.get_paginated_response(serializer.data)
else:
serializer = view.get_pagination_serializer(page)
return Response(serializer.data)
def compat_pagination_messages(cls):
    """
    Class decorator wiring up message pagination for the installed DRF
    version.

    DRF >= 3.1 defines pagination at the paginator level (see
    http://www.django-rest-framework.org/topics/3.2-announcement/); older
    versions take ``paginate_by`` on the view itself.
    """
    if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
        cls.pagination_class = MessagePagination
    else:
        # DRF 2 style pagination, page size configurable via settings.
        cls.paginate_by = getattr(settings, "DJANGO_REST_MESSAGING_MESSAGES_PAGE_SIZE", 30)
    return cls
|
raphaelgyory/django-rest-messaging
|
rest_messaging/compat.py
|
compat_serializer_attr
|
python
|
def compat_serializer_attr(serializer, obj):
if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:
for i in serializer.instance:
if i.id == obj.id:
return i
else:
return obj
|
Required only for DRF 3.1, which does not make dynamically added attribute available in obj in serializer.
This is a quick solution but works without breaking anything.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/compat.py#L40-L50
| null |
# coding=utf8
# -*- coding: utf8 -*-
# vim: set fileencoding=utf8 :
from __future__ import unicode_literals
from django.conf import settings
from rest_framework import VERSION, serializers
from rest_framework.response import Response
from rest_messaging.pagination import MessagePagination
DRFVLIST = [int(x) for x in VERSION.split(".")]
def compat_serializer_method_field(method_name=None):
""" method_name changed in DRF > 3. See http://www.django-rest-framework.org/topics/3.0-announcement/#optional-argument-to-serializermethodfield. """
if DRFVLIST[0] >= 3:
return serializers.SerializerMethodField()
else:
return serializers.SerializerMethodField(method_name=method_name)
def compat_serializer_check_is_valid(serializer):
""" http://www.django-rest-framework.org/topics/3.0-announcement/#using-is_validraise_exceptiontrue """
if DRFVLIST[0] >= 3:
serializer.is_valid(raise_exception=True)
else:
if not serializer.is_valid():
serializers.ValidationError('The serializer raises a validation error')
def compat_thread_serializer_set():
""" We create the Thread manually and must assign it to the serializer. DRF 3 uses serializer.instance while DRF 2 uses serializer.object """
if DRFVLIST[0] >= 3:
return "instance"
else:
return "object"
def compat_get_request_data(request):
""" http://www.django-rest-framework.org/topics/3.0-announcement/#request-objects """
if DRFVLIST[0] >= 3:
return request.data
else:
return request.DATA
def compat_perform_update(instance, serializer):
""" Verbatim copy of compat_perform_update mixin for DRF 2.4 compatibility. """
if DRFVLIST[0] == 2:
serializer.save()
def compat_get_paginated_response(view, page):
""" get_paginated_response is unknown to DRF 3.0 """
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
from rest_messaging.serializers import ComplexMessageSerializer # circular import
serializer = ComplexMessageSerializer(page, many=True)
return view.get_paginated_response(serializer.data)
else:
serializer = view.get_pagination_serializer(page)
return Response(serializer.data)
def compat_pagination_messages(cls):
"""
For DRF 3.1 and higher, pagination is defined at the paginator level (see http://www.django-rest-framework.org/topics/3.2-announcement/).
For DRF 3.0 and lower, it can be handled at the view level.
"""
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
setattr(cls, "pagination_class", MessagePagination)
return cls
else:
# DRF 2 pagination
setattr(cls, "paginate_by", getattr(settings, "DJANGO_REST_MESSAGING_MESSAGES_PAGE_SIZE", 30))
return cls
|
raphaelgyory/django-rest-messaging
|
rest_messaging/compat.py
|
compat_get_paginated_response
|
python
|
def compat_get_paginated_response(view, page):
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
from rest_messaging.serializers import ComplexMessageSerializer # circular import
serializer = ComplexMessageSerializer(page, many=True)
return view.get_paginated_response(serializer.data)
else:
serializer = view.get_pagination_serializer(page)
return Response(serializer.data)
|
get_paginated_response is unknown to DRF 3.0
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/compat.py#L67-L75
| null |
# coding=utf8
# -*- coding: utf8 -*-
# vim: set fileencoding=utf8 :
from __future__ import unicode_literals
from django.conf import settings
from rest_framework import VERSION, serializers
from rest_framework.response import Response
from rest_messaging.pagination import MessagePagination
DRFVLIST = [int(x) for x in VERSION.split(".")]
def compat_serializer_method_field(method_name=None):
""" method_name changed in DRF > 3. See http://www.django-rest-framework.org/topics/3.0-announcement/#optional-argument-to-serializermethodfield. """
if DRFVLIST[0] >= 3:
return serializers.SerializerMethodField()
else:
return serializers.SerializerMethodField(method_name=method_name)
def compat_serializer_check_is_valid(serializer):
""" http://www.django-rest-framework.org/topics/3.0-announcement/#using-is_validraise_exceptiontrue """
if DRFVLIST[0] >= 3:
serializer.is_valid(raise_exception=True)
else:
if not serializer.is_valid():
serializers.ValidationError('The serializer raises a validation error')
def compat_thread_serializer_set():
""" We create the Thread manually and must assign it to the serializer. DRF 3 uses serializer.instance while DRF 2 uses serializer.object """
if DRFVLIST[0] >= 3:
return "instance"
else:
return "object"
def compat_serializer_attr(serializer, obj):
"""
Required only for DRF 3.1, which does not make dynamically added attribute available in obj in serializer.
This is a quick solution but works without breajing anything.
"""
if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:
for i in serializer.instance:
if i.id == obj.id:
return i
else:
return obj
def compat_get_request_data(request):
""" http://www.django-rest-framework.org/topics/3.0-announcement/#request-objects """
if DRFVLIST[0] >= 3:
return request.data
else:
return request.DATA
def compat_perform_update(instance, serializer):
""" Verbatim copy of compat_perform_update mixin for DRF 2.4 compatibility. """
if DRFVLIST[0] == 2:
serializer.save()
def compat_pagination_messages(cls):
"""
For DRF 3.1 and higher, pagination is defined at the paginator level (see http://www.django-rest-framework.org/topics/3.2-announcement/).
For DRF 3.0 and lower, it can be handled at the view level.
"""
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
setattr(cls, "pagination_class", MessagePagination)
return cls
else:
# DRF 2 pagination
setattr(cls, "paginate_by", getattr(settings, "DJANGO_REST_MESSAGING_MESSAGES_PAGE_SIZE", 30))
return cls
|
raphaelgyory/django-rest-messaging
|
rest_messaging/compat.py
|
compat_pagination_messages
|
python
|
def compat_pagination_messages(cls):
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
setattr(cls, "pagination_class", MessagePagination)
return cls
else:
# DRF 2 pagination
setattr(cls, "paginate_by", getattr(settings, "DJANGO_REST_MESSAGING_MESSAGES_PAGE_SIZE", 30))
return cls
|
For DRF 3.1 and higher, pagination is defined at the paginator level (see http://www.django-rest-framework.org/topics/3.2-announcement/).
For DRF 3.0 and lower, it can be handled at the view level.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/compat.py#L78-L89
| null |
# coding=utf8
# -*- coding: utf8 -*-
# vim: set fileencoding=utf8 :
from __future__ import unicode_literals
from django.conf import settings
from rest_framework import VERSION, serializers
from rest_framework.response import Response
from rest_messaging.pagination import MessagePagination
DRFVLIST = [int(x) for x in VERSION.split(".")]
def compat_serializer_method_field(method_name=None):
""" method_name changed in DRF > 3. See http://www.django-rest-framework.org/topics/3.0-announcement/#optional-argument-to-serializermethodfield. """
if DRFVLIST[0] >= 3:
return serializers.SerializerMethodField()
else:
return serializers.SerializerMethodField(method_name=method_name)
def compat_serializer_check_is_valid(serializer):
""" http://www.django-rest-framework.org/topics/3.0-announcement/#using-is_validraise_exceptiontrue """
if DRFVLIST[0] >= 3:
serializer.is_valid(raise_exception=True)
else:
if not serializer.is_valid():
serializers.ValidationError('The serializer raises a validation error')
def compat_thread_serializer_set():
""" We create the Thread manually and must assign it to the serializer. DRF 3 uses serializer.instance while DRF 2 uses serializer.object """
if DRFVLIST[0] >= 3:
return "instance"
else:
return "object"
def compat_serializer_attr(serializer, obj):
"""
Required only for DRF 3.1, which does not make dynamically added attribute available in obj in serializer.
This is a quick solution but works without breajing anything.
"""
if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:
for i in serializer.instance:
if i.id == obj.id:
return i
else:
return obj
def compat_get_request_data(request):
""" http://www.django-rest-framework.org/topics/3.0-announcement/#request-objects """
if DRFVLIST[0] >= 3:
return request.data
else:
return request.DATA
def compat_perform_update(instance, serializer):
""" Verbatim copy of compat_perform_update mixin for DRF 2.4 compatibility. """
if DRFVLIST[0] == 2:
serializer.save()
def compat_get_paginated_response(view, page):
""" get_paginated_response is unknown to DRF 3.0 """
if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
from rest_messaging.serializers import ComplexMessageSerializer # circular import
serializer = ComplexMessageSerializer(page, many=True)
return view.get_paginated_response(serializer.data)
else:
serializer = view.get_pagination_serializer(page)
return Response(serializer.data)
|
raphaelgyory/django-rest-messaging
|
rest_messaging/serializers.py
|
ThreadSerializer.get_participants
|
python
|
def get_participants(self, obj):
# we set the many to many serialization to False, because we only want it with retrieve requests
if self.callback is None:
return [participant.id for participant in obj.participants.all()]
else:
# we do not want user information
return self.callback(obj)
|
Allows to define a callback for serializing information about the user.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/serializers.py#L26-L33
| null |
class ThreadSerializer(serializers.ModelSerializer):
participants = compat_serializer_method_field("get_participants")
removable_participants_ids = compat_serializer_method_field("get_removable_participants_ids")
class Meta:
model = Thread
fields = ('id', 'name', 'participants', 'removable_participants_ids')
def __init__(self, *args, **kwargs):
# Don't pass the 'callback' arg up to the superclass
self.callback = kwargs.pop('callback', None)
# Instantiate the superclass normally
super(ThreadSerializer, self).__init__(*args, **kwargs)
def get_removable_participants_ids(self, obj):
""" Get the participants that can be removed from the thread. """
return obj.get_removable_participants_ids(self.context.get('request', None))
|
raphaelgyory/django-rest-messaging
|
rest_messaging/serializers.py
|
ComplexMessageSerializer.get_is_notification
|
python
|
def get_is_notification(self, obj):
try:
o = compat_serializer_attr(self, obj)
return o.is_notification
except Exception:
return False
|
We say if the message should trigger a notification
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/serializers.py#L57-L63
|
[
"def compat_serializer_attr(serializer, obj):\n \"\"\"\n Required only for DRF 3.1, which does not make dynamically added attribute available in obj in serializer.\n This is a quick solution but works without breajing anything.\n \"\"\"\n if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:\n for i in serializer.instance:\n if i.id == obj.id:\n return i\n else:\n return obj\n"
] |
class ComplexMessageSerializer(serializers.ModelSerializer):
is_notification = compat_serializer_method_field("get_is_notification")
readers = compat_serializer_method_field("get_readers")
class Meta:
model = Message
fields = ('id', 'body', 'sender', 'thread', 'sent_at', 'is_notification', 'readers')
def get_readers(self, obj):
""" Return the ids of the people who read the message instance. """
try:
o = compat_serializer_attr(self, obj)
return o.readers
except Exception:
return []
|
raphaelgyory/django-rest-messaging
|
rest_messaging/serializers.py
|
ComplexMessageSerializer.get_readers
|
python
|
def get_readers(self, obj):
try:
o = compat_serializer_attr(self, obj)
return o.readers
except Exception:
return []
|
Return the ids of the people who read the message instance.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/serializers.py#L65-L71
|
[
"def compat_serializer_attr(serializer, obj):\n \"\"\"\n Required only for DRF 3.1, which does not make dynamically added attribute available in obj in serializer.\n This is a quick solution but works without breajing anything.\n \"\"\"\n if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:\n for i in serializer.instance:\n if i.id == obj.id:\n return i\n else:\n return obj\n"
] |
class ComplexMessageSerializer(serializers.ModelSerializer):
is_notification = compat_serializer_method_field("get_is_notification")
readers = compat_serializer_method_field("get_readers")
class Meta:
model = Message
fields = ('id', 'body', 'sender', 'thread', 'sent_at', 'is_notification', 'readers')
def get_is_notification(self, obj):
""" We say if the message should trigger a notification """
try:
o = compat_serializer_attr(self, obj)
return o.is_notification
except Exception:
return False
|
raphaelgyory/django-rest-messaging
|
rest_messaging/models.py
|
ThreadManager.get_threads_where_participant_is_active
|
python
|
def get_threads_where_participant_is_active(self, participant_id):
participations = Participation.objects.\
filter(participant__id=participant_id).\
exclude(date_left__lte=now()).\
distinct().\
select_related('thread')
return Thread.objects.\
filter(id__in=[p.thread.id for p in participations]).\
distinct()
|
Gets all the threads in which the current participant is involved. The method excludes threads where the participant has left.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L34-L44
| null |
class ThreadManager(models.Manager):
def get_threads_for_participant(self, participant_id):
""" Gets all the threads in which the current participant is or was involved. The method does not exclude threads where the participant has left. """
return Thread.objects.\
filter(participants__id=participant_id).\
distinct()
def get_active_threads_involving_all_participants(self, *participant_ids):
""" Gets the threads where the specified participants are active and no one has left. """
query = Thread.objects.\
exclude(participation__date_left__lte=now()).\
annotate(count_participants=Count('participants')).\
filter(count_participants=len(participant_ids))
for participant_id in participant_ids:
query = query.filter(participants__id=participant_id)
return query.distinct()
def get_or_create_thread(self, request, name=None, *participant_ids):
"""
When a Participant posts a message to other participants without specifying an existing Thread,
we must
1. Create a new Thread if they have not yet opened the discussion.
2. If they have already opened the discussion and multiple Threads are not allowed for the same users, we must
re-attach this message to the existing thread.
3. If they have already opened the discussion and multiple Threads are allowed, we simply create a new one.
"""
# we get the current participant
# or create him if he does not exit
participant_ids = list(participant_ids)
if request.rest_messaging_participant.id not in participant_ids:
participant_ids.append(request.rest_messaging_participant.id)
# we need at least one other participant
if len(participant_ids) < 2:
raise Exception('At least two participants are required.')
if getattr(settings, "REST_MESSAGING_THREAD_UNIQUE_FOR_ACTIVE_RECIPIENTS", True) is True:
# if we limit the number of threads by active participants
# we ensure a thread is not already running
existing_threads = self.get_active_threads_involving_all_participants(*participant_ids)
if len(list(existing_threads)) > 0:
return existing_threads[0]
# we have no existing Thread or multiple Thread instances are allowed
thread = Thread.objects.create(name=name)
# we add the participants
thread.add_participants(request, *participant_ids)
# we send a signal to say the thread with participants is created
post_save.send(Thread, instance=thread, created=True, created_and_add_participants=True, request_participant_id=request.rest_messaging_participant.id)
return thread
|
raphaelgyory/django-rest-messaging
|
rest_messaging/models.py
|
ThreadManager.get_active_threads_involving_all_participants
|
python
|
def get_active_threads_involving_all_participants(self, *participant_ids):
query = Thread.objects.\
exclude(participation__date_left__lte=now()).\
annotate(count_participants=Count('participants')).\
filter(count_participants=len(participant_ids))
for participant_id in participant_ids:
query = query.filter(participants__id=participant_id)
return query.distinct()
|
Gets the threads where the specified participants are active and no one has left.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L46-L57
| null |
class ThreadManager(models.Manager):
def get_threads_for_participant(self, participant_id):
""" Gets all the threads in which the current participant is or was involved. The method does not exclude threads where the participant has left. """
return Thread.objects.\
filter(participants__id=participant_id).\
distinct()
def get_threads_where_participant_is_active(self, participant_id):
""" Gets all the threads in which the current participant is involved. The method excludes threads where the participant has left. """
participations = Participation.objects.\
filter(participant__id=participant_id).\
exclude(date_left__lte=now()).\
distinct().\
select_related('thread')
return Thread.objects.\
filter(id__in=[p.thread.id for p in participations]).\
distinct()
def get_or_create_thread(self, request, name=None, *participant_ids):
"""
When a Participant posts a message to other participants without specifying an existing Thread,
we must
1. Create a new Thread if they have not yet opened the discussion.
2. If they have already opened the discussion and multiple Threads are not allowed for the same users, we must
re-attach this message to the existing thread.
3. If they have already opened the discussion and multiple Threads are allowed, we simply create a new one.
"""
# we get the current participant
# or create him if he does not exit
participant_ids = list(participant_ids)
if request.rest_messaging_participant.id not in participant_ids:
participant_ids.append(request.rest_messaging_participant.id)
# we need at least one other participant
if len(participant_ids) < 2:
raise Exception('At least two participants are required.')
if getattr(settings, "REST_MESSAGING_THREAD_UNIQUE_FOR_ACTIVE_RECIPIENTS", True) is True:
# if we limit the number of threads by active participants
# we ensure a thread is not already running
existing_threads = self.get_active_threads_involving_all_participants(*participant_ids)
if len(list(existing_threads)) > 0:
return existing_threads[0]
# we have no existing Thread or multiple Thread instances are allowed
thread = Thread.objects.create(name=name)
# we add the participants
thread.add_participants(request, *participant_ids)
# we send a signal to say the thread with participants is created
post_save.send(Thread, instance=thread, created=True, created_and_add_participants=True, request_participant_id=request.rest_messaging_participant.id)
return thread
|
raphaelgyory/django-rest-messaging
|
rest_messaging/models.py
|
ThreadManager.get_or_create_thread
|
python
|
def get_or_create_thread(self, request, name=None, *participant_ids):
# we get the current participant
# or create him if he does not exit
participant_ids = list(participant_ids)
if request.rest_messaging_participant.id not in participant_ids:
participant_ids.append(request.rest_messaging_participant.id)
# we need at least one other participant
if len(participant_ids) < 2:
raise Exception('At least two participants are required.')
if getattr(settings, "REST_MESSAGING_THREAD_UNIQUE_FOR_ACTIVE_RECIPIENTS", True) is True:
# if we limit the number of threads by active participants
# we ensure a thread is not already running
existing_threads = self.get_active_threads_involving_all_participants(*participant_ids)
if len(list(existing_threads)) > 0:
return existing_threads[0]
# we have no existing Thread or multiple Thread instances are allowed
thread = Thread.objects.create(name=name)
# we add the participants
thread.add_participants(request, *participant_ids)
# we send a signal to say the thread with participants is created
post_save.send(Thread, instance=thread, created=True, created_and_add_participants=True, request_participant_id=request.rest_messaging_participant.id)
return thread
|
When a Participant posts a message to other participants without specifying an existing Thread,
we must
1. Create a new Thread if they have not yet opened the discussion.
2. If they have already opened the discussion and multiple Threads are not allowed for the same users, we must
re-attach this message to the existing thread.
3. If they have already opened the discussion and multiple Threads are allowed, we simply create a new one.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L59-L96
|
[
"def get_active_threads_involving_all_participants(self, *participant_ids):\n \"\"\" Gets the threads where the specified participants are active and no one has left. \"\"\"\n\n query = Thread.objects.\\\n exclude(participation__date_left__lte=now()).\\\n annotate(count_participants=Count('participants')).\\\n filter(count_participants=len(participant_ids))\n\n for participant_id in participant_ids:\n query = query.filter(participants__id=participant_id)\n\n return query.distinct()\n"
] |
class ThreadManager(models.Manager):
def get_threads_for_participant(self, participant_id):
""" Gets all the threads in which the current participant is or was involved. The method does not exclude threads where the participant has left. """
return Thread.objects.\
filter(participants__id=participant_id).\
distinct()
def get_threads_where_participant_is_active(self, participant_id):
""" Gets all the threads in which the current participant is involved. The method excludes threads where the participant has left. """
participations = Participation.objects.\
filter(participant__id=participant_id).\
exclude(date_left__lte=now()).\
distinct().\
select_related('thread')
return Thread.objects.\
filter(id__in=[p.thread.id for p in participations]).\
distinct()
def get_active_threads_involving_all_participants(self, *participant_ids):
""" Gets the threads where the specified participants are active and no one has left. """
query = Thread.objects.\
exclude(participation__date_left__lte=now()).\
annotate(count_participants=Count('participants')).\
filter(count_participants=len(participant_ids))
for participant_id in participant_ids:
query = query.filter(participants__id=participant_id)
return query.distinct()
|
raphaelgyory/django-rest-messaging
|
rest_messaging/models.py
|
MessageManager.return_daily_messages_count
|
python
|
def return_daily_messages_count(self, sender):
h24 = now() - timedelta(days=1)
return Message.objects.filter(sender=sender, sent_at__gte=h24).count()
|
Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L191-L194
| null |
class MessageManager(models.Manager):
def check_who_read(self, messages):
""" Check who read each message. """
# we get the corresponding Participation objects
for m in messages:
readers = []
for p in m.thread.participation_set.all():
if p.date_last_check is None:
pass
elif p.date_last_check > m.sent_at:
# the message has been read
readers.append(p.participant.id)
setattr(m, "readers", readers)
return messages
def check_is_notification(self, participant_id, messages):
""" Check if each message requires a notification for the specified participant. """
try:
# we get the last check
last_check = NotificationCheck.objects.filter(participant__id=participant_id).latest('id').date_check
except Exception:
# we have no notification check
# all the messages are considered as new
for m in messages:
m.is_notification = True
return messages
for m in messages:
if m.sent_at > last_check and m.sender.id != participant_id:
setattr(m, "is_notification", True)
else:
setattr(m, "is_notification", False)
return messages
def get_lasts_messages_of_threads(self, participant_id, check_who_read=True, check_is_notification=True):
""" Returns the last message in each thread """
# we get the last message for each thread
# we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')
threads = Thread.managers.\
get_threads_where_participant_is_active(participant_id).\
annotate(last_message_id=Max('message__id'))
messages = Message.objects.filter(id__in=[thread.last_message_id for thread in threads]).\
order_by('-id').\
distinct().\
select_related('thread', 'sender')
if check_who_read is True:
messages = messages.prefetch_related('thread__participation_set', 'thread__participation_set__participant')
messages = self.check_who_read(messages)
else:
messages = messages.prefetch_related('thread__participants')
if check_is_notification is True:
messages = self.check_is_notification(participant_id, messages)
return messages
def get_all_messages_in_thread(self, participant_id, thread_id, check_who_read=True):
""" Returns all the messages in a thread. """
try:
messages = Message.objects.filter(thread__id=thread_id).\
order_by('-id').\
select_related('thread').\
prefetch_related('thread__participation_set', 'thread__participation_set__participant')
except Exception:
return Message.objects.none()
messages = self.check_who_read(messages)
return messages
|
raphaelgyory/django-rest-messaging
|
rest_messaging/models.py
|
MessageManager.check_who_read
|
python
|
def check_who_read(self, messages):
# we get the corresponding Participation objects
for m in messages:
readers = []
for p in m.thread.participation_set.all():
if p.date_last_check is None:
pass
elif p.date_last_check > m.sent_at:
# the message has been read
readers.append(p.participant.id)
setattr(m, "readers", readers)
return messages
|
Check who read each message.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L196-L209
| null |
class MessageManager(models.Manager):
def return_daily_messages_count(self, sender):
""" Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """
h24 = now() - timedelta(days=1)
return Message.objects.filter(sender=sender, sent_at__gte=h24).count()
def check_is_notification(self, participant_id, messages):
""" Check if each message requires a notification for the specified participant. """
try:
# we get the last check
last_check = NotificationCheck.objects.filter(participant__id=participant_id).latest('id').date_check
except Exception:
# we have no notification check
# all the messages are considered as new
for m in messages:
m.is_notification = True
return messages
for m in messages:
if m.sent_at > last_check and m.sender.id != participant_id:
setattr(m, "is_notification", True)
else:
setattr(m, "is_notification", False)
return messages
def get_lasts_messages_of_threads(self, participant_id, check_who_read=True, check_is_notification=True):
""" Returns the last message in each thread """
# we get the last message for each thread
# we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')
threads = Thread.managers.\
get_threads_where_participant_is_active(participant_id).\
annotate(last_message_id=Max('message__id'))
messages = Message.objects.filter(id__in=[thread.last_message_id for thread in threads]).\
order_by('-id').\
distinct().\
select_related('thread', 'sender')
if check_who_read is True:
messages = messages.prefetch_related('thread__participation_set', 'thread__participation_set__participant')
messages = self.check_who_read(messages)
else:
messages = messages.prefetch_related('thread__participants')
if check_is_notification is True:
messages = self.check_is_notification(participant_id, messages)
return messages
def get_all_messages_in_thread(self, participant_id, thread_id, check_who_read=True):
""" Returns all the messages in a thread. """
try:
messages = Message.objects.filter(thread__id=thread_id).\
order_by('-id').\
select_related('thread').\
prefetch_related('thread__participation_set', 'thread__participation_set__participant')
except Exception:
return Message.objects.none()
messages = self.check_who_read(messages)
return messages
|
raphaelgyory/django-rest-messaging
|
rest_messaging/models.py
|
MessageManager.check_is_notification
|
python
|
def check_is_notification(self, participant_id, messages):
try:
# we get the last check
last_check = NotificationCheck.objects.filter(participant__id=participant_id).latest('id').date_check
except Exception:
# we have no notification check
# all the messages are considered as new
for m in messages:
m.is_notification = True
return messages
for m in messages:
if m.sent_at > last_check and m.sender.id != participant_id:
setattr(m, "is_notification", True)
else:
setattr(m, "is_notification", False)
return messages
|
Check if each message requires a notification for the specified participant.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L211-L228
| null |
class MessageManager(models.Manager):
def return_daily_messages_count(self, sender):
""" Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """
h24 = now() - timedelta(days=1)
return Message.objects.filter(sender=sender, sent_at__gte=h24).count()
def check_who_read(self, messages):
""" Check who read each message. """
# we get the corresponding Participation objects
for m in messages:
readers = []
for p in m.thread.participation_set.all():
if p.date_last_check is None:
pass
elif p.date_last_check > m.sent_at:
# the message has been read
readers.append(p.participant.id)
setattr(m, "readers", readers)
return messages
def get_lasts_messages_of_threads(self, participant_id, check_who_read=True, check_is_notification=True):
""" Returns the last message in each thread """
# we get the last message for each thread
# we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')
threads = Thread.managers.\
get_threads_where_participant_is_active(participant_id).\
annotate(last_message_id=Max('message__id'))
messages = Message.objects.filter(id__in=[thread.last_message_id for thread in threads]).\
order_by('-id').\
distinct().\
select_related('thread', 'sender')
if check_who_read is True:
messages = messages.prefetch_related('thread__participation_set', 'thread__participation_set__participant')
messages = self.check_who_read(messages)
else:
messages = messages.prefetch_related('thread__participants')
if check_is_notification is True:
messages = self.check_is_notification(participant_id, messages)
return messages
def get_all_messages_in_thread(self, participant_id, thread_id, check_who_read=True):
""" Returns all the messages in a thread. """
try:
messages = Message.objects.filter(thread__id=thread_id).\
order_by('-id').\
select_related('thread').\
prefetch_related('thread__participation_set', 'thread__participation_set__participant')
except Exception:
return Message.objects.none()
messages = self.check_who_read(messages)
return messages
|
raphaelgyory/django-rest-messaging
|
rest_messaging/models.py
|
MessageManager.get_lasts_messages_of_threads
|
python
|
def get_lasts_messages_of_threads(self, participant_id, check_who_read=True, check_is_notification=True):
# we get the last message for each thread
# we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')
threads = Thread.managers.\
get_threads_where_participant_is_active(participant_id).\
annotate(last_message_id=Max('message__id'))
messages = Message.objects.filter(id__in=[thread.last_message_id for thread in threads]).\
order_by('-id').\
distinct().\
select_related('thread', 'sender')
if check_who_read is True:
messages = messages.prefetch_related('thread__participation_set', 'thread__participation_set__participant')
messages = self.check_who_read(messages)
else:
messages = messages.prefetch_related('thread__participants')
if check_is_notification is True:
messages = self.check_is_notification(participant_id, messages)
return messages
|
Returns the last message in each thread
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L230-L250
|
[
"def check_who_read(self, messages):\n \"\"\" Check who read each message. \"\"\"\n # we get the corresponding Participation objects\n for m in messages:\n readers = []\n for p in m.thread.participation_set.all():\n if p.date_last_check is None:\n pass\n elif p.date_last_check > m.sent_at:\n # the message has been read\n readers.append(p.participant.id)\n setattr(m, \"readers\", readers)\n\n return messages\n",
"def check_is_notification(self, participant_id, messages):\n \"\"\" Check if each message requires a notification for the specified participant. \"\"\"\n try:\n # we get the last check\n last_check = NotificationCheck.objects.filter(participant__id=participant_id).latest('id').date_check\n except Exception:\n # we have no notification check\n # all the messages are considered as new\n for m in messages:\n m.is_notification = True\n return messages\n\n for m in messages:\n if m.sent_at > last_check and m.sender.id != participant_id:\n setattr(m, \"is_notification\", True)\n else:\n setattr(m, \"is_notification\", False)\n return messages\n"
] |
class MessageManager(models.Manager):
def return_daily_messages_count(self, sender):
""" Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """
h24 = now() - timedelta(days=1)
return Message.objects.filter(sender=sender, sent_at__gte=h24).count()
def check_who_read(self, messages):
""" Check who read each message. """
# we get the corresponding Participation objects
for m in messages:
readers = []
for p in m.thread.participation_set.all():
if p.date_last_check is None:
pass
elif p.date_last_check > m.sent_at:
# the message has been read
readers.append(p.participant.id)
setattr(m, "readers", readers)
return messages
def check_is_notification(self, participant_id, messages):
""" Check if each message requires a notification for the specified participant. """
try:
# we get the last check
last_check = NotificationCheck.objects.filter(participant__id=participant_id).latest('id').date_check
except Exception:
# we have no notification check
# all the messages are considered as new
for m in messages:
m.is_notification = True
return messages
for m in messages:
if m.sent_at > last_check and m.sender.id != participant_id:
setattr(m, "is_notification", True)
else:
setattr(m, "is_notification", False)
return messages
def get_all_messages_in_thread(self, participant_id, thread_id, check_who_read=True):
""" Returns all the messages in a thread. """
try:
messages = Message.objects.filter(thread__id=thread_id).\
order_by('-id').\
select_related('thread').\
prefetch_related('thread__participation_set', 'thread__participation_set__participant')
except Exception:
return Message.objects.none()
messages = self.check_who_read(messages)
return messages
|
raphaelgyory/django-rest-messaging
|
rest_messaging/models.py
|
MessageManager.get_all_messages_in_thread
|
python
|
def get_all_messages_in_thread(self, participant_id, thread_id, check_who_read=True):
try:
messages = Message.objects.filter(thread__id=thread_id).\
order_by('-id').\
select_related('thread').\
prefetch_related('thread__participation_set', 'thread__participation_set__participant')
except Exception:
return Message.objects.none()
messages = self.check_who_read(messages)
return messages
|
Returns all the messages in a thread.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/models.py#L252-L263
|
[
"def check_who_read(self, messages):\n \"\"\" Check who read each message. \"\"\"\n # we get the corresponding Participation objects\n for m in messages:\n readers = []\n for p in m.thread.participation_set.all():\n if p.date_last_check is None:\n pass\n elif p.date_last_check > m.sent_at:\n # the message has been read\n readers.append(p.participant.id)\n setattr(m, \"readers\", readers)\n\n return messages\n"
] |
class MessageManager(models.Manager):
def return_daily_messages_count(self, sender):
""" Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """
h24 = now() - timedelta(days=1)
return Message.objects.filter(sender=sender, sent_at__gte=h24).count()
def check_who_read(self, messages):
""" Check who read each message. """
# we get the corresponding Participation objects
for m in messages:
readers = []
for p in m.thread.participation_set.all():
if p.date_last_check is None:
pass
elif p.date_last_check > m.sent_at:
# the message has been read
readers.append(p.participant.id)
setattr(m, "readers", readers)
return messages
def check_is_notification(self, participant_id, messages):
""" Check if each message requires a notification for the specified participant. """
try:
# we get the last check
last_check = NotificationCheck.objects.filter(participant__id=participant_id).latest('id').date_check
except Exception:
# we have no notification check
# all the messages are considered as new
for m in messages:
m.is_notification = True
return messages
for m in messages:
if m.sent_at > last_check and m.sender.id != participant_id:
setattr(m, "is_notification", True)
else:
setattr(m, "is_notification", False)
return messages
def get_lasts_messages_of_threads(self, participant_id, check_who_read=True, check_is_notification=True):
""" Returns the last message in each thread """
# we get the last message for each thread
# we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')
threads = Thread.managers.\
get_threads_where_participant_is_active(participant_id).\
annotate(last_message_id=Max('message__id'))
messages = Message.objects.filter(id__in=[thread.last_message_id for thread in threads]).\
order_by('-id').\
distinct().\
select_related('thread', 'sender')
if check_who_read is True:
messages = messages.prefetch_related('thread__participation_set', 'thread__participation_set__participant')
messages = self.check_who_read(messages)
else:
messages = messages.prefetch_related('thread__participants')
if check_is_notification is True:
messages = self.check_is_notification(participant_id, messages)
return messages
|
raphaelgyory/django-rest-messaging
|
rest_messaging/views.py
|
ThreadView.create
|
python
|
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=compat_get_request_data(request))
compat_serializer_check_is_valid(serializer)
self.perform_create(request, serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
|
We ensure the Thread only involves eligible participants.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/views.py#L36-L42
|
[
"def compat_serializer_check_is_valid(serializer):\n \"\"\" http://www.django-rest-framework.org/topics/3.0-announcement/#using-is_validraise_exceptiontrue \"\"\"\n if DRFVLIST[0] >= 3:\n serializer.is_valid(raise_exception=True)\n else:\n if not serializer.is_valid():\n serializers.ValidationError('The serializer raises a validation error')\n",
"def compat_get_request_data(request):\n \"\"\" http://www.django-rest-framework.org/topics/3.0-announcement/#request-objects \"\"\"\n if DRFVLIST[0] >= 3:\n return request.data\n else:\n return request.DATA\n"
] |
class ThreadView(mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""
The ThreadView allow us to create threads, and add/remove people to/from them.
It does not list the messages belonging to the thread.
"""
queryset = Thread.objects.all().prefetch_related('participants')
serializer_class = ThreadSerializer
permission_classes = (IsInThread,)
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = ThreadSerializer(instance, callback=getattr(settings, 'REST_MESSAGING_SERIALIZE_PARTICIPANTS_CALLBACK', None), context={'request': request}) # self.get_serializer will raise an error in DRF 2.4
return Response(serializer.data)
def perform_create(self, request, serializer):
participants_ids = json.loads(compat_get_request_data(self.request).get('participants'))
thread = Thread.managers.get_or_create_thread(self.request, compat_get_request_data(self.request).get('name'), *participants_ids)
setattr(serializer, compat_thread_serializer_set(), thread)
def update(self, request, *args, **kwargs):
participants_ids = compat_get_request_data(self.request).getlist('participants', [])
if len(participants_ids) > 0:
# we warn the user he cannot update the participants here
return Response("Participant updates not allowed by this method.", status=status.HTTP_400_BAD_REQUEST)
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=compat_get_request_data(request), partial=partial)
compat_serializer_check_is_valid(serializer)
try:
self.perform_update(serializer)
except:
compat_perform_update(self, serializer)
return Response(serializer.data)
@detail_route(methods=['post'])
def add_participants(self, request, pk=None):
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we get the participants and add them
participants_ids = json.loads(compat_get_request_data(self.request).get('participants'))
thread.add_participants(request, *participants_ids)
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
@detail_route(methods=['post'])
def remove_participant(self, request, pk=None):
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we get the participant
participant_id = compat_get_request_data(self.request).get('participant')
participant = Participant.objects.get(id=participant_id)
# we remove him if thread.remove_participant allows us to
try:
thread.remove_participant(request, participant)
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST)
@detail_route(methods=['get'])
def get_removable_participants_ids(self, request, pk=None):
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we get the removable participants
removable_participants_ids = thread.get_removable_participants_ids(request)
# we remove him if thread.remove_participant allows us to
try:
return Response({'participants': removable_participants_ids})
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST)
@detail_route(methods=['post'])
def mark_thread_as_read(self, request, pk=None):
""" Pk is the pk of the Thread to which the messages belong. """
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we save the date
try:
participation = Participation.objects.get(thread=thread, participant=request.rest_messaging_participant)
participation.date_last_check = now()
participation.save()
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST)
|
raphaelgyory/django-rest-messaging
|
rest_messaging/views.py
|
ThreadView.mark_thread_as_read
|
python
|
def mark_thread_as_read(self, request, pk=None):
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we save the date
try:
participation = Participation.objects.get(thread=thread, participant=request.rest_messaging_participant)
participation.date_last_check = now()
participation.save()
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST)
|
Pk is the pk of the Thread to which the messages belong.
|
train
|
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/views.py#L107-L121
| null |
class ThreadView(mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""
The ThreadView allow us to create threads, and add/remove people to/from them.
It does not list the messages belonging to the thread.
"""
queryset = Thread.objects.all().prefetch_related('participants')
serializer_class = ThreadSerializer
permission_classes = (IsInThread,)
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = ThreadSerializer(instance, callback=getattr(settings, 'REST_MESSAGING_SERIALIZE_PARTICIPANTS_CALLBACK', None), context={'request': request}) # self.get_serializer will raise an error in DRF 2.4
return Response(serializer.data)
def create(self, request, *args, **kwargs):
""" We ensure the Thread only involves eligible participants. """
serializer = self.get_serializer(data=compat_get_request_data(request))
compat_serializer_check_is_valid(serializer)
self.perform_create(request, serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, request, serializer):
participants_ids = json.loads(compat_get_request_data(self.request).get('participants'))
thread = Thread.managers.get_or_create_thread(self.request, compat_get_request_data(self.request).get('name'), *participants_ids)
setattr(serializer, compat_thread_serializer_set(), thread)
def update(self, request, *args, **kwargs):
participants_ids = compat_get_request_data(self.request).getlist('participants', [])
if len(participants_ids) > 0:
# we warn the user he cannot update the participants here
return Response("Participant updates not allowed by this method.", status=status.HTTP_400_BAD_REQUEST)
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=compat_get_request_data(request), partial=partial)
compat_serializer_check_is_valid(serializer)
try:
self.perform_update(serializer)
except:
compat_perform_update(self, serializer)
return Response(serializer.data)
@detail_route(methods=['post'])
def add_participants(self, request, pk=None):
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we get the participants and add them
participants_ids = json.loads(compat_get_request_data(self.request).get('participants'))
thread.add_participants(request, *participants_ids)
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
@detail_route(methods=['post'])
def remove_participant(self, request, pk=None):
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we get the participant
participant_id = compat_get_request_data(self.request).get('participant')
participant = Participant.objects.get(id=participant_id)
# we remove him if thread.remove_participant allows us to
try:
thread.remove_participant(request, participant)
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST)
@detail_route(methods=['get'])
def get_removable_participants_ids(self, request, pk=None):
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we get the removable participants
removable_participants_ids = thread.get_removable_participants_ids(request)
# we remove him if thread.remove_participant allows us to
try:
return Response({'participants': removable_participants_ids})
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST)
@detail_route(methods=['post'])
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
serialize
|
python
|
def serialize(obj):
from datetime import datetime, date, time
if isinstance(obj, date) and not isinstance(obj, datetime):
obj = datetime.combine(obj, time.min)
if isinstance(obj, datetime):
return obj.isoformat()
|
JSON serializer that accepts datetime & date
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L48-L54
| null |
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from __future__ import unicode_literals, print_function, absolute_import
from collections import namedtuple
from itertools import islice
"""
Utility module for accessing the AmCAT API.
This module is designed to be used as an independent module, so you can copy
this file into your project. For that reason, this module is also licensed
under the GNU Lesser GPL rather than the Affero GPL, so feel free to use it
in non-GPL programs.
"""
import re
import requests
import json
import logging
import os
import os.path
import csv
import itertools
import tempfile
from six import string_types
log = logging.getLogger(__name__)
Version = namedtuple("Version", ["major", "minor", "build"])
class URL:
articlesets = 'projects/{project}/articlesets/'
articleset = articlesets + '{articleset}/'
article = articleset + 'articles/'
search = 'search'
get_token = 'get_token'
media = 'medium'
aggregate = 'aggregate'
projectmeta = articleset + "meta"
meta = "meta"
status = 'status'
AUTH_FILE = os.path.join("~", ".amcatauth")
class APIError(EnvironmentError):
def __init__(self, http_status, message, url, response, description=None, details=None):
super(APIError, self).__init__(http_status, message, url)
self.http_status = http_status
self.url = url
self.response = response
self.description = description
self.details = details
def __str__(self):
return "{parent}: {description}; {details}".format(
parent=super(APIError, self).__str__(), **self.__dict__
)
class Unauthorized(APIError):
pass
def _APIError(http_status, *args, **kargs):
cls = Unauthorized if http_status == 401 else APIError
return cls(http_status, *args, **kargs)
def check(response, expected_status=200, url=None):
    """
    Check whether the status code of the response equals expected_status and
    raise an APIError otherwise.

    @param response: a requests.Response (or compatible) object
    @param expected_status: the HTTP status code considered successful
    @param url: The url of the response (for error messages).
                Defaults to response.url
    @return: the decoded json body if the server sent json, the raw text otherwise
    """
    if response.status_code != expected_status:
        if url is None:
            url = response.url
        try:
            err = response.json()
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
            err = {}  # force generic error
        if all(x in err for x in ("status", "message", "description", "details")):
            # Structured AmCAT error body: re-raise with full details
            raise _APIError(err["status"], err['message'], url,
                            err, err["description"], err["details"])
        else:  # generic error
            suffix = ".html" if "<html" in response.text else ".txt"
            msg = response.text
            if len(msg) > 200:
                # Long bodies are dumped to a temp file to keep the message readable
                with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as f:
                    f.write(response.text.encode("utf-8"))
                msg = "{}...\n\n[snipped; full response written to {f.name}]".format(msg[:100], **locals())
            msg = ("Request {url!r} returned code {response.status_code},"
                   " expected {expected_status}. \n{msg}".format(**locals()))
            raise _APIError(response.status_code, msg, url, response.text)
    if response.headers.get('Content-Type') == 'application/json':
        try:
            return response.json()
        except Exception:
            raise Exception("Cannot decode json; text={response.text!r}"
                            .format(**locals()))
    else:
        return response.text
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
v = self.get_version()
if v.major < major:
return False
return (minor is None) or (v.minor >= minor)
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
"""
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
"""
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
"""Get the AmCAT status page"""
url = URL.status.format(**locals())
return self.get_request(url)
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
def list_sets(self, project, **filters):
"""List the articlesets in a project"""
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
def get_set(self, project, articleset, **filters):
"""List the articlesets in a project"""
url = URL.articleset.format(**locals())
return self.request(url, **filters)
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
"""
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
"""
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
def create_articles(self, project, articleset, json_data=None, **options):
"""
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
"""
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("server", help="Server hostname (e.g. https://amcat.nl)")
parser.add_argument("--username", help="Username")
parser.add_argument("--password", nargs="?", help="Password (leave empty to prompt)")
action_parser = parser.add_subparsers(dest='action', title='Actions',)
p = action_parser.add_parser("get_articles")
p.add_argument('project', help="Project ID")
p.add_argument('articleset', help="Article Set ID")
p.add_argument('--page-size', nargs=1, type=int, default=100, help="Number of items per page")
p.add_argument('--columns', default='date,headline,medium', help="Columns to retrieve (e.g. headline,date)")
p.add_argument('--format', default='json', help="Format (currently only json is supported)", choices=['json'])
args = parser.parse_args()
c = AmcatAPI(args.server, args.username, args.password)
if args.action == "get_articles":
kargs = dict(page_size=args.page_size, format=args.format, columns=args.columns.split(","))
for a in c.get_articles(args.project, args.articleset, **kargs):
print(json.dumps(a))
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
check
|
python
|
def check(response, expected_status=200, url=None):
if response.status_code != expected_status:
if url is None:
url = response.url
try:
err = response.json()
except:
err = {} # force generic error
if all(x in err for x in ("status", "message", "description", "details")):
raise _APIError(err["status"], err['message'], url,
err, err["description"], err["details"])
else: # generic error
suffix = ".html" if "<html" in response.text else ".txt"
msg = response.text
if len(msg) > 200:
with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as f:
f.write(response.text.encode("utf-8"))
msg = "{}...\n\n[snipped; full response written to {f.name}".format(msg[:100], **locals())
msg = ("Request {url!r} returned code {response.status_code},"
" expected {expected_status}. \n{msg}".format(**locals()))
raise _APIError(response.status_code, msg, url, response.text)
if response.headers.get('Content-Type') == 'application/json':
try:
return response.json()
except:
raise Exception("Cannot decode json; text={response.text!r}"
.format(**locals()))
else:
return response.text
|
Check whether the status code of the response equals expected_status and
raise an APIError otherwise.
@param url: The url of the response (for error messages).
Defaults to response.url
@param json: if True, return r.json(), otherwise return r.text
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L94-L132
|
[
"def _APIError(http_status, *args, **kargs):\n cls = Unauthorized if http_status == 401 else APIError\n return cls(http_status, *args, **kargs)\n"
] |
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from __future__ import unicode_literals, print_function, absolute_import
from collections import namedtuple
from itertools import islice
"""
Utility module for accessing the AmCAT API.
This module is designed to be used as an independent module, so you can copy
this file into your project. For that reason, this module is also licensed
under the GNU Lesser GPL rather than the Affero GPL, so feel free to use it
in non-GPL programs.
"""
import re
import requests
import json
import logging
import os
import os.path
import csv
import itertools
import tempfile
from six import string_types
log = logging.getLogger(__name__)
Version = namedtuple("Version", ["major", "minor", "build"])
def serialize(obj):
    """JSON serializer for objects json cannot handle natively (datetime & date).

    Dates are promoted to midnight datetimes and rendered as ISO-8601
    strings. Any other type raises TypeError, as the ``default`` hook of
    json.dumps requires; the previous implicit ``return None`` silently
    turned unserializable values into ``null``.
    """
    from datetime import datetime, date, time
    if isinstance(obj, date) and not isinstance(obj, datetime):
        # A plain date has no time component; normalize to midnight
        obj = datetime.combine(obj, time.min)
    if isinstance(obj, datetime):
        return obj.isoformat()
    raise TypeError("Object of type {} is not JSON serializable"
                    .format(type(obj).__name__))
class URL:
    """Relative API endpoint templates; placeholders are filled via str.format."""
    articlesets = 'projects/{project}/articlesets/'
    articleset = articlesets + '{articleset}/'
    article = articleset + 'articles/'
    search = 'search'
    get_token = 'get_token'
    media = 'medium'
    aggregate = 'aggregate'
    projectmeta = articleset + "meta"
    meta = "meta"
    status = 'status'
# Per-user credentials file: CSV rows of (host, username, password)
AUTH_FILE = os.path.join("~", ".amcatauth")
class APIError(EnvironmentError):
    """Error raised when the AmCAT API returns an unexpected status code."""

    def __init__(self, http_status, message, url, response, description=None, details=None):
        # EnvironmentError stores (errno, strerror, filename); we reuse those
        # slots for (status, message, url) so str() shows all three.
        super(APIError, self).__init__(http_status, message, url)
        self.http_status = http_status
        self.url = url
        self.response = response
        self.description = description
        self.details = details

    def __str__(self):
        base = super(APIError, self).__str__()
        return "{0}: {1}; {2}".format(base, self.description, self.details)
class Unauthorized(APIError):
    """Raised for HTTP 401 responses (missing or invalid credentials)."""
    pass
def _APIError(http_status, *args, **kargs):
    """Build the right exception for a status code: Unauthorized for 401,
    plain APIError for everything else."""
    if http_status == 401:
        cls = Unauthorized
    else:
        cls = APIError
    return cls(http_status, *args, **kargs)
def check(response, expected_status=200, url=None):
    """
    Check whether the status code of the response equals expected_status and
    raise an APIError otherwise.

    @param response: a requests.Response (or compatible) object
    @param expected_status: the HTTP status code considered successful
    @param url: The url of the response (for error messages).
                Defaults to response.url
    @return: the decoded json body if the server sent json, the raw text otherwise
    """
    if response.status_code != expected_status:
        if url is None:
            url = response.url
        try:
            err = response.json()
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
            err = {}  # force generic error
        if all(x in err for x in ("status", "message", "description", "details")):
            # Structured AmCAT error body: re-raise with full details
            raise _APIError(err["status"], err['message'], url,
                            err, err["description"], err["details"])
        else:  # generic error
            suffix = ".html" if "<html" in response.text else ".txt"
            msg = response.text
            if len(msg) > 200:
                # Long bodies are dumped to a temp file to keep the message readable
                with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as f:
                    f.write(response.text.encode("utf-8"))
                msg = "{}...\n\n[snipped; full response written to {f.name}]".format(msg[:100], **locals())
            msg = ("Request {url!r} returned code {response.status_code},"
                   " expected {expected_status}. \n{msg}".format(**locals()))
            raise _APIError(response.status_code, msg, url, response.text)
    if response.headers.get('Content-Type') == 'application/json':
        try:
            return response.json()
        except Exception:
            raise Exception("Cannot decode json; text={response.text!r}"
                            .format(**locals()))
    else:
        return response.text
class AmcatAPI(object):
    def __init__(self, host, user=None, password=None, token=None):
        """
        Connection to an AmCAT server.
        :param host: AmCAT server address, including http(s)://
        :param user: Username. If not given, taken from AMCAT_USER or USER environment
        :param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
        :param token: Token to use (requires amcat >= 3.5)
        """
        self.host = host
        if token:
            # Prefer renewing the supplied token; older servers (< 3.5) don't
            # support this, so fall back to username/password authentication.
            try:
                self.token, self.version = self.renew_token(token)
            except APIError as e:
                logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
                                .format(**locals()))
                token = None
        if token is None:
            self.token, self.version = self.get_token(user, password)
        logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
v = self.get_version()
if v.major < major:
return False
return (minor is None) or (v.minor >= minor)
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
    def _get_auth(self, user=None, password=None):
        """
        Get the authentication info for the current user, from
        1) a ~/.amcatauth file, which should be a csv file
        containing host, username, password entries
        2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables

        :return: a (username, password) tuple
        :raises Exception: if no credentials can be found from either source
        """
        fn = os.path.expanduser(AUTH_FILE)
        if os.path.exists(fn):
            for i, line in enumerate(csv.reader(open(fn))):
                if len(line) != 3:
                    log.warning("Cannot parse line {i} in {fn}".format(**locals()))
                    continue
                hostname, username, pwd = line
                # Empty or "*" hostname entries act as wildcards for any host
                if (hostname in ("", "*", self.host)
                    and (user is None or username == user)):
                    return (username, pwd)
        if user is None:
            user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
        if password is None:
            password = os.environ.get("AMCAT_PASSWORD")
        if user is None or password is None:
            raise Exception("No authentication info for {user}@{self.host} "
                            "from {fn} or AMCAT_USER / AMCAT_PASSWORD "
                            "variables".format(**locals()))
        return user, password
    def renew_token(self, token):
        """Exchange an existing token for a fresh one (requires AmCAT >= 3.5).

        Sets self.token first so the request authenticates with the old
        token; returns the (token, version) pair from the server.
        """
        self.token = token
        resp = self.request(URL.get_token, method='post', expected_status=200)
        return resp['token'], resp['version']
    def get_token(self, user=None, password=None):
        """Authenticate with username/password and return (token, version).

        Credentials default to those found by _get_auth. Servers older than
        3.4 do not report a version, hence the '3.3 (or older)' fallback.
        """
        if user is None or password is None:
            user, password = self._get_auth()
        url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
        r = requests.post(url, data={'username': user, 'password': password})
        r.raise_for_status()
        r = r.json()
        return r['token'], r.get('version', '3.3 (or older)')
    def request(self, url, method="get", format="json", data=None,
                expected_status=None, headers=None, use_xpost=True, **options):
        """
        Make an HTTP request to the given relative URL with the host,
        user, and password information. Returns the deserialized json
        if successful, and raises an exception otherwise

        :param url: relative API path, or an absolute http(s) URL
        :param format: response format added as a query parameter (None to omit)
        :param use_xpost: tunnel GET requests through POST via
                          X-HTTP-METHOD-OVERRIDE (allows very long parameter lists)
        """
        if expected_status is None:
            # Conventional success codes: 200 for GET, 201 for POST
            if method == "get":
                expected_status = 200
            elif method == "post":
                expected_status = 201
            else:
                raise ValueError("No expected status supplied and method unknown.")
        if not url.startswith("http"):
            url = "{self.host}/api/v4/{url}".format(**locals())
        if format is not None:
            options = dict({'format': format}, **options)
        # Drop None-valued options so they don't appear as 'None' strings
        options = {field: value for field, value in options.items() if value is not None}
        headers = dict(headers or {}, Authorization="Token {}".format(self.token))
        #headers['Accept-encoding'] = 'gzip'
        if method == "get" and use_xpost:
            # If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
            # query via POST. This allows for a large number of parameters to be supplied
            assert(data is None)
            headers.update({"X-HTTP-METHOD-OVERRIDE": method})
            data = options
            options = None
            method = "post"
        r = requests.request(method, url, data=data, params=options, headers=headers)
        log.debug(
            "HTTP {method} {url} (options={options!r}, data={data!r},"
            "headers={headers}) -> {r.status_code}".format(**locals())
        )
        return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
"""Get the AmCAT status page"""
url = URL.status.format(**locals())
return self.get_request(url)
    def aggregate(self, **filters):
        """Conduct an aggregate query; yields aggregation rows from the API."""
        # URL.aggregate has no placeholders; format(**locals()) is a no-op
        url = URL.aggregate.format(**locals())
        return self.get_pages(url, **filters)
    def list_sets(self, project, **filters):
        """List the articlesets in a project; yields articleset dicts."""
        # Fills the {project} placeholder from the local variables
        url = URL.articlesets.format(**locals())
        return self.get_pages(url, **filters)
    def get_set(self, project, articleset, **filters):
        """Retrieve a single articleset from a project.

        (The original docstring was a copy-paste of list_sets; this method
        returns one set, not a listing.)
        """
        url = URL.articleset.format(**locals())
        return self.request(url, **filters)
    def list_articles(self, project, articleset, page=1, **filters):
        """List the articles in a set; yields article dicts starting at page."""
        url = URL.article.format(**locals())
        return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
    def create_set(self, project, json_data=None, **options):
        """
        Create a new article set. Provide the needed arguments using
        post_data or with key-value pairs

        :param json_data: set attributes as a dict/list (sent as json) or a
                          pre-serialized json string; if None, options are
                          sent form-encoded instead
        """
        url = URL.articlesets.format(**locals())
        if json_data is None:
            # form encoded request
            return self.request(url, method="post", data=options)
        else:
            if not isinstance(json_data, (string_types)):
                # serialize handles datetime/date values
                json_data = json.dumps(json_data,default = serialize)
            headers = {'content-type': 'application/json'}
            return self.request(
                url, method='post', data=json_data, headers=headers)
    def create_articles(self, project, articleset, json_data=None, **options):
        """
        Create one or more articles in the set. Provide the needed arguments
        using the json_data or with key-value pairs
        @param json_data: A dictionary or list of dictionaries. Each dict
                          can contain a 'children' attribute which
                          is another list of dictionaries.
        """
        url = URL.article.format(**locals())
        # TODO duplicated from create_set, move into requests
        # (or separate post method?)
        if json_data is None:
            # form encoded request
            return self.request(url, method="post", data=options)
        else:
            if not isinstance(json_data, string_types):
                # serialize handles datetime/date values
                json_data = json.dumps(json_data, default=serialize)
            headers = {'content-type': 'application/json'}
            return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
# Command-line interface: dump articles from a set as one json object per line.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("server", help="Server hostname (e.g. https://amcat.nl)")
    parser.add_argument("--username", help="Username")
    parser.add_argument("--password", nargs="?", help="Password (leave empty to prompt)")
    action_parser = parser.add_subparsers(dest='action', title='Actions',)
    p = action_parser.add_parser("get_articles")
    p.add_argument('project', help="Project ID")
    p.add_argument('articleset', help="Article Set ID")
    # Fixed: nargs=1 made args.page_size a one-element list rather than an int
    p.add_argument('--page-size', type=int, default=100, help="Number of items per page")
    p.add_argument('--columns', default='date,headline,medium', help="Columns to retrieve (e.g. headline,date)")
    p.add_argument('--format', default='json', help="Format (currently only json is supported)", choices=['json'])
    args = parser.parse_args()
    c = AmcatAPI(args.server, args.username, args.password)
    if args.action == "get_articles":
        kargs = dict(page_size=args.page_size, format=args.format, columns=args.columns.split(","))
        for a in c.get_articles(args.project, args.articleset, **kargs):
            print(json.dumps(a))
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI._get_auth
|
python
|
def _get_auth(self, user=None, password=None):
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
|
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L170-L195
| null |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
v = self.get_version()
if v.major < major:
return False
return (minor is None) or (v.minor >= minor)
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
"""
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
"""
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
"""Get the AmCAT status page"""
url = URL.status.format(**locals())
return self.get_request(url)
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
def list_sets(self, project, **filters):
"""List the articlesets in a project"""
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
def get_set(self, project, articleset, **filters):
"""List the articlesets in a project"""
url = URL.articleset.format(**locals())
return self.request(url, **filters)
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
"""
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
"""
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
def create_articles(self, project, articleset, json_data=None, **options):
"""
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
"""
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI.request
|
python
|
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
|
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L211-L250
|
[
"def check(response, expected_status=200, url=None):\n \"\"\"\n Check whether the status code of the response equals expected_status and\n raise an APIError otherwise.\n @param url: The url of the response (for error messages).\n Defaults to response.url\n @param json: if True, return r.json(), otherwise return r.text\n \"\"\"\n if response.status_code != expected_status:\n if url is None:\n url = response.url\n\n try:\n err = response.json()\n except:\n err = {} # force generic error\n\n if all(x in err for x in (\"status\", \"message\", \"description\", \"details\")):\n raise _APIError(err[\"status\"], err['message'], url,\n err, err[\"description\"], err[\"details\"])\n else: # generic error\n suffix = \".html\" if \"<html\" in response.text else \".txt\"\n msg = response.text\n if len(msg) > 200:\n with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as f:\n f.write(response.text.encode(\"utf-8\"))\n msg = \"{}...\\n\\n[snipped; full response written to {f.name}\".format(msg[:100], **locals())\n\n msg = (\"Request {url!r} returned code {response.status_code},\"\n \" expected {expected_status}. \\n{msg}\".format(**locals()))\n raise _APIError(response.status_code, msg, url, response.text)\n if response.headers.get('Content-Type') == 'application/json':\n try:\n return response.json()\n except:\n raise Exception(\"Cannot decode json; text={response.text!r}\"\n .format(**locals()))\n else:\n return response.text\n"
] |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
v = self.get_version()
if v.major < major:
return False
return (minor is None) or (v.minor >= minor)
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
"""Get the AmCAT status page"""
url = URL.status.format(**locals())
return self.get_request(url)
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
def list_sets(self, project, **filters):
"""List the articlesets in a project"""
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
def get_set(self, project, articleset, **filters):
"""List the articlesets in a project"""
url = URL.articleset.format(**locals())
return self.request(url, **filters)
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
"""
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
"""
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
def create_articles(self, project, articleset, json_data=None, **options):
"""
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
"""
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI.get_pages
|
python
|
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
|
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L254-L275
|
[
"def request(self, url, method=\"get\", format=\"json\", data=None,\n expected_status=None, headers=None, use_xpost=True, **options):\n \"\"\"\n Make an HTTP request to the given relative URL with the host,\n user, and password information. Returns the deserialized json\n if successful, and raises an exception otherwise\n \"\"\"\n if expected_status is None:\n if method == \"get\":\n expected_status = 200\n elif method == \"post\":\n expected_status = 201\n else:\n raise ValueError(\"No expected status supplied and method unknown.\")\n\n if not url.startswith(\"http\"):\n url = \"{self.host}/api/v4/{url}\".format(**locals())\n if format is not None:\n options = dict({'format': format}, **options)\n options = {field: value for field, value in options.items() if value is not None}\n headers = dict(headers or {}, Authorization=\"Token {}\".format(self.token))\n #headers['Accept-encoding'] = 'gzip'\n\n if method == \"get\" and use_xpost:\n # If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our\n # query via POST. This allows for a large number of parameters to be supplied\n assert(data is None)\n\n headers.update({\"X-HTTP-METHOD-OVERRIDE\": method})\n data = options\n options = None\n method = \"post\"\n\n r = requests.request(method, url, data=data, params=options, headers=headers)\n\n log.debug(\n \"HTTP {method} {url} (options={options!r}, data={data!r},\"\n \"headers={headers}) -> {r.status_code}\".format(**locals())\n )\n return check(r, expected_status=expected_status)\n"
] |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
v = self.get_version()
if v.major < major:
return False
return (minor is None) or (v.minor >= minor)
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
"""
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
"""
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
"""Get the AmCAT status page"""
url = URL.status.format(**locals())
return self.get_request(url)
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
def list_sets(self, project, **filters):
"""List the articlesets in a project"""
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
def get_set(self, project, articleset, **filters):
"""List the articlesets in a project"""
url = URL.articleset.format(**locals())
return self.request(url, **filters)
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
"""
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
"""
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
def create_articles(self, project, articleset, json_data=None, **options):
"""
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
"""
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI.get_scroll
|
python
|
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
|
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L277-L301
|
[
"def request(self, url, method=\"get\", format=\"json\", data=None,\n expected_status=None, headers=None, use_xpost=True, **options):\n \"\"\"\n Make an HTTP request to the given relative URL with the host,\n user, and password information. Returns the deserialized json\n if successful, and raises an exception otherwise\n \"\"\"\n if expected_status is None:\n if method == \"get\":\n expected_status = 200\n elif method == \"post\":\n expected_status = 201\n else:\n raise ValueError(\"No expected status supplied and method unknown.\")\n\n if not url.startswith(\"http\"):\n url = \"{self.host}/api/v4/{url}\".format(**locals())\n if format is not None:\n options = dict({'format': format}, **options)\n options = {field: value for field, value in options.items() if value is not None}\n headers = dict(headers or {}, Authorization=\"Token {}\".format(self.token))\n #headers['Accept-encoding'] = 'gzip'\n\n if method == \"get\" and use_xpost:\n # If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our\n # query via POST. This allows for a large number of parameters to be supplied\n assert(data is None)\n\n headers.update({\"X-HTTP-METHOD-OVERRIDE\": method})\n data = options\n options = None\n method = \"post\"\n\n r = requests.request(method, url, data=data, params=options, headers=headers)\n\n log.debug(\n \"HTTP {method} {url} (options={options!r}, data={data!r},\"\n \"headers={headers}) -> {r.status_code}\".format(**locals())\n )\n return check(r, expected_status=expected_status)\n"
] |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
v = self.get_version()
if v.major < major:
return False
return (minor is None) or (v.minor >= minor)
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
    """Authenticate against the server and return (token, version string)."""
    if user is None or password is None:
        user, password = self._get_auth()
    endpoint = "{host}/api/v4/{path}".format(host=self.host, path=URL.get_token)
    response = requests.post(endpoint, data={'username': user, 'password': password})
    response.raise_for_status()
    body = response.json()
    # servers before 3.4 do not report a version in the token response
    return body['token'], body.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
            expected_status=None, headers=None, use_xpost=True, **options):
    """
    Make an HTTP request to the given relative URL with the host,
    user, and password information. Returns the deserialized json
    if successful, and raises an exception otherwise

    :param url: absolute URL, or a path relative to {host}/api/v4/
    :param method: HTTP verb, "get" or "post"
    :param format: serialization format added as a query option; None to omit
    :param data: request body; must be None for GET when use_xpost applies
    :param expected_status: status code treated as success; derived from the
        method (200 for get, 201 for post) when None
    :param headers: extra HTTP headers; the token Authorization header is
        always added
    :param use_xpost: tunnel GET queries through POST via
        X-HTTP-METHOD-OVERRIDE so large parameter sets fit in the body
    :param options: extra query parameters (None values are dropped)
    """
    if expected_status is None:
        if method == "get":
            expected_status = 200
        elif method == "post":
            expected_status = 201
        else:
            raise ValueError("No expected status supplied and method unknown.")
    if not url.startswith("http"):
        url = "{self.host}/api/v4/{url}".format(**locals())
    if format is not None:
        options = dict({'format': format}, **options)
    # drop parameters that were explicitly set to None
    options = {field: value for field, value in options.items() if value is not None}
    headers = dict(headers or {}, Authorization="Token {}".format(self.token))
    #headers['Accept-encoding'] = 'gzip'
    if method == "get" and use_xpost:
        # If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
        # query via POST. This allows for a large number of parameters to be supplied
        assert(data is None)
        headers.update({"X-HTTP-METHOD-OVERRIDE": method})
        # move the query parameters into the POST body
        data = options
        options = None
        method = "post"
    r = requests.request(method, url, data=data, params=options, headers=headers)
    log.debug(
        "HTTP {method} {url} (options={options!r}, data={data!r},"
        "headers={headers}) -> {r.status_code}".format(**locals())
    )
    return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
    """
    Iterate over all pages at *url*, yielding individual results.

    :param url: the url to fetch
    :param page: page number to start from
    :param page_size: results per page
    :param yield_pages: yield whole page dicts rather than individual results
    :param filters: additional filters passed to the API
    :return: a generator of objects (dicts) from the API
    """
    seen = 0
    for pagenr in itertools.count(page):
        result = self.request(url, page=pagenr, page_size=page_size, **filters)
        seen += len(result['results'])
        # 'page' and 'pages' in the message come from the response dict
        log.debug("Got {url} page {page} / {pages}".format(url=url, **result))
        if yield_pages:
            yield result
        else:
            for row in result['results']:
                yield row
        if result['next'] is None:
            break
def get_status(self):
    """
    Get the AmCAT status page.

    Fixed to call self.request: the original called self.get_request,
    which does not exist on this class and raised AttributeError.
    """
    url = URL.status.format(**locals())
    return self.request(url)
def aggregate(self, **filters):
    """Run an aggregate query, yielding its result rows."""
    return self.get_pages(URL.aggregate.format(**locals()), **filters)
def list_sets(self, project, **filters):
    """Yield the article sets in *project*."""
    target = URL.articlesets.format(**locals())
    return self.get_pages(target, **filters)
def get_set(self, project, articleset, **filters):
    """Fetch a single article set from *project*."""
    endpoint = URL.articleset.format(**locals())
    return self.request(endpoint, **filters)
def list_articles(self, project, articleset, page=1, **filters):
    """Yield the articles in an article set."""
    endpoint = URL.article.format(**locals())
    return self.get_pages(endpoint, page=page, **filters)
def get_media(self, medium_ids):
    """Look up the medium objects for the given ids."""
    params = "&".join("pk={}".format(mid) for mid in medium_ids)
    return self.request("{}?{}".format(URL.media, params),
                        page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
    """
    Create a new article set in *project*.

    Supply the set fields either as keyword arguments (sent form-encoded)
    or via *json_data* (a dict/list, or a pre-serialized JSON string).
    """
    url = URL.articlesets.format(**locals())
    if json_data is None:
        # form encoded request
        return self.request(url, method="post", data=options)
    if not isinstance(json_data, string_types):
        json_data = json.dumps(json_data, default=serialize)
    return self.request(url, method='post', data=json_data,
                        headers={'content-type': 'application/json'})
def create_articles(self, project, articleset, json_data=None, **options):
    """
    Add one or more articles to an article set.

    *json_data* may be a dict or a list of dicts; each dict may carry a
    'children' key holding nested article dicts. Alternatively, supply
    the article fields as keyword arguments (sent form-encoded).
    """
    url = URL.article.format(**locals())
    # TODO duplicated from create_set, move into requests
    # (or separate post method?)
    if json_data is None:
        # form encoded request
        return self.request(url, method="post", data=options)
    if not isinstance(json_data, string_types):
        json_data = json.dumps(json_data, default=serialize)
    return self.request(url, method='post', data=json_data,
                        headers={'content-type': 'application/json'})
def get_articles(self, project, articleset=None, format='json',
                 columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
    """Yield article metadata for a set, using the scroll API when available (3.4+)."""
    if not self.has_version(3, 4):
        # older servers only support the paged article listing
        return self.list_articles(project, articleset, page, page_size=page_size, **options)
    url = URL.projectmeta.format(**locals())
    return self.get_scroll(url, page=page, page_size=page_size, format=format,
                           columns=",".join(columns), **options)
def get_articles_by_id(self, articles=None, format='json',
                       columns=['date', 'headline', 'medium'], page_size=100, **options):
    """Yield metadata for the given article ids, batching requests by page_size."""
    url = URL.meta.format(**locals())
    # we cannot use POST here, so need to limit number of ids per request
    pending = iter(articles)
    while True:
        batch = list(islice(pending, page_size))
        if not batch:
            return
        options['id'] = batch
        for article in self.get_scroll(url, page_size=page_size, format=format,
                                       columns=columns, **options):
            yield article
def get_articles_by_uuid(self, articles=None, format='json',
                         columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
    """Yield metadata for the given article uuids."""
    options['uuid'] = articles
    return self.get_scroll(URL.meta.format(**locals()), page=page,
                           page_size=page_size, format=format,
                           columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
    """Run a keyword search against *articleset*, yielding per-article hits."""
    params = dict(q=query, col=columns, minimal=minimal, sets=articleset, **filters)
    return self.get_pages(URL.search, **params)
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI.get_status
|
python
|
def get_status(self):
url = URL.status.format(**locals())
return self.get_request(url)
|
Get the AmCAT status page
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L303-L306
| null |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
    """
    Return True iff the server version is at least major[.minor].

    Fixes the original comparison, which rejected e.g. version 4.0 when
    asked for at least 3.4 because it compared minor numbers regardless
    of the major version.
    """
    v = self.get_version()
    if v.major != major:
        # a different major version decides the comparison outright
        return v.major > major
    # same major: any minor is acceptable unless a minimum minor was given
    return minor is None or v.minor >= minor
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
"""
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
"""
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
    """
    Scroll through the resource at url and yield the individual results.

    :param url: url to scroll through
    :param page_size: results per page
    :param yield_pages: yield whole pages rather than individual results
    :param filters: additional filters
    :return: a generator of objects (dicts) from the API

    Removed the dead local ``format = filters.get('format')`` — it was
    assigned and never read.
    """
    n = 0
    options = dict(page_size=page_size, **filters)
    while True:
        r = self.request(url, use_xpost=False, **options)
        n += len(r['results'])
        log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
        if yield_pages:
            yield r
        else:
            for row in r['results']:
                yield row
        if r['next'] is None:
            break
        # follow the server-provided 'next' URL; it already encodes all query
        # parameters, so suppress the default format option on follow-ups
        url = r['next']
        options = {'format': None}
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
def list_sets(self, project, **filters):
"""List the articlesets in a project"""
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
def get_set(self, project, articleset, **filters):
"""List the articlesets in a project"""
url = URL.articleset.format(**locals())
return self.request(url, **filters)
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
"""
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
"""
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
def create_articles(self, project, articleset, json_data=None, **options):
"""
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
"""
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI.aggregate
|
python
|
def aggregate(self, **filters):
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
|
Conduct an aggregate query
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L308-L311
|
[
"def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):\n \"\"\"\n Get all pages at url, yielding individual results\n :param url: the url to fetch\n :param page: start from this page\n :param page_size: results per page\n :param yield_pages: yield whole pages rather than individual results\n :param filters: additional filters\n :return: a generator of objects (dicts) from the API\n \"\"\"\n n = 0\n for page in itertools.count(page):\n r = self.request(url, page=page, page_size=page_size, **filters)\n n += len(r['results'])\n log.debug(\"Got {url} page {page} / {pages}\".format(url=url, **r))\n if yield_pages:\n yield r\n else:\n for row in r['results']:\n yield row\n if r['next'] is None:\n break\n"
] |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
    """
    Return True iff the server version is at least major[.minor].

    Fixes the original comparison, which rejected e.g. version 4.0 when
    asked for at least 3.4 because it compared minor numbers regardless
    of the major version.
    """
    v = self.get_version()
    if v.major != major:
        # a different major version decides the comparison outright
        return v.major > major
    # same major: any minor is acceptable unless a minimum minor was given
    return minor is None or v.minor >= minor
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
"""
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
"""
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
    """
    Get the AmCAT status page.

    Fixed to call self.request: the original called self.get_request,
    which does not exist on this class and raised AttributeError.
    """
    url = URL.status.format(**locals())
    return self.request(url)
def list_sets(self, project, **filters):
"""List the articlesets in a project"""
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
def get_set(self, project, articleset, **filters):
"""List the articlesets in a project"""
url = URL.articleset.format(**locals())
return self.request(url, **filters)
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
"""
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
"""
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
def create_articles(self, project, articleset, json_data=None, **options):
"""
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
"""
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI.list_sets
|
python
|
def list_sets(self, project, **filters):
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
|
List the articlesets in a project
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L313-L316
|
[
"def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):\n \"\"\"\n Get all pages at url, yielding individual results\n :param url: the url to fetch\n :param page: start from this page\n :param page_size: results per page\n :param yield_pages: yield whole pages rather than individual results\n :param filters: additional filters\n :return: a generator of objects (dicts) from the API\n \"\"\"\n n = 0\n for page in itertools.count(page):\n r = self.request(url, page=page, page_size=page_size, **filters)\n n += len(r['results'])\n log.debug(\"Got {url} page {page} / {pages}\".format(url=url, **r))\n if yield_pages:\n yield r\n else:\n for row in r['results']:\n yield row\n if r['next'] is None:\n break\n"
] |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
    """
    Return True iff the server version is at least major[.minor].

    Fixes the original comparison, which rejected e.g. version 4.0 when
    asked for at least 3.4 because it compared minor numbers regardless
    of the major version.
    """
    v = self.get_version()
    if v.major != major:
        # a different major version decides the comparison outright
        return v.major > major
    # same major: any minor is acceptable unless a minimum minor was given
    return minor is None or v.minor >= minor
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
"""
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
"""
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
"""Get the AmCAT status page"""
url = URL.status.format(**locals())
return self.get_request(url)
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
def get_set(self, project, articleset, **filters):
"""List the articlesets in a project"""
url = URL.articleset.format(**locals())
return self.request(url, **filters)
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
"""
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
"""
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
def create_articles(self, project, articleset, json_data=None, **options):
"""
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
"""
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI.get_set
|
python
|
def get_set(self, project, articleset, **filters):
url = URL.articleset.format(**locals())
return self.request(url, **filters)
|
List the articlesets in a project
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L318-L321
|
[
"def request(self, url, method=\"get\", format=\"json\", data=None,\n expected_status=None, headers=None, use_xpost=True, **options):\n \"\"\"\n Make an HTTP request to the given relative URL with the host,\n user, and password information. Returns the deserialized json\n if successful, and raises an exception otherwise\n \"\"\"\n if expected_status is None:\n if method == \"get\":\n expected_status = 200\n elif method == \"post\":\n expected_status = 201\n else:\n raise ValueError(\"No expected status supplied and method unknown.\")\n\n if not url.startswith(\"http\"):\n url = \"{self.host}/api/v4/{url}\".format(**locals())\n if format is not None:\n options = dict({'format': format}, **options)\n options = {field: value for field, value in options.items() if value is not None}\n headers = dict(headers or {}, Authorization=\"Token {}\".format(self.token))\n #headers['Accept-encoding'] = 'gzip'\n\n if method == \"get\" and use_xpost:\n # If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our\n # query via POST. This allows for a large number of parameters to be supplied\n assert(data is None)\n\n headers.update({\"X-HTTP-METHOD-OVERRIDE\": method})\n data = options\n options = None\n method = \"post\"\n\n r = requests.request(method, url, data=data, params=options, headers=headers)\n\n log.debug(\n \"HTTP {method} {url} (options={options!r}, data={data!r},\"\n \"headers={headers}) -> {r.status_code}\".format(**locals())\n )\n return check(r, expected_status=expected_status)\n"
] |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
v = self.get_version()
if v.major < major:
return False
return (minor is None) or (v.minor >= minor)
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
"""
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
"""
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
"""Get the AmCAT status page"""
url = URL.status.format(**locals())
return self.get_request(url)
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
def list_sets(self, project, **filters):
"""List the articlesets in a project"""
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
"""
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
"""
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
def create_articles(self, project, articleset, json_data=None, **options):
"""
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
"""
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI.list_articles
|
python
|
def list_articles(self, project, articleset, page=1, **filters):
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
|
List the articles in a set
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L323-L326
|
[
"def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):\n \"\"\"\n Get all pages at url, yielding individual results\n :param url: the url to fetch\n :param page: start from this page\n :param page_size: results per page\n :param yield_pages: yield whole pages rather than individual results\n :param filters: additional filters\n :return: a generator of objects (dicts) from the API\n \"\"\"\n n = 0\n for page in itertools.count(page):\n r = self.request(url, page=page, page_size=page_size, **filters)\n n += len(r['results'])\n log.debug(\"Got {url} page {page} / {pages}\".format(url=url, **r))\n if yield_pages:\n yield r\n else:\n for row in r['results']:\n yield row\n if r['next'] is None:\n break\n"
] |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
v = self.get_version()
if v.major < major:
return False
return (minor is None) or (v.minor >= minor)
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
"""
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
"""
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
"""Get the AmCAT status page"""
url = URL.status.format(**locals())
return self.get_request(url)
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
def list_sets(self, project, **filters):
"""List the articlesets in a project"""
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
def get_set(self, project, articleset, **filters):
"""List the articlesets in a project"""
url = URL.articleset.format(**locals())
return self.request(url, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
"""
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
"""
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
def create_articles(self, project, articleset, json_data=None, **options):
"""
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
"""
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI.create_set
|
python
|
def create_set(self, project, json_data=None, **options):
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
|
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L333-L347
|
[
"def request(self, url, method=\"get\", format=\"json\", data=None,\n expected_status=None, headers=None, use_xpost=True, **options):\n \"\"\"\n Make an HTTP request to the given relative URL with the host,\n user, and password information. Returns the deserialized json\n if successful, and raises an exception otherwise\n \"\"\"\n if expected_status is None:\n if method == \"get\":\n expected_status = 200\n elif method == \"post\":\n expected_status = 201\n else:\n raise ValueError(\"No expected status supplied and method unknown.\")\n\n if not url.startswith(\"http\"):\n url = \"{self.host}/api/v4/{url}\".format(**locals())\n if format is not None:\n options = dict({'format': format}, **options)\n options = {field: value for field, value in options.items() if value is not None}\n headers = dict(headers or {}, Authorization=\"Token {}\".format(self.token))\n #headers['Accept-encoding'] = 'gzip'\n\n if method == \"get\" and use_xpost:\n # If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our\n # query via POST. This allows for a large number of parameters to be supplied\n assert(data is None)\n\n headers.update({\"X-HTTP-METHOD-OVERRIDE\": method})\n data = options\n options = None\n method = \"post\"\n\n r = requests.request(method, url, data=data, params=options, headers=headers)\n\n log.debug(\n \"HTTP {method} {url} (options={options!r}, data={data!r},\"\n \"headers={headers}) -> {r.status_code}\".format(**locals())\n )\n return check(r, expected_status=expected_status)\n"
] |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
v = self.get_version()
if v.major < major:
return False
return (minor is None) or (v.minor >= minor)
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
"""
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
"""
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
"""Get the AmCAT status page"""
url = URL.status.format(**locals())
return self.get_request(url)
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
def list_sets(self, project, **filters):
"""List the articlesets in a project"""
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
def get_set(self, project, articleset, **filters):
"""List the articlesets in a project"""
url = URL.articleset.format(**locals())
return self.request(url, **filters)
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_articles(self, project, articleset, json_data=None, **options):
"""
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
"""
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
|
amcat/amcatclient
|
amcatclient/amcatclient.py
|
AmcatAPI.create_articles
|
python
|
def create_articles(self, project, articleset, json_data=None, **options):
url = URL.article.format(**locals())
# TODO duplicated from create_set, move into requests
# (or separate post method?)
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, string_types):
json_data = json.dumps(json_data, default=serialize)
headers = {'content-type': 'application/json'}
return self.request(url, method='post', data=json_data, headers=headers)
|
Create one or more articles in the set. Provide the needed arguments
using the json_data or with key-value pairs
@param json_data: A dictionary or list of dictionaries. Each dict
can contain a 'children' attribute which
is another list of dictionaries.
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/amcatclient/amcatclient.py#L349-L367
|
[
"def request(self, url, method=\"get\", format=\"json\", data=None,\n expected_status=None, headers=None, use_xpost=True, **options):\n \"\"\"\n Make an HTTP request to the given relative URL with the host,\n user, and password information. Returns the deserialized json\n if successful, and raises an exception otherwise\n \"\"\"\n if expected_status is None:\n if method == \"get\":\n expected_status = 200\n elif method == \"post\":\n expected_status = 201\n else:\n raise ValueError(\"No expected status supplied and method unknown.\")\n\n if not url.startswith(\"http\"):\n url = \"{self.host}/api/v4/{url}\".format(**locals())\n if format is not None:\n options = dict({'format': format}, **options)\n options = {field: value for field, value in options.items() if value is not None}\n headers = dict(headers or {}, Authorization=\"Token {}\".format(self.token))\n #headers['Accept-encoding'] = 'gzip'\n\n if method == \"get\" and use_xpost:\n # If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our\n # query via POST. This allows for a large number of parameters to be supplied\n assert(data is None)\n\n headers.update({\"X-HTTP-METHOD-OVERRIDE\": method})\n data = options\n options = None\n method = \"post\"\n\n r = requests.request(method, url, data=data, params=options, headers=headers)\n\n log.debug(\n \"HTTP {method} {url} (options={options!r}, data={data!r},\"\n \"headers={headers}) -> {r.status_code}\".format(**locals())\n )\n return check(r, expected_status=expected_status)\n"
] |
class AmcatAPI(object):
def __init__(self, host, user=None, password=None, token=None):
"""
Connection to an AmCAT server.
:param host: AmCAT server address, including http(s)://
:param user: Username. If not given, taken from AMCAT_USER or USER environment
:param password: Password. If not given, taken from AMCAT_PASSWORD environment, or read from ~/.amcatauth
:param token: Token to use (requires amcat >= 3.5)
"""
self.host = host
if token:
try:
self.token, self.version = self.renew_token(token)
except APIError as e:
logging.warning("Cannot renew token (requires amcat>3.5), trying normal authentication: {e}"
.format(**locals()))
token = None
if token is None:
self.token, self.version = self.get_token(user, password)
logging.info("Connected to {self.host} (AmCAT version {self.version})".format(**locals()))
def has_version(self, major=3, minor=None):
v = self.get_version()
if v.major < major:
return False
return (minor is None) or (v.minor >= minor)
def get_version(self):
m = re.match(r"(\d+)\.(\d+)(.*)", self.version)
if not m:
raise Exception("Cannot parse version string: {self.version}".format(**locals()))
return Version(int(m.group(1)), int(m.group(2)), m.group(3))
def _get_auth(self, user=None, password=None):
"""
Get the authentication info for the current user, from
1) a ~/.amcatauth file, which should be a csv file
containing host, username, password entries
2) the AMCAT_USER (or USER) and AMCAT_PASSWORD environment variables
"""
fn = os.path.expanduser(AUTH_FILE)
if os.path.exists(fn):
for i, line in enumerate(csv.reader(open(fn))):
if len(line) != 3:
log.warning("Cannot parse line {i} in {fn}".format(**locals()))
continue
hostname, username, pwd = line
if (hostname in ("", "*", self.host)
and (user is None or username == user)):
return (username, pwd)
if user is None:
user = os.environ.get("AMCAT_USER", os.environ.get("USER"))
if password is None:
password = os.environ.get("AMCAT_PASSWORD")
if user is None or password is None:
raise Exception("No authentication info for {user}@{self.host} "
"from {fn} or AMCAT_USER / AMCAT_PASSWORD "
"variables".format(**locals()))
return user, password
def renew_token(self, token):
self.token = token
resp = self.request(URL.get_token, method='post', expected_status=200)
return resp['token'], resp['version']
def get_token(self, user=None, password=None):
if user is None or password is None:
user, password = self._get_auth()
url = "{self.host}/api/v4/{url}".format(url=URL.get_token, **locals())
r = requests.post(url, data={'username': user, 'password': password})
r.raise_for_status()
r = r.json()
return r['token'], r.get('version', '3.3 (or older)')
def request(self, url, method="get", format="json", data=None,
expected_status=None, headers=None, use_xpost=True, **options):
"""
Make an HTTP request to the given relative URL with the host,
user, and password information. Returns the deserialized json
if successful, and raises an exception otherwise
"""
if expected_status is None:
if method == "get":
expected_status = 200
elif method == "post":
expected_status = 201
else:
raise ValueError("No expected status supplied and method unknown.")
if not url.startswith("http"):
url = "{self.host}/api/v4/{url}".format(**locals())
if format is not None:
options = dict({'format': format}, **options)
options = {field: value for field, value in options.items() if value is not None}
headers = dict(headers or {}, Authorization="Token {}".format(self.token))
#headers['Accept-encoding'] = 'gzip'
if method == "get" and use_xpost:
# If method is purely GET, we can use X-HTTP-METHOD-OVERRIDE to send our
# query via POST. This allows for a large number of parameters to be supplied
assert(data is None)
headers.update({"X-HTTP-METHOD-OVERRIDE": method})
data = options
options = None
method = "post"
r = requests.request(method, url, data=data, params=options, headers=headers)
log.debug(
"HTTP {method} {url} (options={options!r}, data={data!r},"
"headers={headers}) -> {r.status_code}".format(**locals())
)
return check(r, expected_status=expected_status)
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
"""
Get all pages at url, yielding individual results
:param url: the url to fetch
:param page: start from this page
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
for page in itertools.count(page):
r = self.request(url, page=page, page_size=page_size, **filters)
n += len(r['results'])
log.debug("Got {url} page {page} / {pages}".format(url=url, **r))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
def get_scroll(self, url, page_size=100, yield_pages=False, **filters):
"""
Scroll through the resource at url and yield the individual results
:param url: url to scroll through
:param page_size: results per page
:param yield_pages: yield whole pages rather than individual results
:param filters: Additional filters
:return: a generator of objects (dicts) from the API
"""
n = 0
options = dict(page_size=page_size, **filters)
format = filters.get('format')
while True:
r = self.request(url, use_xpost=False, **options)
n += len(r['results'])
log.debug("Got {} {n}/{total}".format(url.split("?")[0], total=r['total'], **locals()))
if yield_pages:
yield r
else:
for row in r['results']:
yield row
if r['next'] is None:
break
url = r['next']
options = {'format': None}
def get_status(self):
"""Get the AmCAT status page"""
url = URL.status.format(**locals())
return self.get_request(url)
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
def list_sets(self, project, **filters):
"""List the articlesets in a project"""
url = URL.articlesets.format(**locals())
return self.get_pages(url, **filters)
def get_set(self, project, articleset, **filters):
"""List the articlesets in a project"""
url = URL.articleset.format(**locals())
return self.request(url, **filters)
def list_articles(self, project, articleset, page=1, **filters):
"""List the articles in a set"""
url = URL.article.format(**locals())
return self.get_pages(url, page=page, **filters)
def get_media(self, medium_ids):
query = "&".join("pk={}".format(mid) for mid in medium_ids)
url = "{}?{}".format(URL.media, query)
return self.request(url, page_size=len(medium_ids))
def create_set(self, project, json_data=None, **options):
"""
Create a new article set. Provide the needed arguments using
post_data or with key-value pairs
"""
url = URL.articlesets.format(**locals())
if json_data is None:
# form encoded request
return self.request(url, method="post", data=options)
else:
if not isinstance(json_data, (string_types)):
json_data = json.dumps(json_data,default = serialize)
headers = {'content-type': 'application/json'}
return self.request(
url, method='post', data=json_data, headers=headers)
def get_articles(self, project, articleset=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
if self.has_version(3, 4):
url = URL.projectmeta.format(**locals())
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=",".join(columns), **options)
else:
return self.list_articles(project, articleset, page, page_size=page_size, **options)
def get_articles_by_id(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=100, **options):
url = URL.meta.format(**locals())
# we cannot use POST here, so need to limit number of ids per request
it = iter(articles)
while True:
ids = list(islice(it, page_size))
if not ids:
break
options['id'] = ids
for a in self.get_scroll(url, page_size=page_size, format=format, columns=columns, **options):
yield a
def get_articles_by_uuid(self, articles=None, format='json',
columns=['date', 'headline', 'medium'], page_size=1000, page=1, **options):
url = URL.meta.format(**locals())
options['uuid'] = articles
return self.get_scroll(url, page=page, page_size=page_size, format=format, columns=columns, **options)
def search(self, articleset, query, columns=['hits'], minimal=True, **filters):
return self.get_pages(URL.search, q=query, col=columns, minimal=minimal, sets=articleset, **filters)
|
amcat/amcatclient
|
demo_wikinews_scraper.py
|
get_pages
|
python
|
def get_pages(url):
while True:
yield url
doc = html.parse(url).find("body")
links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")]
if not links:
break
url = urljoin(url, links[0].get('href'))
|
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L45-L56
| null |
#!/usr/bin/python
from __future__ import unicode_literals, print_function, absolute_import
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
# en_wikinews_org_scraper -- scrape en.wikinews.org
# 20121218 Paul Huygen
# 20140418 Wouter van Atteveldt
"""
Simple (and not terribly good) wikinews scraper
This scraper is provided as an exmample non-trivial scraper using
the AmCAT API as back-end, and to provide non-copyrighted text for examples
"""
from urlparse import urljoin
import re
import datetime
import logging
from lxml import html, etree
logging.basicConfig(level=logging.INFO)
######################################################################
### Functions specific to reading/parsing wiki news ###
######################################################################
def get_article_urls(url):
"""
Return the articles from a page
Technically, look for a div with class mw-search-result-heading
and get the first link from this div
"""
doc = html.parse(url).getroot()
for div in doc.cssselect("div.mw-search-result-heading"):
href = div.cssselect("a")[0].get('href')
if ":" in href:
continue # skip Category: links
href = urljoin(url, href)
yield href
def export_url(url):
"""
Get the 'Special:Export' XML version url of an article
"""
page = url.split("/")[-1]
return ("http://en.wikinews.org/w/index.php?title=Special:Export"
"&action=submit&pages={}".format(page))
def get_articles(urls):
for url in urls:
try:
yield get_article(url)
except:
logging.exception("Error on scraping {}".format(url))
def get_article(url):
"""
Return a single article as a 'amcat-ready' dict
Uses the 'export' function of wikinews to get an xml article
"""
a = html.parse(url).getroot()
title = a.cssselect(".firstHeading")[0].text_content()
date = a.cssselect(".published")[0].text_content()
date = datetime.datetime.strptime(date, "%A, %B %d, %Y").isoformat()
paras = a.cssselect("#mw-content-text p")
paras = paras[1:] # skip first paragraph, which contains date
text = "\n\n".join(p.text_content().strip() for p in paras)
return dict(headline=title,
date=date,
url=url,
text=text,
medium="Wikinews")
def date_of_unit(self, doc):
# find element like '<span id="publishDate" class="value-title" title="2004-11-15">'
# and extract "title".
return doc.cssselect('#publishDate')[0].get('title')
######################################################################
### AmCAT functionality: connect to API and add articles ###
######################################################################
def scrape_wikinews(conn, project, articleset, query):
"""
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param category: The wikinews category name
"""
url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query)
logging.info(url)
for page in get_pages(url):
urls = get_article_urls(page)
arts = list(get_articles(urls))
logging.info("Adding {} articles to set {}:{}"
.format(len(arts), project, articleset))
conn.create_articles(project=project, articleset=articleset,
json_data=arts)
if __name__ == '__main__':
from amcatclient import AmcatAPI
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('host', help='The AmCAT host to connect to, '
'e.g. http://amcat.vu.nl')
parser.add_argument('project', help='Project ID to add the articles to')
parser.add_argument('query', help='Wikinews query for scraping')
parser.add_argument('--username', help='Username for AmCAT login')
parser.add_argument('--password', help='Password for AmCAT login')
args = parser.parse_args()
conn = AmcatAPI(args.host, args.username, args.password)
category = "Iraq"
articleset = conn.create_set(project=args.project,
name="Wikinews articles for {}".format(args.query),
provenance="Scraped from wikinews on {}"
.format(datetime.datetime.now().isoformat()))
scrape_wikinews(conn, args.project, articleset['id'], args.query)
|
amcat/amcatclient
|
demo_wikinews_scraper.py
|
get_article_urls
|
python
|
def get_article_urls(url):
doc = html.parse(url).getroot()
for div in doc.cssselect("div.mw-search-result-heading"):
href = div.cssselect("a")[0].get('href')
if ":" in href:
continue # skip Category: links
href = urljoin(url, href)
yield href
|
Return the articles from a page
Technically, look for a div with class mw-search-result-heading
and get the first link from this div
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L58-L70
| null |
#!/usr/bin/python
from __future__ import unicode_literals, print_function, absolute_import
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
# en_wikinews_org_scraper -- scrape en.wikinews.org
# 20121218 Paul Huygen
# 20140418 Wouter van Atteveldt
"""
Simple (and not terribly good) wikinews scraper
This scraper is provided as an exmample non-trivial scraper using
the AmCAT API as back-end, and to provide non-copyrighted text for examples
"""
from urlparse import urljoin
import re
import datetime
import logging
from lxml import html, etree
logging.basicConfig(level=logging.INFO)
######################################################################
### Functions specific to reading/parsing wiki news ###
######################################################################
def get_pages(url):
"""
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
"""
while True:
yield url
doc = html.parse(url).find("body")
links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")]
if not links:
break
url = urljoin(url, links[0].get('href'))
def export_url(url):
"""
Get the 'Special:Export' XML version url of an article
"""
page = url.split("/")[-1]
return ("http://en.wikinews.org/w/index.php?title=Special:Export"
"&action=submit&pages={}".format(page))
def get_articles(urls):
for url in urls:
try:
yield get_article(url)
except:
logging.exception("Error on scraping {}".format(url))
def get_article(url):
"""
Return a single article as a 'amcat-ready' dict
Uses the 'export' function of wikinews to get an xml article
"""
a = html.parse(url).getroot()
title = a.cssselect(".firstHeading")[0].text_content()
date = a.cssselect(".published")[0].text_content()
date = datetime.datetime.strptime(date, "%A, %B %d, %Y").isoformat()
paras = a.cssselect("#mw-content-text p")
paras = paras[1:] # skip first paragraph, which contains date
text = "\n\n".join(p.text_content().strip() for p in paras)
return dict(headline=title,
date=date,
url=url,
text=text,
medium="Wikinews")
def date_of_unit(self, doc):
# find element like '<span id="publishDate" class="value-title" title="2004-11-15">'
# and extract "title".
return doc.cssselect('#publishDate')[0].get('title')
######################################################################
### AmCAT functionality: connect to API and add articles ###
######################################################################
def scrape_wikinews(conn, project, articleset, query):
"""
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param category: The wikinews category name
"""
url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query)
logging.info(url)
for page in get_pages(url):
urls = get_article_urls(page)
arts = list(get_articles(urls))
logging.info("Adding {} articles to set {}:{}"
.format(len(arts), project, articleset))
conn.create_articles(project=project, articleset=articleset,
json_data=arts)
if __name__ == '__main__':
from amcatclient import AmcatAPI
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('host', help='The AmCAT host to connect to, '
'e.g. http://amcat.vu.nl')
parser.add_argument('project', help='Project ID to add the articles to')
parser.add_argument('query', help='Wikinews query for scraping')
parser.add_argument('--username', help='Username for AmCAT login')
parser.add_argument('--password', help='Password for AmCAT login')
args = parser.parse_args()
conn = AmcatAPI(args.host, args.username, args.password)
category = "Iraq"
articleset = conn.create_set(project=args.project,
name="Wikinews articles for {}".format(args.query),
provenance="Scraped from wikinews on {}"
.format(datetime.datetime.now().isoformat()))
scrape_wikinews(conn, args.project, articleset['id'], args.query)
|
amcat/amcatclient
|
demo_wikinews_scraper.py
|
get_article
|
python
|
def get_article(url):
a = html.parse(url).getroot()
title = a.cssselect(".firstHeading")[0].text_content()
date = a.cssselect(".published")[0].text_content()
date = datetime.datetime.strptime(date, "%A, %B %d, %Y").isoformat()
paras = a.cssselect("#mw-content-text p")
paras = paras[1:] # skip first paragraph, which contains date
text = "\n\n".join(p.text_content().strip() for p in paras)
return dict(headline=title,
date=date,
url=url,
text=text,
medium="Wikinews")
|
Return a single article as a 'amcat-ready' dict
Uses the 'export' function of wikinews to get an xml article
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L89-L106
| null |
#!/usr/bin/python
from __future__ import unicode_literals, print_function, absolute_import
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
# en_wikinews_org_scraper -- scrape en.wikinews.org
# 20121218 Paul Huygen
# 20140418 Wouter van Atteveldt
"""
Simple (and not terribly good) wikinews scraper
This scraper is provided as an exmample non-trivial scraper using
the AmCAT API as back-end, and to provide non-copyrighted text for examples
"""
from urlparse import urljoin
import re
import datetime
import logging
from lxml import html, etree
logging.basicConfig(level=logging.INFO)
######################################################################
### Functions specific to reading/parsing wiki news ###
######################################################################
def get_pages(url):
"""
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
"""
while True:
yield url
doc = html.parse(url).find("body")
links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")]
if not links:
break
url = urljoin(url, links[0].get('href'))
def get_article_urls(url):
"""
Return the articles from a page
Technically, look for a div with class mw-search-result-heading
and get the first link from this div
"""
doc = html.parse(url).getroot()
for div in doc.cssselect("div.mw-search-result-heading"):
href = div.cssselect("a")[0].get('href')
if ":" in href:
continue # skip Category: links
href = urljoin(url, href)
yield href
def export_url(url):
"""
Get the 'Special:Export' XML version url of an article
"""
page = url.split("/")[-1]
return ("http://en.wikinews.org/w/index.php?title=Special:Export"
"&action=submit&pages={}".format(page))
def get_articles(urls):
for url in urls:
try:
yield get_article(url)
except:
logging.exception("Error on scraping {}".format(url))
def date_of_unit(self, doc):
# find element like '<span id="publishDate" class="value-title" title="2004-11-15">'
# and extract "title".
return doc.cssselect('#publishDate')[0].get('title')
######################################################################
### AmCAT functionality: connect to API and add articles ###
######################################################################
def scrape_wikinews(conn, project, articleset, query):
"""
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param category: The wikinews category name
"""
url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query)
logging.info(url)
for page in get_pages(url):
urls = get_article_urls(page)
arts = list(get_articles(urls))
logging.info("Adding {} articles to set {}:{}"
.format(len(arts), project, articleset))
conn.create_articles(project=project, articleset=articleset,
json_data=arts)
if __name__ == '__main__':
from amcatclient import AmcatAPI
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('host', help='The AmCAT host to connect to, '
'e.g. http://amcat.vu.nl')
parser.add_argument('project', help='Project ID to add the articles to')
parser.add_argument('query', help='Wikinews query for scraping')
parser.add_argument('--username', help='Username for AmCAT login')
parser.add_argument('--password', help='Password for AmCAT login')
args = parser.parse_args()
conn = AmcatAPI(args.host, args.username, args.password)
category = "Iraq"
articleset = conn.create_set(project=args.project,
name="Wikinews articles for {}".format(args.query),
provenance="Scraped from wikinews on {}"
.format(datetime.datetime.now().isoformat()))
scrape_wikinews(conn, args.project, articleset['id'], args.query)
|
amcat/amcatclient
|
demo_wikinews_scraper.py
|
scrape_wikinews
|
python
|
def scrape_wikinews(conn, project, articleset, query):
url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query)
logging.info(url)
for page in get_pages(url):
urls = get_article_urls(page)
arts = list(get_articles(urls))
logging.info("Adding {} articles to set {}:{}"
.format(len(arts), project, articleset))
conn.create_articles(project=project, articleset=articleset,
json_data=arts)
|
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param category: The wikinews category name
|
train
|
https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L118-L133
|
[
"def get_pages(url):\n \"\"\"\n Return the 'pages' from the starting url\n Technically, look for the 'next 50' link, yield and download it, repeat\n \"\"\"\n while True:\n yield url\n doc = html.parse(url).find(\"body\")\n links = [a for a in doc.findall(\".//a\") if a.text and a.text.startswith(\"next \")]\n if not links:\n break\n url = urljoin(url, links[0].get('href'))\n",
"def get_articles(urls):\n for url in urls:\n try:\n yield get_article(url)\n except:\n logging.exception(\"Error on scraping {}\".format(url))\n",
"def get_article_urls(url):\n \"\"\"\n Return the articles from a page\n Technically, look for a div with class mw-search-result-heading\n and get the first link from this div\n \"\"\"\n doc = html.parse(url).getroot()\n for div in doc.cssselect(\"div.mw-search-result-heading\"):\n href = div.cssselect(\"a\")[0].get('href')\n if \":\" in href:\n continue # skip Category: links\n href = urljoin(url, href)\n yield href\n",
"def create_articles(self, project, articleset, json_data=None, **options):\n \"\"\"\n Create one or more articles in the set. Provide the needed arguments\n using the json_data or with key-value pairs\n @param json_data: A dictionary or list of dictionaries. Each dict\n can contain a 'children' attribute which\n is another list of dictionaries.\n \"\"\"\n url = URL.article.format(**locals())\n # TODO duplicated from create_set, move into requests\n # (or separate post method?)\n if json_data is None:\n # form encoded request\n return self.request(url, method=\"post\", data=options)\n else:\n if not isinstance(json_data, string_types):\n json_data = json.dumps(json_data, default=serialize)\n headers = {'content-type': 'application/json'}\n return self.request(url, method='post', data=json_data, headers=headers)\n"
] |
#!/usr/bin/python
from __future__ import unicode_literals, print_function, absolute_import
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
# en_wikinews_org_scraper -- scrape en.wikinews.org
# 20121218 Paul Huygen
# 20140418 Wouter van Atteveldt
"""
Simple (and not terribly good) wikinews scraper
This scraper is provided as an exmample non-trivial scraper using
the AmCAT API as back-end, and to provide non-copyrighted text for examples
"""
from urlparse import urljoin
import re
import datetime
import logging
from lxml import html, etree
logging.basicConfig(level=logging.INFO)
######################################################################
### Functions specific to reading/parsing wiki news ###
######################################################################
def get_pages(url):
"""
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
"""
while True:
yield url
doc = html.parse(url).find("body")
links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")]
if not links:
break
url = urljoin(url, links[0].get('href'))
def get_article_urls(url):
"""
Return the articles from a page
Technically, look for a div with class mw-search-result-heading
and get the first link from this div
"""
doc = html.parse(url).getroot()
for div in doc.cssselect("div.mw-search-result-heading"):
href = div.cssselect("a")[0].get('href')
if ":" in href:
continue # skip Category: links
href = urljoin(url, href)
yield href
def export_url(url):
"""
Get the 'Special:Export' XML version url of an article
"""
page = url.split("/")[-1]
return ("http://en.wikinews.org/w/index.php?title=Special:Export"
"&action=submit&pages={}".format(page))
def get_articles(urls):
for url in urls:
try:
yield get_article(url)
except:
logging.exception("Error on scraping {}".format(url))
def get_article(url):
"""
Return a single article as a 'amcat-ready' dict
Uses the 'export' function of wikinews to get an xml article
"""
a = html.parse(url).getroot()
title = a.cssselect(".firstHeading")[0].text_content()
date = a.cssselect(".published")[0].text_content()
date = datetime.datetime.strptime(date, "%A, %B %d, %Y").isoformat()
paras = a.cssselect("#mw-content-text p")
paras = paras[1:] # skip first paragraph, which contains date
text = "\n\n".join(p.text_content().strip() for p in paras)
return dict(headline=title,
date=date,
url=url,
text=text,
medium="Wikinews")
def date_of_unit(self, doc):
# find element like '<span id="publishDate" class="value-title" title="2004-11-15">'
# and extract "title".
return doc.cssselect('#publishDate')[0].get('title')
######################################################################
### AmCAT functionality: connect to API and add articles ###
######################################################################
if __name__ == '__main__':
from amcatclient import AmcatAPI
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('host', help='The AmCAT host to connect to, '
'e.g. http://amcat.vu.nl')
parser.add_argument('project', help='Project ID to add the articles to')
parser.add_argument('query', help='Wikinews query for scraping')
parser.add_argument('--username', help='Username for AmCAT login')
parser.add_argument('--password', help='Password for AmCAT login')
args = parser.parse_args()
conn = AmcatAPI(args.host, args.username, args.password)
category = "Iraq"
articleset = conn.create_set(project=args.project,
name="Wikinews articles for {}".format(args.query),
provenance="Scraped from wikinews on {}"
.format(datetime.datetime.now().isoformat()))
scrape_wikinews(conn, args.project, articleset['id'], args.query)
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
get_commits
|
python
|
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
repo = Repo(repo_dir)
commits = repo.iter_commits(rev="{0}..{1}".format(old_commit, new_commit))
if hide_merges:
return [x for x in commits if not x.summary.startswith("Merge ")]
else:
return list(commits)
|
Find all commits between two commit SHAs.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L181-L188
| null |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
"""Process version-mapping argparse arguments."""
def __init__(self, option_strings, dest, **kwargs):
"""Initialise instance."""
superclass = super(VersionMappingsAction, self)
superclass.__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
"""Process version-mapping string."""
version_mappings = getattr(namespace, "version_mappings",
defaultdict(dict))
if not isinstance(version_mappings, defaultdict):
version_mappings = defaultdict(dict)
repo_name, version_mapping = values.split(";", 1)
versions = {
old: new
for old_new in version_mapping.split(";")
for old, new in [old_new.split(":")]
}
version_mappings[repo_name].update(versions)
setattr(namespace, self.dest, version_mappings)
def create_parser():
"""Create argument parser."""
description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
parser = argparse.ArgumentParser(
usage='%(prog)s',
description=description,
epilog='Licensed "Apache 2.0"',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'old_commit',
action='store',
nargs=1,
help="Git SHA of the older commit",
)
parser.add_argument(
'new_commit',
action='store',
nargs=1,
help="Git SHA of the newer commit",
)
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help="Enable info output",
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help="Enable debug output",
)
parser.add_argument(
'-d', '--directory',
action='store',
default="~/.osa-differ",
help="Git repo storage directory (default: ~/.osa-differ)",
)
parser.add_argument(
'-rr', '--role-requirements',
action='store',
default='ansible-role-requirements.yml',
help="Name of the ansible role requirements file to read",
)
parser.add_argument(
'-u', '--update',
action='store_true',
default=False,
help="Fetch latest changes to repo",
)
parser.add_argument(
'--osa-repo-url',
action='store',
default='https://git.openstack.org/openstack/openstack-ansible',
help="URL of the openstack-ansible git repo",
)
parser.add_argument(
'--version-mappings',
action=VersionMappingsAction,
help=(
"Map dependency versions in cases where the old version no longer "
"exists. The argument should be of the form "
"'repo-name;old-version1:new-version1;old-version2:new-version2'."
),
)
display_opts = parser.add_argument_group("Limit scope")
display_opts.add_argument(
"--skip-projects",
action="store_true",
help="Skip checking for changes in OpenStack projects"
)
display_opts.add_argument(
"--skip-roles",
action="store_true",
help="Skip checking for changes in OpenStack-Ansible roles"
)
release_note_opts = parser.add_argument_group("Release notes")
release_note_opts.add_argument(
"--release-notes",
action="store_true",
help=("Print reno release notes for OpenStack-Ansible "
"between the two commits")
)
output_desc = ("Output is printed to stdout by default.")
output_opts = parser.add_argument_group('Output options', output_desc)
output_opts.add_argument(
'--quiet',
action='store_true',
default=False,
help="Do not output to stdout",
)
output_opts.add_argument(
'--gist',
action='store_true',
default=False,
help="Output into a GitHub Gist",
)
output_opts.add_argument(
'--file',
metavar="FILENAME",
action='store',
help="Output to a file",
)
return parser
def get_commit_url(repo_url):
"""Determine URL to view commits for repo."""
if "github.com" in repo_url:
return repo_url[:-4] if repo_url.endswith(".git") else repo_url
if "git.openstack.org" in repo_url:
uri = '/'.join(repo_url.split('/')[-2:])
return "https://github.com/{0}".format(uri)
# If it didn't match these conditions, just return it.
return repo_url
def get_projects(osa_repo_dir, commit):
"""Get all projects from multiple YAML files."""
# Check out the correct commit SHA from the repository
repo = Repo(osa_repo_dir)
checkout(repo, commit)
yaml_files = glob.glob(
'{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
)
yaml_parsed = []
for yaml_file in yaml_files:
with open(yaml_file, 'r') as f:
yaml_parsed.append(yaml.load(f))
merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
return normalize_yaml(merged_dicts)
def checkout(repo, ref):
"""Checkout a repoself."""
# Delete local branch if it exists, remote branch will be tracked
# automatically. This prevents stale local branches from causing problems.
# It also avoids problems with appending origin/ to refs as that doesn't
# work with tags, SHAs, and upstreams not called origin.
if ref in repo.branches:
# eg delete master but leave origin/master
log.warn("Removing local branch {b} for repo {r}".format(b=ref,
r=repo))
# Can't delete currently checked out branch, so make sure head is
# detached before deleting.
repo.head.reset(index=True, working_tree=True)
repo.git.checkout(repo.head.commit.hexsha)
repo.delete_head(ref, '--force')
log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo,
ref=ref))
repo.head.reset(index=True, working_tree=True)
repo.git.checkout(ref)
repo.head.reset(index=True, working_tree=True)
sha = repo.head.commit.hexsha
log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
"""Read OSA role information at a particular commit."""
repo = Repo(osa_repo_dir)
checkout(repo, commit)
log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
f=role_requirements))
filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
with open(filename, 'r') as f:
roles_yaml = yaml.load(f)
return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
args):
"""Create initial RST report header for OpenStack-Ansible."""
update_repo(repo_dir, args.osa_repo_url, args.update)
# Are these commits valid?
validate_commits(repo_dir, [old_commit, new_commit])
# Do we have a valid commit range?
validate_commit_range(repo_dir, old_commit, new_commit)
# Get the commits in the range
commits = get_commits(repo_dir, old_commit, new_commit)
# Start off our report with a header and our OpenStack-Ansible commits.
template_vars = {
'args': args,
'repo': 'openstack-ansible',
'commits': commits,
'commit_base_url': get_commit_url(args.osa_repo_url),
'old_sha': old_commit,
'new_sha': new_commit
}
return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
version_mappings=None):
"""Create RST report from a list of projects/roles."""
report = ""
version_mappings = version_mappings or {}
for new_pin in new_pins:
repo_name, repo_url, commit_sha = new_pin
commit_sha = version_mappings.get(repo_name, {}
).get(commit_sha, commit_sha)
# Prepare our repo directory and clone the repo if needed. Only pull
# if the user requests it.
repo_dir = "{0}/{1}".format(storage_directory, repo_name)
update_repo(repo_dir, repo_url, do_update)
# Get the old SHA from the previous pins. If this pin didn't exist
# in the previous OSA revision, skip it. This could happen with newly-
# added projects and roles.
try:
commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
except Exception:
continue
else:
commit_sha_old = version_mappings.get(repo_name, {}
).get(commit_sha_old,
commit_sha_old)
# Loop through the commits and render our template.
validate_commits(repo_dir, [commit_sha_old, commit_sha])
commits = get_commits(repo_dir, commit_sha_old, commit_sha)
template_vars = {
'repo': repo_name,
'commits': commits,
'commit_base_url': get_commit_url(repo_url),
'old_sha': commit_sha_old,
'new_sha': commit_sha
}
rst = render_template('offline-repo-changes.j2', template_vars)
report += rst
return report
def normalize_yaml(yaml):
"""Normalize the YAML from project and role lookups.
These are returned as a list of tuples.
"""
if isinstance(yaml, list):
# Normalize the roles YAML data
normalized_yaml = [(x['name'], x['src'], x.get('version', 'HEAD'))
for x in yaml]
else:
# Extract the project names from the roles YAML and create a list of
# tuples.
projects = [x[:-9] for x in yaml.keys() if x.endswith('git_repo')]
normalized_yaml = []
for project in projects:
repo_url = yaml['{0}_git_repo'.format(project)]
commit_sha = yaml['{0}_git_install_branch'.format(project)]
normalized_yaml.append((project, repo_url, commit_sha))
return normalized_yaml
def parse_arguments():
"""Parse arguments."""
parser = create_parser()
args = parser.parse_args()
return args
def post_gist(report_data, old_sha, new_sha):
"""Post the report to a GitHub Gist and return the URL of the gist."""
payload = {
"description": ("Changes in OpenStack-Ansible between "
"{0} and {1}".format(old_sha, new_sha)),
"public": True,
"files": {
"osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
"content": report_data
}
}
}
url = "https://api.github.com/gists"
r = requests.post(url, data=json.dumps(payload))
response = r.json()
return response['html_url']
def publish_report(report, args, old_commit, new_commit):
"""Publish the RST report based on the user request."""
# Print the report to stdout unless the user specified --quiet.
output = ""
if not args.quiet and not args.gist and not args.file:
return report
if args.gist:
gist_url = post_gist(report, old_commit, new_commit)
output += "\nReport posted to GitHub Gist: {0}".format(gist_url)
if args.file is not None:
with open(args.file, 'w') as f:
f.write(report)
output += "\nReport written to file: {0}".format(args.file)
return output
def prepare_storage_dir(storage_directory):
"""Prepare the storage directory."""
storage_directory = os.path.expanduser(storage_directory)
if not os.path.exists(storage_directory):
os.mkdir(storage_directory)
return storage_directory
def render_template(template_file, template_vars):
"""Render a jinja template."""
# Load our Jinja templates
template_dir = "{0}/templates".format(
os.path.dirname(os.path.abspath(__file__))
)
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir),
trim_blocks=True
)
rendered = jinja_env.get_template(template_file).render(template_vars)
return rendered
def repo_clone(repo_dir, repo_url):
"""Clone repository to this host."""
repo = Repo.clone_from(repo_url, repo_dir)
return repo
def repo_pull(repo_dir, repo_url, fetch=False):
"""Reset repository and optionally update it."""
# Make sure the repository is reset to the master branch.
repo = Repo(repo_dir)
repo.git.clean("-df")
repo.git.reset("--hard")
repo.git.checkout("master")
repo.head.reset(index=True, working_tree=True)
# Compile the refspec appropriately to ensure
# that if the repo is from github it includes
# all the refs needed, including PR's.
refspec_list = [
"+refs/heads/*:refs/remotes/origin/*",
"+refs/heads/*:refs/heads/*",
"+refs/tags/*:refs/tags/*"
]
if "github.com" in repo_url:
refspec_list.extend([
"+refs/pull/*:refs/remotes/origin/pr/*",
"+refs/heads/*:refs/remotes/origin/*"])
# Only get the latest updates if requested.
if fetch:
repo.git.fetch(["-u", "-v", "-f",
repo_url,
refspec_list])
return repo
def update_repo(repo_dir, repo_url, fetch=False):
"""Clone the repo if it doesn't exist already, otherwise update it."""
repo_exists = os.path.exists(repo_dir)
if not repo_exists:
log.info("Cloning repo {}".format(repo_url))
repo = repo_clone(repo_dir, repo_url)
# Make sure the repo is properly prepared
# and has all the refs required
log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
repo = repo_pull(repo_dir, repo_url, fetch)
return repo
def validate_commits(repo_dir, commits):
"""Test if a commit is valid for the repository."""
log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
repo = Repo(repo_dir)
for commit in commits:
try:
commit = repo.commit(commit)
except Exception:
msg = ("Commit {commit} could not be found in repo {repo}. "
"You may need to pass --update to fetch the latest "
"updates to the git repositories stored on "
"your local computer.".format(repo=repo_dir, commit=commit))
raise exceptions.InvalidCommitException(msg)
return True
def validate_commit_range(repo_dir, old_commit, new_commit):
"""Check if commit range is valid. Flip it if needed."""
# Are there any commits between the two commits that were provided?
try:
commits = get_commits(repo_dir, old_commit, new_commit)
except Exception:
commits = []
if len(commits) == 0:
# The user might have gotten their commits out of order. Let's flip
# the order of the commits and try again.
try:
commits = get_commits(repo_dir, new_commit, old_commit)
except Exception:
commits = []
if len(commits) == 0:
# Okay, so there really are no commits between the two commits
# provided by the user. :)
msg = ("The commit range {0}..{1} is invalid for {2}."
"You may need to use the --update option to fetch the "
"latest updates to the git repositories stored on your "
"local computer.".format(old_commit, new_commit, repo_dir))
raise exceptions.InvalidCommitRangeException(msg)
else:
return 'flip'
return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
"""Get release notes between the two revisions."""
repo = Repo(osa_repo_dir)
# Get a list of tags, sorted
tags = repo.git.tag().split('\n')
tags = sorted(tags, key=LooseVersion)
# Currently major tags are being printed after rc and
# b tags. We need to fix the list so that major
# tags are printed before rc and b releases
tags = _fix_tags_list(tags)
# Find the closest tag from a given SHA
# The tag found here is the tag that was cut
# either on or before the given SHA
checkout(repo, osa_old_commit)
old_tag = repo.git.describe()
# If the SHA given is between two release tags, then
# 'git describe' will return a tag in form of
# <tag>-<commitNum>-<sha>. For example:
# 14.0.2-3-g6931e26
# Since reno does not support this format, we need to
# strip away the commit number and sha bits.
if '-' in old_tag:
old_tag = old_tag[0:old_tag.index('-')]
# Get the nearest tag associated with the new commit
checkout(repo, osa_new_commit)
new_tag = repo.git.describe()
if '-' in new_tag:
nearest_new_tag = new_tag[0:new_tag.index('-')]
else:
nearest_new_tag = new_tag
# Truncate the tags list to only include versions
# between old_sha and new_sha. The latest release
# is not included in this list. That version will be
# printed separately in the following step.
tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
release_notes = ""
# Checkout the new commit, then run reno to get the latest
# releasenotes that have been created or updated between
# the latest release and this new commit.
repo.git.checkout(osa_new_commit, '-f')
reno_report_command = ['reno',
'report',
'--earliest-version',
nearest_new_tag]
reno_report_p = subprocess.Popen(reno_report_command,
cwd=osa_repo_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
reno_output = reno_report_p.communicate()[0].decode('UTF-8')
release_notes += reno_output
# We want to start with the latest packaged release first, so
# the tags list is reversed
for version in reversed(tags):
# If version is an rc or b tag, and it has a major
# release tag, then skip it. There is no need to print
# release notes for an rc or b release unless we are
# comparing shas between two rc or b releases.
repo.git.checkout(version, '-f')
# We are outputing one version at a time here
reno_report_command = ['reno',
'report',
'--branch',
version,
'--earliest-version',
version]
reno_report_p = subprocess.Popen(reno_report_command,
cwd=osa_repo_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
reno_output = reno_report_p.communicate()[0].decode('UTF-8')
# We need to ensure the output includes the version we are concerned
# about.
# This is due to https://bugs.launchpad.net/reno/+bug/1670173
if version in reno_output:
release_notes += reno_output
# Clean up "Release Notes" title. We don't need this title for
# each tagged release.
release_notes = release_notes.replace(
"=============\nRelease Notes\n=============",
""
)
# Replace headers that contain '=' with '~' to comply with osa-differ's
# formatting
release_notes = re.sub('===+', _equal_to_tilde, release_notes)
# Replace headers that contain '-' with '#' to comply with osa-differ's
# formatting
release_notes = re.sub('---+', _dash_to_num, release_notes)
return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
"""Start here."""
# Get our arguments from the command line
args = parse_arguments()
# Set up DEBUG logging if needed
if args.debug:
log.setLevel(logging.DEBUG)
elif args.verbose:
log.setLevel(logging.INFO)
# Create the storage directory if it doesn't exist already.
try:
storage_directory = prepare_storage_dir(args.directory)
except OSError:
print("ERROR: Couldn't create the storage directory {0}. "
"Please create it manually.".format(args.directory))
sys.exit(1)
# Assemble some variables for the OSA repository.
osa_old_commit = args.old_commit[0]
osa_new_commit = args.new_commit[0]
osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
# Generate OpenStack-Ansible report header.
report_rst = make_osa_report(osa_repo_dir,
osa_old_commit,
osa_new_commit,
args)
# Get OpenStack-Ansible Reno release notes for the packaged
# releases between the two commits.
if args.release_notes:
report_rst += ("\nRelease Notes\n"
"-------------")
report_rst += get_release_notes(osa_repo_dir,
osa_old_commit,
osa_new_commit)
# Get the list of OpenStack roles from the newer and older commits.
role_yaml = get_roles(osa_repo_dir,
osa_old_commit,
args.role_requirements)
role_yaml_latest = get_roles(osa_repo_dir,
osa_new_commit,
args.role_requirements)
if not args.skip_roles:
# Generate the role report.
report_rst += ("\nOpenStack-Ansible Roles\n"
"-----------------------")
report_rst += make_report(storage_directory,
role_yaml,
role_yaml_latest,
args.update,
args.version_mappings)
if not args.skip_projects:
# Get the list of OpenStack projects from newer commit and older
# commit.
project_yaml = get_projects(osa_repo_dir, osa_old_commit)
project_yaml_latest = get_projects(osa_repo_dir,
osa_new_commit)
# Generate the project report.
report_rst += ("\nOpenStack Projects\n"
"------------------")
report_rst += make_report(storage_directory,
project_yaml,
project_yaml_latest,
args.update)
# Publish report according to the user's request.
output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
print(output)
if __name__ == "__main__":
run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
get_commit_url
|
python
|
def get_commit_url(repo_url):
if "github.com" in repo_url:
return repo_url[:-4] if repo_url.endswith(".git") else repo_url
if "git.openstack.org" in repo_url:
uri = '/'.join(repo_url.split('/')[-2:])
return "https://github.com/{0}".format(uri)
# If it didn't match these conditions, just return it.
return repo_url
|
Determine URL to view commits for repo.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L191-L200
| null |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
"""Process version-mapping argparse arguments."""
def __init__(self, option_strings, dest, **kwargs):
"""Initialise instance."""
superclass = super(VersionMappingsAction, self)
superclass.__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
"""Process version-mapping string."""
version_mappings = getattr(namespace, "version_mappings",
defaultdict(dict))
if not isinstance(version_mappings, defaultdict):
version_mappings = defaultdict(dict)
repo_name, version_mapping = values.split(";", 1)
versions = {
old: new
for old_new in version_mapping.split(";")
for old, new in [old_new.split(":")]
}
version_mappings[repo_name].update(versions)
setattr(namespace, self.dest, version_mappings)
def create_parser():
"""Create argument parser."""
description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
parser = argparse.ArgumentParser(
usage='%(prog)s',
description=description,
epilog='Licensed "Apache 2.0"',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'old_commit',
action='store',
nargs=1,
help="Git SHA of the older commit",
)
parser.add_argument(
'new_commit',
action='store',
nargs=1,
help="Git SHA of the newer commit",
)
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help="Enable info output",
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help="Enable debug output",
)
parser.add_argument(
'-d', '--directory',
action='store',
default="~/.osa-differ",
help="Git repo storage directory (default: ~/.osa-differ)",
)
parser.add_argument(
'-rr', '--role-requirements',
action='store',
default='ansible-role-requirements.yml',
help="Name of the ansible role requirements file to read",
)
parser.add_argument(
'-u', '--update',
action='store_true',
default=False,
help="Fetch latest changes to repo",
)
parser.add_argument(
'--osa-repo-url',
action='store',
default='https://git.openstack.org/openstack/openstack-ansible',
help="URL of the openstack-ansible git repo",
)
parser.add_argument(
'--version-mappings',
action=VersionMappingsAction,
help=(
"Map dependency versions in cases where the old version no longer "
"exists. The argument should be of the form "
"'repo-name;old-version1:new-version1;old-version2:new-version2'."
),
)
display_opts = parser.add_argument_group("Limit scope")
display_opts.add_argument(
"--skip-projects",
action="store_true",
help="Skip checking for changes in OpenStack projects"
)
display_opts.add_argument(
"--skip-roles",
action="store_true",
help="Skip checking for changes in OpenStack-Ansible roles"
)
release_note_opts = parser.add_argument_group("Release notes")
release_note_opts.add_argument(
"--release-notes",
action="store_true",
help=("Print reno release notes for OpenStack-Ansible "
"between the two commits")
)
output_desc = ("Output is printed to stdout by default.")
output_opts = parser.add_argument_group('Output options', output_desc)
output_opts.add_argument(
'--quiet',
action='store_true',
default=False,
help="Do not output to stdout",
)
output_opts.add_argument(
'--gist',
action='store_true',
default=False,
help="Output into a GitHub Gist",
)
output_opts.add_argument(
'--file',
metavar="FILENAME",
action='store',
help="Output to a file",
)
return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
"""Find all commits between two commit SHAs."""
repo = Repo(repo_dir)
commits = repo.iter_commits(rev="{0}..{1}".format(old_commit, new_commit))
if hide_merges:
return [x for x in commits if not x.summary.startswith("Merge ")]
else:
return list(commits)
def get_projects(osa_repo_dir, commit):
"""Get all projects from multiple YAML files."""
# Check out the correct commit SHA from the repository
repo = Repo(osa_repo_dir)
checkout(repo, commit)
yaml_files = glob.glob(
'{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
)
yaml_parsed = []
for yaml_file in yaml_files:
with open(yaml_file, 'r') as f:
yaml_parsed.append(yaml.load(f))
merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
return normalize_yaml(merged_dicts)
def checkout(repo, ref):
"""Checkout a repoself."""
# Delete local branch if it exists, remote branch will be tracked
# automatically. This prevents stale local branches from causing problems.
# It also avoids problems with appending origin/ to refs as that doesn't
# work with tags, SHAs, and upstreams not called origin.
if ref in repo.branches:
# eg delete master but leave origin/master
log.warn("Removing local branch {b} for repo {r}".format(b=ref,
r=repo))
# Can't delete currently checked out branch, so make sure head is
# detached before deleting.
repo.head.reset(index=True, working_tree=True)
repo.git.checkout(repo.head.commit.hexsha)
repo.delete_head(ref, '--force')
log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo,
ref=ref))
repo.head.reset(index=True, working_tree=True)
repo.git.checkout(ref)
repo.head.reset(index=True, working_tree=True)
sha = repo.head.commit.hexsha
log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    :param osa_repo_dir: Path to the local openstack-ansible clone.
    :param commit: Git ref to check out before reading the file.
    :param role_requirements: Role requirements filename relative to the
        repo root (e.g. ``ansible-role-requirements.yml``).
    :returns: List of ``(name, src, version)`` tuples.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load instead of the deprecated bare yaml.load: the file is
        # plain data and must not construct arbitrary Python objects.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible.

    :param repo_dir: Path to the local openstack-ansible clone.
    :param old_commit: Older git ref of the comparison.
    :param new_commit: Newer git ref of the comparison.
    :param args: Parsed CLI namespace (uses ``osa_repo_url``/``update``).
    :returns: Rendered RST header including the OSA commit list.
    :raises exceptions.InvalidCommitException: if either ref is unknown.
    :raises exceptions.InvalidCommitRangeException: if no commits exist
        between the refs in either direction.
    """
    update_repo(repo_dir, args.osa_repo_url, args.update)
    # Are these commits valid?
    validate_commits(repo_dir, [old_commit, new_commit])
    # Do we have a valid commit range?
    validate_commit_range(repo_dir, old_commit, new_commit)
    # Get the commits in the range
    commits = get_commits(repo_dir, old_commit, new_commit)
    # Start off our report with a header and our OpenStack-Ansible commits.
    template_vars = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': commits,
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit
    }
    return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    :param storage_directory: Directory holding the cached git clones.
    :param old_pins: ``(name, url, sha)`` tuples from the older commit.
    :param new_pins: ``(name, url, sha)`` tuples from the newer commit.
    :param do_update: Fetch the latest refs for each repo when True.
    :param version_mappings: Optional ``{repo: {old_sha: new_sha}}``
        overrides for pins that no longer exist upstream.
    :returns: Concatenated RST, one section per repo in ``new_pins``.
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        # Apply any user-supplied SHA override for the new pin.
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)
        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)
        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except Exception:
            continue
        else:
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)
        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst
    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    These are returned as a list of tuples.
    """
    if isinstance(yaml, list):
        # Role requirements: a list of {name, src, version?} mappings;
        # a missing version defaults to HEAD.
        return [(role['name'], role['src'], role.get('version', 'HEAD'))
                for role in yaml]
    # Project pins: a flat mapping with '<project>_git_repo' and
    # '<project>_git_install_branch' keys.
    normalized = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        project = key[:-9]
        normalized.append((
            project,
            yaml['{0}_git_repo'.format(project)],
            yaml['{0}_git_install_branch'.format(project)],
        ))
    return normalized
def parse_arguments():
    """Parse arguments."""
    # Build the parser and hand back the parsed CLI namespace.
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist.

    :param report_data: RST report body to publish.
    :param old_sha: Older ref, used in the gist description/filename.
    :param new_sha: Newer ref, used in the gist description/filename.
    :returns: The gist's ``html_url``.
    :raises requests.HTTPError: if GitHub rejects the gist creation.
    """
    payload = {
        "description": ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha)),
        "public": True,
        "files": {
            "osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
                "content": report_data
            }
        }
    }
    url = "https://api.github.com/gists"
    r = requests.post(url, data=json.dumps(payload))
    # Fail loudly with the HTTP status instead of a confusing KeyError
    # when GitHub rejects the request (rate limit, payload size, etc.).
    r.raise_for_status()
    response = r.json()
    return response['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    output = ""
    # Default path: nothing suppresses stdout and no alternate sink was
    # requested, so hand the report back verbatim for printing.
    if not (args.quiet or args.gist or args.file):
        return report
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)
    if args.file is not None:
        with open(args.file, 'w') as handle:
            handle.write(report)
        output += "\nReport written to file: {0}".format(args.file)
    return output
def prepare_storage_dir(storage_directory):
    """Prepare the storage directory.

    Expands a leading ``~`` and creates the directory (including any
    missing parent directories) if it does not exist yet.

    :param storage_directory: Path of the directory to prepare.
    :returns: The expanded path actually used.
    :raises OSError: if the directory cannot be created.
    """
    storage_directory = os.path.expanduser(storage_directory)
    if not os.path.exists(storage_directory):
        # makedirs (rather than mkdir) so nested paths such as
        # ~/.cache/osa-differ work on a fresh machine.
        os.makedirs(storage_directory)
    return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template.

    Templates are loaded from the ``templates`` directory that sits
    next to this module on disk.

    :param template_file: Template filename (e.g. ``offline-header.j2``).
    :param template_vars: Mapping of variables passed to the template.
    :returns: The rendered template as a string.
    """
    # Load our Jinja templates
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    jinja_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    rendered = jinja_env.get_template(template_file).render(template_vars)
    return rendered
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host."""
    # Thin wrapper around GitPython's clone_from.
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    The working tree is forcibly cleaned and reset onto ``master``
    before anything else, so any local modifications inside the cache
    directory are discarded.

    :param repo_dir: Path of the local clone.
    :param repo_url: Upstream URL used for fetching.
    :param fetch: Fetch the refspecs below from ``repo_url`` when True.
    :returns: The ``git.Repo`` for ``repo_dir``.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)
    # Make sure the repo is properly prepared
    # and has all the refs required
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Test if a commit is valid for the repository."""
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for commit in commits:
        try:
            commit = repo.commit(commit)
        except Exception:
            # GitPython raises several exception types for unknown refs;
            # translate any of them into our own exception.
            raise exceptions.InvalidCommitException(
                "Commit {commit} could not be found in repo {repo}. "
                "You may need to pass --update to fetch the latest "
                "updates to the git repositories stored on "
                "your local computer.".format(repo=repo_dir, commit=commit)
            )
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed.

    :returns: ``True`` when the range is valid as given, or the string
        ``'flip'`` when it is only valid with the commits reversed.
    :raises exceptions.InvalidCommitRangeException: if no commits exist
        between the two refs in either direction.
    """
    # Are there any commits between the two commits that were provided?
    try:
        commits = get_commits(repo_dir, old_commit, new_commit)
    except Exception:
        commits = []
    if len(commits) == 0:
        # The user might have gotten their commits out of order. Let's flip
        # the order of the commits and try again.
        try:
            commits = get_commits(repo_dir, new_commit, old_commit)
        except Exception:
            commits = []
        if len(commits) == 0:
            # Okay, so there really are no commits between the two commits
            # provided by the user. :)
            # (Message fix: a space was missing after the first sentence.)
            msg = ("The commit range {0}..{1} is invalid for {2}. "
                   "You may need to use the --update option to fetch the "
                   "latest updates to the git repositories stored on your "
                   "local computer.".format(old_commit, new_commit, repo_dir))
            raise exceptions.InvalidCommitRangeException(msg)
        else:
            return 'flip'
    return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Runs ``reno report`` once for the unreleased notes on the new commit
    and once per packaged release tag between the two commits, then
    stitches the RST together with header underline characters rewritten
    to match osa-differ's heading levels.

    NOTE(review): assumes the ``reno`` command is installed and on
    $PATH -- confirm.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Start here.

    Top-level driver: parse CLI args, prepare the clone cache, build the
    OSA header report, optionally append release notes, then append role
    and project change sections, and finally publish the result.
    """
    # Get our arguments from the command line
    args = parse_arguments()
    # Set up DEBUG logging if needed
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)
    # Create the storage directory if it doesn't exist already.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)
    # Assemble some variables for the OSA repository.
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
    # Generate OpenStack-Ansible report header.
    report_rst = make_osa_report(osa_repo_dir,
                                 osa_old_commit,
                                 osa_new_commit,
                                 args)
    # Get OpenStack-Ansible Reno release notes for the packaged
    # releases between the two commits.
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir,
                                        osa_old_commit,
                                        osa_new_commit)
    # Get the list of OpenStack roles from the newer and older commits.
    # NOTE(review): the role files are read even when --skip-roles is
    # given; only the report section is skipped below -- confirm intent.
    role_yaml = get_roles(osa_repo_dir,
                          osa_old_commit,
                          args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir,
                                 osa_new_commit,
                                 args.role_requirements)
    if not args.skip_roles:
        # Generate the role report.
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_directory,
                                  role_yaml,
                                  role_yaml_latest,
                                  args.update,
                                  args.version_mappings)
    if not args.skip_projects:
        # Get the list of OpenStack projects from newer commit and older
        # commit.
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir,
                                           osa_new_commit)
        # Generate the project report.
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory,
                                  project_yaml,
                                  project_yaml_latest,
                                  args.update)
    # Publish report according to the user's request.
    output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
    print(output)
if __name__ == "__main__":
    # Allow running this module directly as a script.
    run_osa_differ()
|
rcbops/osa_differ
|
osa_differ/osa_differ.py
|
get_projects
|
python
|
def get_projects(osa_repo_dir, commit):
# Check out the correct commit SHA from the repository
repo = Repo(osa_repo_dir)
checkout(repo, commit)
yaml_files = glob.glob(
'{0}/playbooks/defaults/repo_packages/*.yml'.format(osa_repo_dir)
)
yaml_parsed = []
for yaml_file in yaml_files:
with open(yaml_file, 'r') as f:
yaml_parsed.append(yaml.load(f))
merged_dicts = {k: v for d in yaml_parsed for k, v in d.items()}
return normalize_yaml(merged_dicts)
|
Get all projects from multiple YAML files.
|
train
|
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L203-L219
|
[
"def checkout(repo, ref):\n \"\"\"Checkout a repoself.\"\"\"\n # Delete local branch if it exists, remote branch will be tracked\n # automatically. This prevents stale local branches from causing problems.\n # It also avoids problems with appending origin/ to refs as that doesn't\n # work with tags, SHAs, and upstreams not called origin.\n if ref in repo.branches:\n # eg delete master but leave origin/master\n log.warn(\"Removing local branch {b} for repo {r}\".format(b=ref,\n r=repo))\n # Can't delete currently checked out branch, so make sure head is\n # detached before deleting.\n\n repo.head.reset(index=True, working_tree=True)\n repo.git.checkout(repo.head.commit.hexsha)\n repo.delete_head(ref, '--force')\n\n log.info(\"Checkout out repo {repo} to ref {ref}\".format(repo=repo,\n ref=ref))\n repo.head.reset(index=True, working_tree=True)\n repo.git.checkout(ref)\n repo.head.reset(index=True, working_tree=True)\n sha = repo.head.commit.hexsha\n log.info(\"Current SHA for repo {repo} is {sha}\".format(repo=repo, sha=sha))\n",
"def normalize_yaml(yaml):\n \"\"\"Normalize the YAML from project and role lookups.\n\n These are returned as a list of tuples.\n \"\"\"\n if isinstance(yaml, list):\n # Normalize the roles YAML data\n normalized_yaml = [(x['name'], x['src'], x.get('version', 'HEAD'))\n for x in yaml]\n else:\n # Extract the project names from the roles YAML and create a list of\n # tuples.\n projects = [x[:-9] for x in yaml.keys() if x.endswith('git_repo')]\n normalized_yaml = []\n for project in projects:\n repo_url = yaml['{0}_git_repo'.format(project)]\n commit_sha = yaml['{0}_git_install_branch'.format(project)]\n normalized_yaml.append((project, repo_url, commit_sha))\n\n return normalized_yaml\n"
] |
#!/usr/bin/env python
# Copyright 2016, Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes the differences between two OpenStack-Ansible commits."""
import argparse
import glob
import json
import logging
import os
import re
import subprocess
import sys
from collections import defaultdict
from distutils.version import LooseVersion
from git import Repo
import jinja2
import requests
import yaml
from . import exceptions
# Configure logging
# Root logger defaults to ERROR; run_osa_differ() raises the level to
# INFO/DEBUG when --verbose/--debug is passed.
log = logging.getLogger()
log.setLevel(logging.ERROR)
class VersionMappingsAction(argparse.Action):
    """Process version-mapping argparse arguments."""
    def __init__(self, option_strings, dest, **kwargs):
        """Initialise instance."""
        superclass = super(VersionMappingsAction, self)
        superclass.__init__(option_strings, dest, **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        """Process version-mapping string.

        ``values`` has the form ``repo-name;old1:new1;old2:new2``.
        Results are accumulated into a ``defaultdict(dict)`` keyed by
        repo name so the option may be given multiple times.
        """
        version_mappings = getattr(namespace, "version_mappings",
                                   defaultdict(dict))
        if not isinstance(version_mappings, defaultdict):
            # argparse pre-sets the dest to its default (None); coerce
            # anything that isn't already our accumulator.
            version_mappings = defaultdict(dict)
        repo_name, version_mapping = values.split(";", 1)
        versions = {
            old: new
            for old_new in version_mapping.split(";")
            for old, new in [old_new.split(":")]
        }
        version_mappings[repo_name].update(versions)
        setattr(namespace, self.dest, version_mappings)
def create_parser():
    """Create argument parser.

    :returns: A configured :class:`argparse.ArgumentParser` for the
        osa-differ command line.
    """
    description = """Generate OpenStack-Ansible Diff
----------------------------------------
Finds changes in OpenStack projects and OpenStack-Ansible roles between two
commits in OpenStack-Ansible.
"""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description=description,
        epilog='Licensed "Apache 2.0"',
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument(
        'old_commit',
        action='store',
        nargs=1,
        help="Git SHA of the older commit",
    )
    parser.add_argument(
        'new_commit',
        action='store',
        nargs=1,
        help="Git SHA of the newer commit",
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        default=False,
        help="Enable info output",
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="Enable debug output",
    )
    parser.add_argument(
        '-d', '--directory',
        action='store',
        default="~/.osa-differ",
        help="Git repo storage directory (default: ~/.osa-differ)",
    )
    parser.add_argument(
        '-rr', '--role-requirements',
        action='store',
        default='ansible-role-requirements.yml',
        help="Name of the ansible role requirements file to read",
    )
    parser.add_argument(
        '-u', '--update',
        action='store_true',
        default=False,
        help="Fetch latest changes to repo",
    )
    parser.add_argument(
        '--osa-repo-url',
        action='store',
        default='https://git.openstack.org/openstack/openstack-ansible',
        help="URL of the openstack-ansible git repo",
    )
    parser.add_argument(
        '--version-mappings',
        action=VersionMappingsAction,
        help=(
            "Map dependency versions in cases where the old version no longer "
            "exists. The argument should be of the form "
            "'repo-name;old-version1:new-version1;old-version2:new-version2'."
        ),
    )
    display_opts = parser.add_argument_group("Limit scope")
    display_opts.add_argument(
        "--skip-projects",
        action="store_true",
        help="Skip checking for changes in OpenStack projects"
    )
    display_opts.add_argument(
        "--skip-roles",
        action="store_true",
        help="Skip checking for changes in OpenStack-Ansible roles"
    )
    release_note_opts = parser.add_argument_group("Release notes")
    release_note_opts.add_argument(
        "--release-notes",
        action="store_true",
        help=("Print reno release notes for OpenStack-Ansible "
              "between the two commits")
    )
    output_desc = ("Output is printed to stdout by default.")
    output_opts = parser.add_argument_group('Output options', output_desc)
    output_opts.add_argument(
        '--quiet',
        action='store_true',
        default=False,
        help="Do not output to stdout",
    )
    output_opts.add_argument(
        '--gist',
        action='store_true',
        default=False,
        help="Output into a GitHub Gist",
    )
    output_opts.add_argument(
        '--file',
        metavar="FILENAME",
        action='store',
        help="Output to a file",
    )
    return parser
def get_commits(repo_dir, old_commit, new_commit, hide_merges=True):
    """Find all commits between two commit SHAs.

    :param hide_merges: When True, drop merge commits (those whose
        summary starts with "Merge ").
    :returns: List of git commit objects in the range
        ``old_commit..new_commit``.
    """
    repo = Repo(repo_dir)
    commits = repo.iter_commits(rev="{0}..{1}".format(old_commit, new_commit))
    if hide_merges:
        return [x for x in commits if not x.summary.startswith("Merge ")]
    else:
        return list(commits)
def get_commit_url(repo_url):
    """Determine URL to view commits for repo."""
    if "github.com" in repo_url:
        # GitHub URLs work directly; just drop a trailing ".git".
        if repo_url.endswith(".git"):
            return repo_url[:-4]
        return repo_url
    if "git.openstack.org" in repo_url:
        # Map the OpenStack git farm onto its GitHub mirror.
        namespace_and_repo = '/'.join(repo_url.split('/')[-2:])
        return "https://github.com/{0}".format(namespace_and_repo)
    # If it didn't match these conditions, just return it.
    return repo_url
def checkout(repo, ref):
    """Check out ``ref`` in ``repo``, discarding any local state.

    A local branch named ``ref`` is deleted first so a stale local
    branch can never shadow the remote branch, tag, or SHA that was
    requested.

    :param repo: A ``git.Repo`` instance.
    :param ref: Branch name, tag, or SHA to check out.
    """
    # Delete local branch if it exists, remote branch will be tracked
    # automatically. This prevents stale local branches from causing problems.
    # It also avoids problems with appending origin/ to refs as that doesn't
    # work with tags, SHAs, and upstreams not called origin.
    if ref in repo.branches:
        # eg delete master but leave origin/master
        # logging.Logger.warn() is deprecated; use warning().
        log.warning("Removing local branch {b} for repo {r}".format(b=ref,
                                                                    r=repo))
        # Can't delete currently checked out branch, so make sure head is
        # detached before deleting.
        repo.head.reset(index=True, working_tree=True)
        repo.git.checkout(repo.head.commit.hexsha)
        repo.delete_head(ref, '--force')
    log.info("Checkout out repo {repo} to ref {ref}".format(repo=repo,
                                                            ref=ref))
    repo.head.reset(index=True, working_tree=True)
    repo.git.checkout(ref)
    repo.head.reset(index=True, working_tree=True)
    sha = repo.head.commit.hexsha
    log.info("Current SHA for repo {repo} is {sha}".format(repo=repo, sha=sha))
def get_roles(osa_repo_dir, commit, role_requirements):
    """Read OSA role information at a particular commit.

    :param osa_repo_dir: Path to the local openstack-ansible clone.
    :param commit: Git ref to check out before reading the file.
    :param role_requirements: Role requirements filename relative to the
        repo root (e.g. ``ansible-role-requirements.yml``).
    :returns: List of ``(name, src, version)`` tuples.
    """
    repo = Repo(osa_repo_dir)
    checkout(repo, commit)
    log.info("Looking for file {f} in repo {r}".format(r=osa_repo_dir,
                                                       f=role_requirements))
    filename = "{0}/{1}".format(osa_repo_dir, role_requirements)
    with open(filename, 'r') as f:
        # safe_load instead of the deprecated bare yaml.load: the file is
        # plain data and must not construct arbitrary Python objects.
        roles_yaml = yaml.safe_load(f)
    return normalize_yaml(roles_yaml)
def make_osa_report(repo_dir, old_commit, new_commit,
                    args):
    """Create initial RST report header for OpenStack-Ansible.

    :param repo_dir: Path to the local openstack-ansible clone.
    :param old_commit: Older git ref of the comparison.
    :param new_commit: Newer git ref of the comparison.
    :param args: Parsed CLI namespace (uses ``osa_repo_url``/``update``).
    :returns: Rendered RST header including the OSA commit list.
    :raises exceptions.InvalidCommitException: if either ref is unknown.
    :raises exceptions.InvalidCommitRangeException: if no commits exist
        between the refs in either direction.
    """
    update_repo(repo_dir, args.osa_repo_url, args.update)
    # Are these commits valid?
    validate_commits(repo_dir, [old_commit, new_commit])
    # Do we have a valid commit range?
    validate_commit_range(repo_dir, old_commit, new_commit)
    # Get the commits in the range
    commits = get_commits(repo_dir, old_commit, new_commit)
    # Start off our report with a header and our OpenStack-Ansible commits.
    template_vars = {
        'args': args,
        'repo': 'openstack-ansible',
        'commits': commits,
        'commit_base_url': get_commit_url(args.osa_repo_url),
        'old_sha': old_commit,
        'new_sha': new_commit
    }
    return render_template('offline-header.j2', template_vars)
def make_report(storage_directory, old_pins, new_pins, do_update=False,
                version_mappings=None):
    """Create RST report from a list of projects/roles.

    :param storage_directory: Directory holding the cached git clones.
    :param old_pins: ``(name, url, sha)`` tuples from the older commit.
    :param new_pins: ``(name, url, sha)`` tuples from the newer commit.
    :param do_update: Fetch the latest refs for each repo when True.
    :param version_mappings: Optional ``{repo: {old_sha: new_sha}}``
        overrides for pins that no longer exist upstream.
    :returns: Concatenated RST, one section per repo in ``new_pins``.
    """
    report = ""
    version_mappings = version_mappings or {}
    for new_pin in new_pins:
        repo_name, repo_url, commit_sha = new_pin
        # Apply any user-supplied SHA override for the new pin.
        commit_sha = version_mappings.get(repo_name, {}
                                          ).get(commit_sha, commit_sha)
        # Prepare our repo directory and clone the repo if needed. Only pull
        # if the user requests it.
        repo_dir = "{0}/{1}".format(storage_directory, repo_name)
        update_repo(repo_dir, repo_url, do_update)
        # Get the old SHA from the previous pins. If this pin didn't exist
        # in the previous OSA revision, skip it. This could happen with newly-
        # added projects and roles.
        try:
            commit_sha_old = next(x[2] for x in old_pins if x[0] == repo_name)
        except Exception:
            continue
        else:
            commit_sha_old = version_mappings.get(repo_name, {}
                                                  ).get(commit_sha_old,
                                                        commit_sha_old)
        # Loop through the commits and render our template.
        validate_commits(repo_dir, [commit_sha_old, commit_sha])
        commits = get_commits(repo_dir, commit_sha_old, commit_sha)
        template_vars = {
            'repo': repo_name,
            'commits': commits,
            'commit_base_url': get_commit_url(repo_url),
            'old_sha': commit_sha_old,
            'new_sha': commit_sha
        }
        rst = render_template('offline-repo-changes.j2', template_vars)
        report += rst
    return report
def normalize_yaml(yaml):
    """Normalize the YAML from project and role lookups.

    These are returned as a list of tuples.
    """
    if isinstance(yaml, list):
        # Role requirements: a list of {name, src, version?} mappings;
        # a missing version defaults to HEAD.
        return [(role['name'], role['src'], role.get('version', 'HEAD'))
                for role in yaml]
    # Project pins: a flat mapping with '<project>_git_repo' and
    # '<project>_git_install_branch' keys.
    normalized = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        project = key[:-9]
        normalized.append((
            project,
            yaml['{0}_git_repo'.format(project)],
            yaml['{0}_git_install_branch'.format(project)],
        ))
    return normalized
def parse_arguments():
    """Parse arguments."""
    # Build the parser and hand back the parsed CLI namespace.
    return create_parser().parse_args()
def post_gist(report_data, old_sha, new_sha):
    """Post the report to a GitHub Gist and return the URL of the gist.

    :param report_data: RST report body to publish.
    :param old_sha: Older ref, used in the gist description/filename.
    :param new_sha: Newer ref, used in the gist description/filename.
    :returns: The gist's ``html_url``.
    :raises requests.HTTPError: if GitHub rejects the gist creation.
    """
    payload = {
        "description": ("Changes in OpenStack-Ansible between "
                        "{0} and {1}".format(old_sha, new_sha)),
        "public": True,
        "files": {
            "osa-diff-{0}-{1}.rst".format(old_sha, new_sha): {
                "content": report_data
            }
        }
    }
    url = "https://api.github.com/gists"
    r = requests.post(url, data=json.dumps(payload))
    # Fail loudly with the HTTP status instead of a confusing KeyError
    # when GitHub rejects the request (rate limit, payload size, etc.).
    r.raise_for_status()
    response = r.json()
    return response['html_url']
def publish_report(report, args, old_commit, new_commit):
    """Publish the RST report based on the user request."""
    output = ""
    # Default path: nothing suppresses stdout and no alternate sink was
    # requested, so hand the report back verbatim for printing.
    if not (args.quiet or args.gist or args.file):
        return report
    if args.gist:
        gist_url = post_gist(report, old_commit, new_commit)
        output += "\nReport posted to GitHub Gist: {0}".format(gist_url)
    if args.file is not None:
        with open(args.file, 'w') as handle:
            handle.write(report)
        output += "\nReport written to file: {0}".format(args.file)
    return output
def prepare_storage_dir(storage_directory):
    """Prepare the storage directory.

    Expands a leading ``~`` and creates the directory (including any
    missing parent directories) if it does not exist yet.

    :param storage_directory: Path of the directory to prepare.
    :returns: The expanded path actually used.
    :raises OSError: if the directory cannot be created.
    """
    storage_directory = os.path.expanduser(storage_directory)
    if not os.path.exists(storage_directory):
        # makedirs (rather than mkdir) so nested paths such as
        # ~/.cache/osa-differ work on a fresh machine.
        os.makedirs(storage_directory)
    return storage_directory
def render_template(template_file, template_vars):
    """Render a jinja template.

    Templates are loaded from the ``templates`` directory that sits
    next to this module on disk.

    :param template_file: Template filename (e.g. ``offline-header.j2``).
    :param template_vars: Mapping of variables passed to the template.
    :returns: The rendered template as a string.
    """
    # Load our Jinja templates
    template_dir = "{0}/templates".format(
        os.path.dirname(os.path.abspath(__file__))
    )
    jinja_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        trim_blocks=True
    )
    rendered = jinja_env.get_template(template_file).render(template_vars)
    return rendered
def repo_clone(repo_dir, repo_url):
    """Clone repository to this host."""
    # Thin wrapper around GitPython's clone_from.
    return Repo.clone_from(repo_url, repo_dir)
def repo_pull(repo_dir, repo_url, fetch=False):
    """Reset repository and optionally update it.

    The working tree is forcibly cleaned and reset onto ``master``
    before anything else, so any local modifications inside the cache
    directory are discarded.

    :param repo_dir: Path of the local clone.
    :param repo_url: Upstream URL used for fetching.
    :param fetch: Fetch the refspecs below from ``repo_url`` when True.
    :returns: The ``git.Repo`` for ``repo_dir``.
    """
    # Make sure the repository is reset to the master branch.
    repo = Repo(repo_dir)
    repo.git.clean("-df")
    repo.git.reset("--hard")
    repo.git.checkout("master")
    repo.head.reset(index=True, working_tree=True)
    # Compile the refspec appropriately to ensure
    # that if the repo is from github it includes
    # all the refs needed, including PR's.
    refspec_list = [
        "+refs/heads/*:refs/remotes/origin/*",
        "+refs/heads/*:refs/heads/*",
        "+refs/tags/*:refs/tags/*"
    ]
    if "github.com" in repo_url:
        refspec_list.extend([
            "+refs/pull/*:refs/remotes/origin/pr/*",
            "+refs/heads/*:refs/remotes/origin/*"])
    # Only get the latest updates if requested.
    if fetch:
        repo.git.fetch(["-u", "-v", "-f",
                        repo_url,
                        refspec_list])
    return repo
def update_repo(repo_dir, repo_url, fetch=False):
    """Clone the repo if it doesn't exist already, otherwise update it."""
    if not os.path.exists(repo_dir):
        log.info("Cloning repo {}".format(repo_url))
        repo_clone(repo_dir, repo_url)
    # Make sure the repo is properly prepared
    # and has all the refs required
    log.info("Fetching repo {} (fetch: {})".format(repo_url, fetch))
    return repo_pull(repo_dir, repo_url, fetch)
def validate_commits(repo_dir, commits):
    """Test if a commit is valid for the repository."""
    log.debug("Validating {c} exist in {r}".format(c=commits, r=repo_dir))
    repo = Repo(repo_dir)
    for commit in commits:
        try:
            commit = repo.commit(commit)
        except Exception:
            # GitPython raises several exception types for unknown refs;
            # translate any of them into our own exception.
            raise exceptions.InvalidCommitException(
                "Commit {commit} could not be found in repo {repo}. "
                "You may need to pass --update to fetch the latest "
                "updates to the git repositories stored on "
                "your local computer.".format(repo=repo_dir, commit=commit)
            )
    return True
def validate_commit_range(repo_dir, old_commit, new_commit):
    """Check if commit range is valid. Flip it if needed.

    :returns: ``True`` when the range is valid as given, or the string
        ``'flip'`` when it is only valid with the commits reversed.
    :raises exceptions.InvalidCommitRangeException: if no commits exist
        between the two refs in either direction.
    """
    # Are there any commits between the two commits that were provided?
    try:
        commits = get_commits(repo_dir, old_commit, new_commit)
    except Exception:
        commits = []
    if len(commits) == 0:
        # The user might have gotten their commits out of order. Let's flip
        # the order of the commits and try again.
        try:
            commits = get_commits(repo_dir, new_commit, old_commit)
        except Exception:
            commits = []
        if len(commits) == 0:
            # Okay, so there really are no commits between the two commits
            # provided by the user. :)
            # (Message fix: a space was missing after the first sentence.)
            msg = ("The commit range {0}..{1} is invalid for {2}. "
                   "You may need to use the --update option to fetch the "
                   "latest updates to the git repositories stored on your "
                   "local computer.".format(old_commit, new_commit, repo_dir))
            raise exceptions.InvalidCommitRangeException(msg)
        else:
            return 'flip'
    return True
def get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit):
    """Get release notes between the two revisions.

    Runs ``reno report`` once for the unreleased notes on the new commit
    and once per packaged release tag between the two commits, then
    stitches the RST together with header underline characters rewritten
    to match osa-differ's heading levels.

    NOTE(review): assumes the ``reno`` command is installed and on
    $PATH -- confirm.
    """
    repo = Repo(osa_repo_dir)
    # Get a list of tags, sorted
    tags = repo.git.tag().split('\n')
    tags = sorted(tags, key=LooseVersion)
    # Currently major tags are being printed after rc and
    # b tags. We need to fix the list so that major
    # tags are printed before rc and b releases
    tags = _fix_tags_list(tags)
    # Find the closest tag from a given SHA
    # The tag found here is the tag that was cut
    # either on or before the given SHA
    checkout(repo, osa_old_commit)
    old_tag = repo.git.describe()
    # If the SHA given is between two release tags, then
    # 'git describe' will return a tag in form of
    # <tag>-<commitNum>-<sha>. For example:
    # 14.0.2-3-g6931e26
    # Since reno does not support this format, we need to
    # strip away the commit number and sha bits.
    if '-' in old_tag:
        old_tag = old_tag[0:old_tag.index('-')]
    # Get the nearest tag associated with the new commit
    checkout(repo, osa_new_commit)
    new_tag = repo.git.describe()
    if '-' in new_tag:
        nearest_new_tag = new_tag[0:new_tag.index('-')]
    else:
        nearest_new_tag = new_tag
    # Truncate the tags list to only include versions
    # between old_sha and new_sha. The latest release
    # is not included in this list. That version will be
    # printed separately in the following step.
    tags = tags[tags.index(old_tag):tags.index(nearest_new_tag)]
    release_notes = ""
    # Checkout the new commit, then run reno to get the latest
    # releasenotes that have been created or updated between
    # the latest release and this new commit.
    repo.git.checkout(osa_new_commit, '-f')
    reno_report_command = ['reno',
                           'report',
                           '--earliest-version',
                           nearest_new_tag]
    reno_report_p = subprocess.Popen(reno_report_command,
                                     cwd=osa_repo_dir,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    reno_output = reno_report_p.communicate()[0].decode('UTF-8')
    release_notes += reno_output
    # We want to start with the latest packaged release first, so
    # the tags list is reversed
    for version in reversed(tags):
        # If version is an rc or b tag, and it has a major
        # release tag, then skip it. There is no need to print
        # release notes for an rc or b release unless we are
        # comparing shas between two rc or b releases.
        repo.git.checkout(version, '-f')
        # We are outputing one version at a time here
        reno_report_command = ['reno',
                               'report',
                               '--branch',
                               version,
                               '--earliest-version',
                               version]
        reno_report_p = subprocess.Popen(reno_report_command,
                                         cwd=osa_repo_dir,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        reno_output = reno_report_p.communicate()[0].decode('UTF-8')
        # We need to ensure the output includes the version we are concerned
        # about.
        # This is due to https://bugs.launchpad.net/reno/+bug/1670173
        if version in reno_output:
            release_notes += reno_output
    # Clean up "Release Notes" title. We don't need this title for
    # each tagged release.
    release_notes = release_notes.replace(
        "=============\nRelease Notes\n=============",
        ""
    )
    # Replace headers that contain '=' with '~' to comply with osa-differ's
    # formatting
    release_notes = re.sub('===+', _equal_to_tilde, release_notes)
    # Replace headers that contain '-' with '#' to comply with osa-differ's
    # formatting
    release_notes = re.sub('---+', _dash_to_num, release_notes)
    return release_notes
def _equal_to_tilde(matchobj):
num_of_equal = len(matchobj.group(0))
return '~' * num_of_equal
def _dash_to_num(matchobj):
num_of_dashes = len(matchobj.group(0))
return '#' * num_of_dashes
def _fix_tags_list(tags):
new_list = []
for tag in tags:
rc_releases = []
# Ignore rc and b releases, these will be built
# out in the list comprehension below.
# Finding the rc and b releases of the tag..
if 'rc' not in tag and 'b' not in tag:
rc_releases = [
rc_tag for rc_tag in tags
if tag in rc_tag and ('rc' in rc_tag or 'b' in rc_tag)
]
new_list.extend(rc_releases)
# Make sure we don't add the tag in twice
if tag not in new_list:
new_list.append(tag)
return new_list
def run_osa_differ():
    """Start here."""
    cli_args = parse_arguments()

    # Raise log verbosity if requested on the command line.
    if cli_args.debug:
        log.setLevel(logging.DEBUG)
    elif cli_args.verbose:
        log.setLevel(logging.INFO)

    # The storage directory holds the cloned repositories we diff against;
    # bail out early if it can't be created.
    try:
        storage_dir = prepare_storage_dir(cli_args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(cli_args.directory))
        sys.exit(1)

    # Commit range and repository location for OpenStack-Ansible itself.
    old_commit = cli_args.old_commit[0]
    new_commit = cli_args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_dir)

    # The report opens with the OpenStack-Ansible commit summary.
    report = make_osa_report(osa_repo_dir,
                             old_commit,
                             new_commit,
                             cli_args)

    # Optionally append reno release notes for the packaged releases
    # between the two commits.
    if cli_args.release_notes:
        report += ("\nRelease Notes\n"
                   "-------------")
        report += get_release_notes(osa_repo_dir,
                                    old_commit,
                                    new_commit)

    # Role lists are gathered for both commits up front, before deciding
    # whether the role section is included in the report.
    roles_old = get_roles(osa_repo_dir,
                          old_commit,
                          cli_args.role_requirements)
    roles_new = get_roles(osa_repo_dir,
                          new_commit,
                          cli_args.role_requirements)

    if not cli_args.skip_roles:
        report += ("\nOpenStack-Ansible Roles\n"
                   "-----------------------")
        report += make_report(storage_dir,
                              roles_old,
                              roles_new,
                              cli_args.update,
                              cli_args.version_mappings)

    if not cli_args.skip_projects:
        # Project lists for both commits, then the project section.
        projects_old = get_projects(osa_repo_dir, old_commit)
        projects_new = get_projects(osa_repo_dir, new_commit)
        report += ("\nOpenStack Projects\n"
                   "------------------")
        report += make_report(storage_dir,
                              projects_old,
                              projects_new,
                              cli_args.update)

    # Publish the finished report per the user's request and echo the result.
    print(publish_report(report, cli_args, old_commit, new_commit))
# Script entry point: run the differ only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    run_osa_differ()
# NOTE(review): the lines below are extraction residue (dataset-viewer page
# text), not part of the original source; commented out so the file parses.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.