repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
ml31415/numpy-groupies | numpy_groupies/aggregate_weave.py | c_func | python | def c_func(funcname, reverse=False, nans=False, scalar=False):
varnames = ['group_idx', 'a', 'ret', 'counter']
codebase = c_base_reverse if reverse else c_base
iteration = c_iter_scalar[funcname] if scalar else c_iter[funcname]
if scalar:
varnames.remove('a')
return codebase % dict(init=c_init(varnames), iter=iteration,
finish=c_finish.get(funcname, ''),
ri_redir=(c_ri_redir if nans else c_ri)) | Fill c_funcs with constructed code from the templates | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_weave.py#L154-L163 | [
"def c_init(varnames):\n return ' ' + ''.join(c_size(varname) for varname in varnames).lstrip() + \"\"\"\n\n long ri = 0;\n long cmp_pos = 0;\"\"\"\n"
] | import numpy as np
try:
from weave import inline
except ImportError:
from scipy.weave import inline
from .utils import get_func, isstr, funcs_no_separate_nan, aggregate_common_doc
from .utils_numpy import check_dtype, aliasing, check_fill_value, input_validation
optimized_funcs = {'sum', 'min', 'max', 'amin', 'amax', 'mean', 'var', 'std', 'prod', 'len',
'nansum', 'nanmin', 'nanmax', 'nanmean', 'nanvar', 'nanstd', 'nanprod', 'nanlen',
'all', 'any', 'nanall', 'nanany', 'allnan', 'anynan',
'first', 'last', 'nanfirst', 'nanlast'}
# c_funcs will contain all generated c code, so it can be read easily for debugging
c_funcs = dict()
c_iter = dict()
c_iter_scalar = dict()
c_finish = dict()
# Set this for testing, to fail deprecated C-API calls
#c_macros = [('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION')]
c_macros = []
c_args = ['-Wno-cpp'] # Suppress the deprecation warnings created by weave
def c_size(varname):
return r"""
long L%(varname)s = 1;
for (int n=0; n<D%(varname)s; n++) L%(varname)s *= N%(varname)s[n];""" % dict(varname=varname)
def c_init(varnames):
return ' ' + ''.join(c_size(varname) for varname in varnames).lstrip() + """
long ri = 0;
long cmp_pos = 0;"""
c_ri = """ri = group_idx[i];"""
c_ri_redir = """ri = (group_idx[i] + 1) * (a[i] == a[i]);"""
c_base = r"""%(init)s
for (long i=0; i<Lgroup_idx; i++) {
%(ri_redir)s
%(iter)s
}
%(finish)s
"""
c_base_reverse = r"""%(init)s
for (long i=Lgroup_idx-1; i>=0; i--) {
%(ri_redir)s
%(iter)s
}
%(finish)s
"""
c_iter['sum'] = r"""
counter[ri] = 0;
ret[ri] += a[i];"""
c_iter_scalar['sum'] = r"""
counter[ri] = 0;
ret[ri] += a;"""
c_iter['prod'] = r"""
counter[ri] = 0;
ret[ri] *= a[i];"""
c_iter_scalar['prod'] = r"""
counter[ri] = 0;
ret[ri] *= a;"""
c_iter['len'] = r"""
counter[ri] = 0;
ret[ri] += 1;"""
c_iter_scalar['len'] = r"""
counter[ri] = 0;
ret[ri] += 1;"""
c_iter['all'] = r"""
counter[ri] = 0;
ret[ri] &= (a[i] != 0);"""
c_iter['any'] = r"""
counter[ri] = 0;
ret[ri] |= (a[i] != 0);"""
c_iter['last'] = r"""
ret[ri] = a[i];"""
c_iter['allnan'] = r"""
counter[ri] = 0;
ret[ri] &= (a[i] == a[i]);"""
c_iter['anynan'] = r"""
counter[ri] = 0;
ret[ri] |= (a[i] == a[i]);"""
c_iter['max'] = r"""
if (counter[ri]) {
ret[ri] = a[i];
counter[ri] = 0;
}
else if (ret[ri] < a[i]) ret[ri] = a[i];"""
c_iter['min'] = r"""
if (counter[ri]) {
ret[ri] = a[i];
counter[ri] = 0;
}
else if (ret[ri] > a[i]) ret[ri] = a[i];"""
c_iter['mean'] = r"""
counter[ri]++;
ret[ri] += a[i];"""
c_finish['mean'] = r"""
for (long ri=0; ri<Lret; ri++) {
if (counter[ri]) ret[ri] = ret[ri] / counter[ri];
else ret[ri] = fill_value;
}"""
c_iter['std'] = r"""
counter[ri]++;
means[ri] += a[i];
ret[ri] += a[i] * a[i];"""
c_finish['std'] = r"""
double mean2 = 0;
for (long ri=0; ri<Lret; ri++) {
if (counter[ri]) {
mean2 = means[ri] * means[ri];
ret[ri] = sqrt((ret[ri] - mean2 / counter[ri]) / (counter[ri] - ddof));
}
else ret[ri] = fill_value;
}"""
c_iter['var'] = c_iter['std']
c_finish['var'] = r"""
double mean2 = 0;
for (long ri=0; ri<Lret; ri++) {
if (counter[ri]) {
mean2 = means[ri] * means[ri];
ret[ri] = (ret[ri] - mean2 / counter[ri]) / (counter[ri] - ddof);
}
else ret[ri] = fill_value;
}"""
def get_cfuncs():
c_funcs = dict()
for funcname in c_iter:
c_funcs[funcname] = c_func(funcname)
if funcname not in funcs_no_separate_nan:
c_funcs['nan' + funcname] = c_func(funcname, nans=True)
for funcname in c_iter_scalar:
c_funcs[funcname + 'scalar'] = c_func(funcname, scalar=True)
c_funcs['first'] = c_func('last', reverse=True)
c_funcs['nanfirst'] = c_func('last', reverse=True, nans=True)
return c_funcs
c_funcs = get_cfuncs()
c_step_count = c_size('group_idx') + r"""
long cmp_pos = 0;
long steps = 1;
if (Lgroup_idx < 1) return_val = 0;
else {
for (long i=0; i<Lgroup_idx; i++) {
if (group_idx[cmp_pos] != group_idx[i]) {
cmp_pos = i;
steps++;
}
}
return_val = steps;
}"""
def step_count(group_idx):
""" Determine the size of the result array
for contiguous data
"""
return inline(c_step_count, ['group_idx'], define_macros=c_macros, extra_compile_args=c_args)
c_step_indices = c_size('group_idx') + r"""
long cmp_pos = 0;
long ri = 1;
for (long i=1; i<Lgroup_idx; i++) {
if (group_idx[cmp_pos] != group_idx[i]) {
cmp_pos = i;
indices[ri++] = i;
}
}"""
def step_indices(group_idx):
""" Get the edges of areas within group_idx, which are filled
with the same value
"""
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, int)
indices[0] = 0
indices[-1] = group_idx.size
inline(c_step_indices, ['group_idx', 'indices'], define_macros=c_macros, extra_compile_args=c_args)
return indices
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
dtype=None, axis=None, **kwargs):
func = get_func(func, aliasing, optimized_funcs)
if not isstr(func):
raise NotImplementedError("generic functions not supported, in the weave implementation of aggregate")
# Preparations for optimized processing
group_idx, a, flat_size, ndim_idx, size = input_validation(group_idx, a,
size=size,
order=order,
axis=axis)
dtype = check_dtype(dtype, func, a, len(group_idx))
check_fill_value(fill_value, dtype)
nans = func.startswith('nan')
if nans:
flat_size += 1
if func in ('sum', 'any', 'len', 'anynan', 'nansum', 'nanlen'):
ret = np.zeros(flat_size, dtype=dtype)
elif func in ('prod', 'all', 'allnan', 'nanprod'):
ret = np.ones(flat_size, dtype=dtype)
else:
ret = np.full(flat_size, fill_value, dtype=dtype)
# In case we should get some ugly fortran arrays, convert them
inline_vars = dict(group_idx=np.ascontiguousarray(group_idx), a=np.ascontiguousarray(a),
ret=ret, fill_value=fill_value)
# TODO: Have this fixed by proper raveling
if func in ('std', 'var', 'nanstd', 'nanvar'):
counter = np.zeros_like(ret, dtype=int)
inline_vars['means'] = np.zeros_like(ret)
inline_vars['ddof'] = kwargs.pop('ddof', 0)
elif func in ('mean', 'nanmean'):
counter = np.zeros_like(ret, dtype=int)
else:
# Using inverse logic, marking anyting touched with zero for later removal
counter = np.ones_like(ret, dtype=bool)
inline_vars['counter'] = counter
if np.isscalar(a):
func += 'scalar'
inline_vars['a'] = a
inline(c_funcs[func], inline_vars.keys(), local_dict=inline_vars, define_macros=c_macros, extra_compile_args=c_args)
# Postprocessing
if func in ('sum', 'any', 'anynan', 'nansum') and fill_value != 0:
ret[counter] = fill_value
elif func in ('prod', 'all', 'allnan', 'nanprod') and fill_value != 1:
ret[counter] = fill_value
if nans:
# Restore the shifted return array
ret = ret[1:]
# Deal with ndimensional indexing
if ndim_idx > 1:
ret = ret.reshape(size, order=order)
return ret
aggregate.__doc__ = """
This is the weave based implementation of aggregate.
**NOTE:** If weave is installed but fails to run (probably because you
have not setup a suitable compiler) then you can manually select the numpy
implementation by using::
import numpy_groupies as npg
# NOT THIS: npg.aggregate(...)
npg.aggregate_np(...)
""" + aggregate_common_doc
|
ml31415/numpy-groupies | numpy_groupies/aggregate_weave.py | step_indices | python | def step_indices(group_idx):
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, int)
indices[0] = 0
indices[-1] = group_idx.size
inline(c_step_indices, ['group_idx', 'indices'], define_macros=c_macros, extra_compile_args=c_args)
return indices | Get the edges of areas within group_idx, which are filled
with the same value | train | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_weave.py#L212-L221 | [
"def step_count(group_idx):\n \"\"\" Determine the size of the result array\n for contiguous data\n \"\"\"\n return inline(c_step_count, ['group_idx'], define_macros=c_macros, extra_compile_args=c_args)\n"
] | import numpy as np
try:
from weave import inline
except ImportError:
from scipy.weave import inline
from .utils import get_func, isstr, funcs_no_separate_nan, aggregate_common_doc
from .utils_numpy import check_dtype, aliasing, check_fill_value, input_validation
optimized_funcs = {'sum', 'min', 'max', 'amin', 'amax', 'mean', 'var', 'std', 'prod', 'len',
'nansum', 'nanmin', 'nanmax', 'nanmean', 'nanvar', 'nanstd', 'nanprod', 'nanlen',
'all', 'any', 'nanall', 'nanany', 'allnan', 'anynan',
'first', 'last', 'nanfirst', 'nanlast'}
# c_funcs will contain all generated c code, so it can be read easily for debugging
c_funcs = dict()
c_iter = dict()
c_iter_scalar = dict()
c_finish = dict()
# Set this for testing, to fail deprecated C-API calls
#c_macros = [('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION')]
c_macros = []
c_args = ['-Wno-cpp'] # Suppress the deprecation warnings created by weave
def c_size(varname):
return r"""
long L%(varname)s = 1;
for (int n=0; n<D%(varname)s; n++) L%(varname)s *= N%(varname)s[n];""" % dict(varname=varname)
def c_init(varnames):
return ' ' + ''.join(c_size(varname) for varname in varnames).lstrip() + """
long ri = 0;
long cmp_pos = 0;"""
c_ri = """ri = group_idx[i];"""
c_ri_redir = """ri = (group_idx[i] + 1) * (a[i] == a[i]);"""
c_base = r"""%(init)s
for (long i=0; i<Lgroup_idx; i++) {
%(ri_redir)s
%(iter)s
}
%(finish)s
"""
c_base_reverse = r"""%(init)s
for (long i=Lgroup_idx-1; i>=0; i--) {
%(ri_redir)s
%(iter)s
}
%(finish)s
"""
c_iter['sum'] = r"""
counter[ri] = 0;
ret[ri] += a[i];"""
c_iter_scalar['sum'] = r"""
counter[ri] = 0;
ret[ri] += a;"""
c_iter['prod'] = r"""
counter[ri] = 0;
ret[ri] *= a[i];"""
c_iter_scalar['prod'] = r"""
counter[ri] = 0;
ret[ri] *= a;"""
c_iter['len'] = r"""
counter[ri] = 0;
ret[ri] += 1;"""
c_iter_scalar['len'] = r"""
counter[ri] = 0;
ret[ri] += 1;"""
c_iter['all'] = r"""
counter[ri] = 0;
ret[ri] &= (a[i] != 0);"""
c_iter['any'] = r"""
counter[ri] = 0;
ret[ri] |= (a[i] != 0);"""
c_iter['last'] = r"""
ret[ri] = a[i];"""
c_iter['allnan'] = r"""
counter[ri] = 0;
ret[ri] &= (a[i] == a[i]);"""
c_iter['anynan'] = r"""
counter[ri] = 0;
ret[ri] |= (a[i] == a[i]);"""
c_iter['max'] = r"""
if (counter[ri]) {
ret[ri] = a[i];
counter[ri] = 0;
}
else if (ret[ri] < a[i]) ret[ri] = a[i];"""
c_iter['min'] = r"""
if (counter[ri]) {
ret[ri] = a[i];
counter[ri] = 0;
}
else if (ret[ri] > a[i]) ret[ri] = a[i];"""
c_iter['mean'] = r"""
counter[ri]++;
ret[ri] += a[i];"""
c_finish['mean'] = r"""
for (long ri=0; ri<Lret; ri++) {
if (counter[ri]) ret[ri] = ret[ri] / counter[ri];
else ret[ri] = fill_value;
}"""
c_iter['std'] = r"""
counter[ri]++;
means[ri] += a[i];
ret[ri] += a[i] * a[i];"""
c_finish['std'] = r"""
double mean2 = 0;
for (long ri=0; ri<Lret; ri++) {
if (counter[ri]) {
mean2 = means[ri] * means[ri];
ret[ri] = sqrt((ret[ri] - mean2 / counter[ri]) / (counter[ri] - ddof));
}
else ret[ri] = fill_value;
}"""
c_iter['var'] = c_iter['std']
c_finish['var'] = r"""
double mean2 = 0;
for (long ri=0; ri<Lret; ri++) {
if (counter[ri]) {
mean2 = means[ri] * means[ri];
ret[ri] = (ret[ri] - mean2 / counter[ri]) / (counter[ri] - ddof);
}
else ret[ri] = fill_value;
}"""
def c_func(funcname, reverse=False, nans=False, scalar=False):
""" Fill c_funcs with constructed code from the templates """
varnames = ['group_idx', 'a', 'ret', 'counter']
codebase = c_base_reverse if reverse else c_base
iteration = c_iter_scalar[funcname] if scalar else c_iter[funcname]
if scalar:
varnames.remove('a')
return codebase % dict(init=c_init(varnames), iter=iteration,
finish=c_finish.get(funcname, ''),
ri_redir=(c_ri_redir if nans else c_ri))
def get_cfuncs():
c_funcs = dict()
for funcname in c_iter:
c_funcs[funcname] = c_func(funcname)
if funcname not in funcs_no_separate_nan:
c_funcs['nan' + funcname] = c_func(funcname, nans=True)
for funcname in c_iter_scalar:
c_funcs[funcname + 'scalar'] = c_func(funcname, scalar=True)
c_funcs['first'] = c_func('last', reverse=True)
c_funcs['nanfirst'] = c_func('last', reverse=True, nans=True)
return c_funcs
c_funcs = get_cfuncs()
c_step_count = c_size('group_idx') + r"""
long cmp_pos = 0;
long steps = 1;
if (Lgroup_idx < 1) return_val = 0;
else {
for (long i=0; i<Lgroup_idx; i++) {
if (group_idx[cmp_pos] != group_idx[i]) {
cmp_pos = i;
steps++;
}
}
return_val = steps;
}"""
def step_count(group_idx):
""" Determine the size of the result array
for contiguous data
"""
return inline(c_step_count, ['group_idx'], define_macros=c_macros, extra_compile_args=c_args)
c_step_indices = c_size('group_idx') + r"""
long cmp_pos = 0;
long ri = 1;
for (long i=1; i<Lgroup_idx; i++) {
if (group_idx[cmp_pos] != group_idx[i]) {
cmp_pos = i;
indices[ri++] = i;
}
}"""
def aggregate(group_idx, a, func='sum', size=None, fill_value=0, order='C',
dtype=None, axis=None, **kwargs):
func = get_func(func, aliasing, optimized_funcs)
if not isstr(func):
raise NotImplementedError("generic functions not supported, in the weave implementation of aggregate")
# Preparations for optimized processing
group_idx, a, flat_size, ndim_idx, size = input_validation(group_idx, a,
size=size,
order=order,
axis=axis)
dtype = check_dtype(dtype, func, a, len(group_idx))
check_fill_value(fill_value, dtype)
nans = func.startswith('nan')
if nans:
flat_size += 1
if func in ('sum', 'any', 'len', 'anynan', 'nansum', 'nanlen'):
ret = np.zeros(flat_size, dtype=dtype)
elif func in ('prod', 'all', 'allnan', 'nanprod'):
ret = np.ones(flat_size, dtype=dtype)
else:
ret = np.full(flat_size, fill_value, dtype=dtype)
# In case we should get some ugly fortran arrays, convert them
inline_vars = dict(group_idx=np.ascontiguousarray(group_idx), a=np.ascontiguousarray(a),
ret=ret, fill_value=fill_value)
# TODO: Have this fixed by proper raveling
if func in ('std', 'var', 'nanstd', 'nanvar'):
counter = np.zeros_like(ret, dtype=int)
inline_vars['means'] = np.zeros_like(ret)
inline_vars['ddof'] = kwargs.pop('ddof', 0)
elif func in ('mean', 'nanmean'):
counter = np.zeros_like(ret, dtype=int)
else:
# Using inverse logic, marking anyting touched with zero for later removal
counter = np.ones_like(ret, dtype=bool)
inline_vars['counter'] = counter
if np.isscalar(a):
func += 'scalar'
inline_vars['a'] = a
inline(c_funcs[func], inline_vars.keys(), local_dict=inline_vars, define_macros=c_macros, extra_compile_args=c_args)
# Postprocessing
if func in ('sum', 'any', 'anynan', 'nansum') and fill_value != 0:
ret[counter] = fill_value
elif func in ('prod', 'all', 'allnan', 'nanprod') and fill_value != 1:
ret[counter] = fill_value
if nans:
# Restore the shifted return array
ret = ret[1:]
# Deal with ndimensional indexing
if ndim_idx > 1:
ret = ret.reshape(size, order=order)
return ret
aggregate.__doc__ = """
This is the weave based implementation of aggregate.
**NOTE:** If weave is installed but fails to run (probably because you
have not setup a suitable compiler) then you can manually select the numpy
implementation by using::
import numpy_groupies as npg
# NOT THIS: npg.aggregate(...)
npg.aggregate_np(...)
""" + aggregate_common_doc
|
bcb/jsonrpcclient | jsonrpcclient/response.py | sort_response | python | def sort_response(response: Dict[str, Any]) -> OrderedDict:
root_order = ["jsonrpc", "result", "error", "id"]
error_order = ["code", "message", "data"]
req = OrderedDict(sorted(response.items(), key=lambda k: root_order.index(k[0])))
if "error" in response:
req["error"] = OrderedDict(
sorted(response["error"].items(), key=lambda k: error_order.index(k[0]))
)
return req | Sort the keys in a JSON-RPC response object.
This has no effect other than making it nicer to read. Useful in Python 3.5 only,
dictionaries are already sorted in newer Python versions.
Example::
>>> json.dumps(sort_response({'id': 2, 'result': 5, 'jsonrpc': '2.0'}))
{"jsonrpc": "2.0", "result": 5, "id": 1}
Args:
response: Deserialized JSON-RPC response.
Returns:
The same response, sorted in an OrderedDict. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/response.py#L13-L38 | null | """
Response and JSONRPCResponse classes.
This module needs a major overhaul.
"""
from collections import OrderedDict
from json import dumps as serialize
from typing import Any, Dict, List, Union
NOID = object()
class JSONRPCResponse:
"""
A parsed JSON-RPC response object.
Base class for the responses. There should be no need to validate the input data to
these responses, since the data hass passed the jsonschema validation.
"""
ok = False
def __init__(self, jsonrpc: str, id: Any) -> None:
self.jsonrpc = jsonrpc
self.id = id
class SuccessResponse(JSONRPCResponse):
"""
Represents a JSON-RPC success response object.
"""
ok = True
def __init__(self, result: Any, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.result = result
def __repr__(self) -> str:
return "<SuccessResponse(id={}, result={})>".format(self.id, self.result)
def __str__(self) -> str:
return serialize(
sort_response(dict(jsonrpc=self.jsonrpc, result=self.result, id=self.id))
)
class NotificationResponse(SuccessResponse):
"""
Represents a JSON-RPC notification response object.
"""
ok = True
def __init__(self) -> None:
super().__init__(jsonrpc="2.0", result=None, id=NOID)
def __repr__(self) -> str:
return "<NotificationResponse()>"
def __str__(self) -> str:
return ""
class ErrorResponse(JSONRPCResponse):
"""
Represents a JSON-RPC error response object.
"""
ok = False
def __init__(self, error: Dict[str, Any], **kwargs: Any) -> None:
super().__init__(id=kwargs.pop("id", NOID), **kwargs)
self.message = error.get("message")
self.code = error.get("code")
self.data = error.get("data")
def __repr__(self) -> str:
if self.id is NOID:
return '<ErrorResponse(message="{}")>'.format(self.message)
return '<ErrorResponse(id={}, message="{}")>'.format(self.id, self.message)
def __str__(self) -> str:
error = dict(code=self.code, message=self.message)
if self.data:
error["data"] = self.data
deserialized = dict(jsonrpc=self.jsonrpc, error=error)
if self.id is not NOID:
deserialized["id"] = self.id
return serialize(sort_response(deserialized))
def total_results(
data: Union[List[JSONRPCResponse], JSONRPCResponse, None], *, ok: bool = True
) -> int:
"""
Returns the total parsed responses, given the return value from parse().
"""
if isinstance(data, list):
return sum([1 for d in data if d.ok == ok])
elif isinstance(data, JSONRPCResponse):
return int(data.ok == ok)
return 0 # The data hasn't been parsed yet. The data attribute hasn't been set.
class Response:
"""
Wraps a client response.
>>> Response(response.text, raw=response)
"""
def __init__(self, text: str, raw: Any = None) -> None:
"""
Args:
text: The response string, as it was returned from the server.
raw: The framework's own response object. Gives the user access to the
framework (e.g. Requests library's `Response` object). (optional)
"""
self.text = text
self.raw = raw
# Data is the parsed version of the response.
self.data = None # type: Union[JSONRPCResponse, List[JSONRPCResponse], None]
def __repr__(self) -> str:
total_ok = total_results(self.data, ok=True)
total_errors = total_results(self.data, ok=False)
if total_errors:
return "<Response[{} ok, {} errors]>".format(total_ok, total_errors)
return "<Response[{}]>".format(total_ok)
|
bcb/jsonrpcclient | jsonrpcclient/response.py | total_results | python | def total_results(
data: Union[List[JSONRPCResponse], JSONRPCResponse, None], *, ok: bool = True
) -> int:
if isinstance(data, list):
return sum([1 for d in data if d.ok == ok])
elif isinstance(data, JSONRPCResponse):
return int(data.ok == ok)
return 0 | Returns the total parsed responses, given the return value from parse(). | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/response.py#L121-L131 | null | """
Response and JSONRPCResponse classes.
This module needs a major overhaul.
"""
from collections import OrderedDict
from json import dumps as serialize
from typing import Any, Dict, List, Union
NOID = object()
def sort_response(response: Dict[str, Any]) -> OrderedDict:
"""
Sort the keys in a JSON-RPC response object.
This has no effect other than making it nicer to read. Useful in Python 3.5 only,
dictionaries are already sorted in newer Python versions.
Example::
>>> json.dumps(sort_response({'id': 2, 'result': 5, 'jsonrpc': '2.0'}))
{"jsonrpc": "2.0", "result": 5, "id": 1}
Args:
response: Deserialized JSON-RPC response.
Returns:
The same response, sorted in an OrderedDict.
"""
root_order = ["jsonrpc", "result", "error", "id"]
error_order = ["code", "message", "data"]
req = OrderedDict(sorted(response.items(), key=lambda k: root_order.index(k[0])))
if "error" in response:
req["error"] = OrderedDict(
sorted(response["error"].items(), key=lambda k: error_order.index(k[0]))
)
return req
class JSONRPCResponse:
"""
A parsed JSON-RPC response object.
Base class for the responses. There should be no need to validate the input data to
these responses, since the data hass passed the jsonschema validation.
"""
ok = False
def __init__(self, jsonrpc: str, id: Any) -> None:
self.jsonrpc = jsonrpc
self.id = id
class SuccessResponse(JSONRPCResponse):
"""
Represents a JSON-RPC success response object.
"""
ok = True
def __init__(self, result: Any, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.result = result
def __repr__(self) -> str:
return "<SuccessResponse(id={}, result={})>".format(self.id, self.result)
def __str__(self) -> str:
return serialize(
sort_response(dict(jsonrpc=self.jsonrpc, result=self.result, id=self.id))
)
class NotificationResponse(SuccessResponse):
"""
Represents a JSON-RPC notification response object.
"""
ok = True
def __init__(self) -> None:
super().__init__(jsonrpc="2.0", result=None, id=NOID)
def __repr__(self) -> str:
return "<NotificationResponse()>"
def __str__(self) -> str:
return ""
class ErrorResponse(JSONRPCResponse):
"""
Represents a JSON-RPC error response object.
"""
ok = False
def __init__(self, error: Dict[str, Any], **kwargs: Any) -> None:
super().__init__(id=kwargs.pop("id", NOID), **kwargs)
self.message = error.get("message")
self.code = error.get("code")
self.data = error.get("data")
def __repr__(self) -> str:
if self.id is NOID:
return '<ErrorResponse(message="{}")>'.format(self.message)
return '<ErrorResponse(id={}, message="{}")>'.format(self.id, self.message)
def __str__(self) -> str:
error = dict(code=self.code, message=self.message)
if self.data:
error["data"] = self.data
deserialized = dict(jsonrpc=self.jsonrpc, error=error)
if self.id is not NOID:
deserialized["id"] = self.id
return serialize(sort_response(deserialized))
# The data hasn't been parsed yet. The data attribute hasn't been set.
class Response:
"""
Wraps a client response.
>>> Response(response.text, raw=response)
"""
def __init__(self, text: str, raw: Any = None) -> None:
"""
Args:
text: The response string, as it was returned from the server.
raw: The framework's own response object. Gives the user access to the
framework (e.g. Requests library's `Response` object). (optional)
"""
self.text = text
self.raw = raw
# Data is the parsed version of the response.
self.data = None # type: Union[JSONRPCResponse, List[JSONRPCResponse], None]
def __repr__(self) -> str:
total_ok = total_results(self.data, ok=True)
total_errors = total_results(self.data, ok=False)
if total_errors:
return "<Response[{} ok, {} errors]>".format(total_ok, total_errors)
return "<Response[{}]>".format(total_ok)
|
bcb/jsonrpcclient | jsonrpcclient/clients/aiohttp_client.py | AiohttpClient.send_message | python | async def send_message(
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
with async_timeout.timeout(self.timeout):
async with self.session.post(
self.endpoint, data=request, ssl=self.ssl
) as response:
response_text = await response.text()
return Response(response_text, raw=response) | Transport the message to the server and return the response.
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
A Response object. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/clients/aiohttp_client.py#L53-L71 | null | class AiohttpClient(AsyncClient):
DEFAULT_RESPONSE_LOG_FORMAT = "<-- %(message)s (%(http_code)s %(http_reason)s)"
def __init__(
self,
session: ClientSession,
endpoint: str,
*args: Any,
ssl: Optional[SSLContext] = None,
timeout: int = 10,
**kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
self.endpoint = endpoint
self.session = session
self.ssl = ssl
self.timeout = timeout
def log_response(
self, response: Response, trim_log_values: bool = False, **kwargs: Any
) -> None:
extra = (
{"http_code": response.raw.status, "http_reason": response.raw.reason}
if response.raw is not None
else {}
)
super().log_response(
response, extra=extra, trim_log_values=trim_log_values, **kwargs
)
def validate_response(self, response: Response) -> None:
if response.raw is not None and not 200 <= response.raw.status <= 299:
raise ReceivedNon2xxResponseError(response.raw.status)
|
bcb/jsonrpcclient | jsonrpcclient/clients/websockets_client.py | WebSocketsClient.send_message | python | async def send_message(
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
await self.socket.send(request)
if response_expected:
response_text = await self.socket.recv()
return Response(response_text)
return Response("") | Transport the message to the server and return the response.
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
A Response object. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/clients/websockets_client.py#L25-L42 | null | class WebSocketsClient(AsyncClient):
def __init__(
self, socket: WebSocketCommonProtocol, *args: Any, **kwargs: Any
) -> None:
"""
Args:
socket: Connected websocket (websockets.connect("ws://localhost:5000"))
"""
super().__init__(*args, **kwargs)
self.socket = socket
|
bcb/jsonrpcclient | jsonrpcclient/config.py | parse_callable | python | def parse_callable(path: str) -> Iterator:
module = path[: path.rindex(".")]
callable_name = path[path.rindex(".") + 1 :]
callable_ = getattr(importlib.import_module(module), callable_name)
return callable_() | ConfigParser converter.
Calls the specified object, e.g. Option "id_generators.decimal" returns
`id_generators.decimal()`. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/config.py#L12-L22 | null | """
Config object.
Allows modules to import a pre-loaded configuration object.
"""
import importlib
import os
from configparser import ConfigParser
from typing import Iterator
defaults = {
"trim_log_values": "False",
"validate_against_schema": "True",
"id_generator": "jsonrpcclient.id_generators.decimal",
}
config = ConfigParser(
defaults=defaults,
default_section="general",
converters={"callable": parse_callable},
)
config.read([".jsonrpcclientrc", os.path.expanduser("~/.jsonrpcclientrc")])
|
bcb/jsonrpcclient | jsonrpcclient/id_generators.py | random | python | def random(length: int = 8, chars: str = digits + ascii_lowercase) -> Iterator[str]:
while True:
yield "".join([choice(chars) for _ in range(length)]) | A random string.
Not unique, but has around 1 in a million chance of collision (with the default 8
character length). e.g. 'fubui5e6'
Args:
length: Length of the random string.
chars: The characters to randomly choose from. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/id_generators.py#L40-L52 | null | """
Generators which yield an id to include in a JSON-RPC request.
By default the request `id` is a decimal number which increments with each request. See
the `config` module.
"""
import itertools
from random import choice
from string import ascii_lowercase, digits
from typing import Iterator
from uuid import uuid4
def decimal(start: int = 1) -> Iterator[int]:
"""
Increments from `start`.
e.g. 1, 2, 3, .. 9, 10, 11, etc.
Args:
start: The first value to start with.
"""
return itertools.count(start)
def hexadecimal(start: int = 1) -> Iterator[str]:
"""
Incremental hexadecimal numbers.
e.g. 1, 2, 3, .. 9, a, b, etc.
Args:
start: The first value to start with.
"""
while True:
yield "%x" % start
start += 1
def uuid() -> Iterator[str]:
"""
Unique uuid ids.
For example, '9bfe2c93-717e-4a45-b91b-55422c5af4ff'
"""
while True:
yield str(uuid4())
|
bcb/jsonrpcclient | jsonrpcclient/parse.py | get_response | python | def get_response(response: Dict[str, Any]) -> JSONRPCResponse:
if "error" in response:
return ErrorResponse(**response)
return SuccessResponse(**response) | Converts a deserialized response into a JSONRPCResponse object.
The dictionary be either an error or success response, never a notification.
Args:
response: Deserialized response dictionary. We can assume the response is valid
JSON-RPC here, since it passed the jsonschema validation. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/parse.py#L18-L30 | null | """Parse response text, returning JSONRPCResponse objects."""
from json import loads as deserialize
from typing import Any, Dict, List, Union
import jsonschema # type: ignore
from pkg_resources import resource_string
from .response import (
ErrorResponse,
JSONRPCResponse,
NotificationResponse,
SuccessResponse,
)
schema = deserialize(resource_string(__name__, "response-schema.json").decode())
def parse(
response_text: str, *, batch: bool, validate_against_schema: bool = True
) -> Union[JSONRPCResponse, List[JSONRPCResponse]]:
"""
Parses response text, returning JSONRPCResponse objects.
Args:
response_text: JSON-RPC response string.
batch: If the response_text is an empty string, this determines how to parse.
validate_against_schema: Validate against the json-rpc schema.
Returns:
Either a JSONRPCResponse, or a list of them.
Raises:
json.JSONDecodeError: The response was not valid JSON.
jsonschema.ValidationError: The response was not a valid JSON-RPC response
object.
"""
# If the response is empty, we can't deserialize it; an empty string is valid
# JSON-RPC, but not valid JSON.
if not response_text:
if batch:
# An empty string is a valid response to a batch request, when there were
# only notifications in the batch.
return []
else:
# An empty string is valid response to a Notification request.
return NotificationResponse()
# If a string, ensure it's json-deserializable
deserialized = deserialize(response_text)
# Validate the response against the Response schema (raises
# jsonschema.ValidationError if invalid)
if validate_against_schema:
jsonschema.validate(deserialized, schema)
# Batch response
if isinstance(deserialized, list):
return [get_response(r) for r in deserialized if "id" in r]
# Single response
return get_response(deserialized)
|
bcb/jsonrpcclient | jsonrpcclient/parse.py | parse | python | def parse(
response_text: str, *, batch: bool, validate_against_schema: bool = True
) -> Union[JSONRPCResponse, List[JSONRPCResponse]]:
# If the response is empty, we can't deserialize it; an empty string is valid
# JSON-RPC, but not valid JSON.
if not response_text:
if batch:
# An empty string is a valid response to a batch request, when there were
# only notifications in the batch.
return []
else:
# An empty string is valid response to a Notification request.
return NotificationResponse()
# If a string, ensure it's json-deserializable
deserialized = deserialize(response_text)
# Validate the response against the Response schema (raises
# jsonschema.ValidationError if invalid)
if validate_against_schema:
jsonschema.validate(deserialized, schema)
# Batch response
if isinstance(deserialized, list):
return [get_response(r) for r in deserialized if "id" in r]
# Single response
return get_response(deserialized) | Parses response text, returning JSONRPCResponse objects.
Args:
response_text: JSON-RPC response string.
batch: If the response_text is an empty string, this determines how to parse.
validate_against_schema: Validate against the json-rpc schema.
Returns:
Either a JSONRPCResponse, or a list of them.
Raises:
json.JSONDecodeError: The response was not valid JSON.
jsonschema.ValidationError: The response was not a valid JSON-RPC response
object. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/parse.py#L33-L75 | [
"def get_response(response: Dict[str, Any]) -> JSONRPCResponse:\n \"\"\"\n Converts a deserialized response into a JSONRPCResponse object.\n\n The dictionary be either an error or success response, never a notification.\n\n Args:\n response: Deserialized response dictionary. We can assume the response is valid\n JSON-RPC here, since it passed the jsonschema validation.\n \"\"\"\n if \"error\" in response:\n return ErrorResponse(**response)\n return SuccessResponse(**response)\n"
] | """Parse response text, returning JSONRPCResponse objects."""
from json import loads as deserialize
from typing import Any, Dict, List, Union
import jsonschema # type: ignore
from pkg_resources import resource_string
from .response import (
ErrorResponse,
JSONRPCResponse,
NotificationResponse,
SuccessResponse,
)
schema = deserialize(resource_string(__name__, "response-schema.json").decode())
def get_response(response: Dict[str, Any]) -> JSONRPCResponse:
"""
Converts a deserialized response into a JSONRPCResponse object.
The dictionary be either an error or success response, never a notification.
Args:
response: Deserialized response dictionary. We can assume the response is valid
JSON-RPC here, since it passed the jsonschema validation.
"""
if "error" in response:
return ErrorResponse(**response)
return SuccessResponse(**response)
|
bcb/jsonrpcclient | jsonrpcclient/async_client.py | AsyncClient.send | python | async def send(
self,
request: Union[str, Dict, List],
trim_log_values: bool = False,
validate_against_schema: bool = True,
**kwargs: Any
) -> Response:
# We need both the serialized and deserialized version of the request
if isinstance(request, str):
request_text = request
request_deserialized = deserialize(request)
else:
request_text = serialize(request)
request_deserialized = request
batch = isinstance(request_deserialized, list)
response_expected = batch or "id" in request_deserialized
self.log_request(request_text, trim_log_values=trim_log_values)
response = await self.send_message(
request_text, response_expected=response_expected, **kwargs
)
self.log_response(response, trim_log_values=trim_log_values)
self.validate_response(response)
response.data = parse(
response.text, batch=batch, validate_against_schema=validate_against_schema
)
# If received a single error response, raise
if isinstance(response.data, ErrorResponse):
raise ReceivedErrorResponseError(response.data)
return response | Async version of Client.send. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/async_client.py#L32-L63 | [
"def parse(\n response_text: str, *, batch: bool, validate_against_schema: bool = True\n) -> Union[JSONRPCResponse, List[JSONRPCResponse]]:\n \"\"\"\n Parses response text, returning JSONRPCResponse objects.\n\n Args:\n response_text: JSON-RPC response string.\n batch: If the response_text is an empty string, this determines how to parse.\n validate_against_schema: Validate against the json-rpc schema.\n\n Returns:\n Either a JSONRPCResponse, or a list of them.\n\n Raises:\n json.JSONDecodeError: The response was not valid JSON.\n jsonschema.ValidationError: The response was not a valid JSON-RPC response\n object.\n \"\"\"\n # If the response is empty, we can't deserialize it; an empty string is valid\n # JSON-RPC, but not valid JSON.\n if not response_text:\n if batch:\n # An empty string is a valid response to a batch request, when there were\n # only notifications in the batch.\n return []\n else:\n # An empty string is valid response to a Notification request.\n return NotificationResponse()\n\n # If a string, ensure it's json-deserializable\n deserialized = deserialize(response_text)\n\n # Validate the response against the Response schema (raises\n # jsonschema.ValidationError if invalid)\n if validate_against_schema:\n jsonschema.validate(deserialized, schema)\n\n # Batch response\n if isinstance(deserialized, list):\n return [get_response(r) for r in deserialized if \"id\" in r]\n # Single response\n return get_response(deserialized)\n",
"async def send_message(\n self, request: str, response_expected: bool, **kwargs: Any\n) -> Response:\n \"\"\"Override to transport the request\"\"\"\n",
"def validate_response(self, response: Response) -> None:\n \"\"\"\n Can be overridden for custom validation of the response.\n\n Raise an exception to fail validation.\n \"\"\"\n pass\n"
] | class AsyncClient(Client, metaclass=ABCMeta):
"""
Abstract base class for the asynchronous clients.
Has async versions of the Client class's public methods.
"""
@abstractmethod
async def send_message(
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
"""Override to transport the request"""
@apply_self
@apply_self
async def notify(
self,
method_name: str,
*args: Any,
trim_log_values: Optional[bool] = None,
validate_against_schema: Optional[bool] = None,
**kwargs: Any
) -> Response:
"""
Async version of Client.notify.
"""
return await self.send(
Notification(method_name, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
)
@apply_self
async def request(
self,
method_name: str,
*args: Any,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
**kwargs: Any
) -> Response:
"""
Async version of Client.request.
"""
return await self.send(
Request(method_name, id_generator=id_generator, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
)
|
bcb/jsonrpcclient | jsonrpcclient/clients/tornado_client.py | TornadoClient.send_message | python | async def send_message( # type: ignore
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
headers = dict(self.DEFAULT_HEADERS)
headers.update(kwargs.pop("headers", {}))
response = await self.client.fetch(
self.endpoint, method="POST", body=request, headers=headers, **kwargs
)
return Response(response.body.decode(), raw=response) | Transport the message to the server and return the response.
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
A Response object. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/clients/tornado_client.py#L46-L66 | null | class TornadoClient(AsyncClient):
"""
Note: Tornado raises its own HTTP response status code exceptions, so there's no
need to raise ReceivedNon2xxResponseError.
"""
DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json"}
DEFAULT_RESPONSE_LOG_FORMAT = "<-- %(message)s (%(http_code)s %(http_reason)s)"
def __init__(
self,
endpoint: str,
*args: Any,
client: Optional[AsyncHTTPClient] = None,
**kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
self.endpoint = endpoint
self.client = client or AsyncHTTPClient()
def log_response(
self, response: Response, trim_log_values: bool = False, **kwargs: Any
) -> None:
extra = (
{"http_code": response.raw.code, "http_reason": response.raw.reason}
if response.raw is not None
else {}
)
super().log_response(
response, extra=extra, trim_log_values=trim_log_values, **kwargs
)
|
bcb/jsonrpcclient | jsonrpcclient/__main__.py | main | python | def main(
context: click.core.Context, method: str, request_type: str, id: Any, send: str
) -> None:
exit_status = 0
# Extract the jsonrpc arguments
positional = [a for a in context.args if "=" not in a]
named = {a.split("=")[0]: a.split("=")[1] for a in context.args if "=" in a}
# Create the request
if request_type == "notify":
req = Notification(method, *positional, **named)
else:
req = Request(method, *positional, request_id=id, **named) # type: ignore
# Sending?
if send:
client = HTTPClient(send)
try:
response = client.send(req)
except JsonRpcClientError as e:
click.echo(str(e), err=True)
exit_status = 1
else:
click.echo(response.text)
# Otherwise, simply output the JSON-RPC request.
else:
click.echo(str(req))
sys.exit(exit_status) | Create a JSON-RPC request. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/__main__.py#L40-L68 | null | # /usr/bin/env python
# pylint: disable=no-value-for-parameter
"""
This is an attempt at using this library to create a "jsonrpc" command-line utility.
Currently it's only useful for very basic requests.
$ pip install jsonrpcclient
$ jsonrpc ping
{"jsonrpc": "2.0", "method": "ping", "id": 1}
$ jsonrpc ping --send http://localhost:5000
{"jsonrpc": "2.0", "result": "pong", "id": 1}
"""
import sys
from typing import Any
import click
import pkg_resources
from jsonrpcclient.clients.http_client import HTTPClient
from jsonrpcclient.exceptions import JsonRpcClientError
from jsonrpcclient.requests import Notification, Request
version = pkg_resources.require("jsonrpcclient")[0].version
@click.command(
context_settings={"ignore_unknown_options": True, "allow_extra_args": True}
)
@click.option("--id", default=1, help="Set the id for a request.")
@click.option(
"--notify",
"request_type",
flag_value="notify",
help="Indicates that no response is expected.",
)
@click.option("--send", help="URL to send request to. (requires the Requests library)")
@click.version_option(prog_name="jsonrpcclient", version=version)
@click.argument("method", required=True, metavar="METHOD [PARAMS]...")
@click.pass_context
if __name__ == "__main__":
main()
|
bcb/jsonrpcclient | jsonrpcclient/log.py | log_ | python | def log_(
message: str,
logger: logging.Logger,
level: str = "info",
extra: Optional[Dict] = None,
trim: bool = False,
) -> None:
if extra is None:
extra = {}
# Clean up the message for logging
if message:
message = message.replace("\n", "").replace(" ", " ").replace("{ ", "{")
if trim:
message = _trim_message(message)
# Log.
getattr(logger, level)(message, extra=extra) | Log a request or response
Args:
message: JSON-RPC request or response string.
level: Log level.
extra: More details to include in the log entry.
trim: Abbreviate log messages. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/log.py#L54-L78 | [
"def _trim_message(message: str) -> str:\n # Attempt to deserialize\n try:\n message_obj = json.loads(message)\n except ValueError:\n # Could not be deserialized, trim the string anyway.\n return _trim_string(str(message))\n else:\n return json.dumps(_trim_values(message_obj))\n"
] | """Logging"""
import json
import logging
from typing import Any, Dict, List, Optional, Union, cast
def _trim_string(message: str) -> str:
longest_string = 30
if len(message) > longest_string:
prefix_len = int(longest_string / 3)
suffix_len = prefix_len
return message[:prefix_len] + "..." + message[-suffix_len:]
return message
def _trim_dict(message_obj: Dict[str, Any]) -> Dict[str, Any]:
result = {}
longest_list = 30
for k, val in message_obj.items():
if isinstance(val, str):
result[k] = _trim_string(val)
elif isinstance(val, list) and len(val) > longest_list:
prefix_len = int(longest_list / 3)
suffix_len = prefix_len
result[k] = cast(str, val[:prefix_len] + ["..."] + val[-suffix_len:])
elif isinstance(val, dict):
result[k] = cast(str, _trim_values(val))
else:
result[k] = val
return result
def _trim_values(message_obj: Union[Dict, List]) -> Union[Dict, List]:
# Batch?
if isinstance(message_obj, list):
return [_trim_dict(i) for i in message_obj]
else:
return _trim_dict(message_obj)
def _trim_message(message: str) -> str:
# Attempt to deserialize
try:
message_obj = json.loads(message)
except ValueError:
# Could not be deserialized, trim the string anyway.
return _trim_string(str(message))
else:
return json.dumps(_trim_values(message_obj))
|
bcb/jsonrpcclient | jsonrpcclient/requests.py | sort_request | python | def sort_request(request: Dict[str, Any]) -> OrderedDict:
sort_order = ["jsonrpc", "method", "params", "id"]
return OrderedDict(sorted(request.items(), key=lambda k: sort_order.index(k[0]))) | Sort a JSON-RPC request dict.
This has no effect other than making the request nicer to read.
>>> json.dumps(sort_request(
... {'id': 2, 'params': [2, 3], 'method': 'add', 'jsonrpc': '2.0'}))
'{"jsonrpc": "2.0", "method": "add", "params": [2, 3], "id": 2}'
Args:
request: JSON-RPC request in dict format. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/requests.py#L18-L32 | null | """
Classes to help create JSON-RPC Request objects.
Named plural to distinguish it from the request convenience function.
To create a request:
>>> Request("cat", name="Yoko")
{'jsonrpc': '2.0', 'method': 'cat', 'params': {'name': 'Yoko'}, 'id': 1}
"""
import json
from collections import OrderedDict
from typing import Any, Callable, Dict, Iterator, Optional
from . import id_generators
class _RequestClassType(type):
"""
Request Metaclass.
Catches undefined attributes on the class.
"""
def __getattr__(cls: Callable, name: str) -> Callable:
"""
This gives us an alternate way to make a request:
>>> Request.cat()
{'jsonrpc': '2.0', 'method': 'cat', 'id': 1}
That's the same as saying `Request("cat")`.
"""
def attr_handler(*args: Any, **kwargs: Any) -> "Request":
return cls(name, *args, **kwargs)
return attr_handler
class Notification(dict, metaclass=_RequestClassType): # type: ignore
"""
A request which does not expect a response.
>>> Notification("cat")
{'jsonrpc': '2.0', 'method': 'cat'}
The first argument is the *method*; everything else is *arguments* to the
method:
>>> Notification("cat", 'Yoko', 5)
{'jsonrpc': '2.0', 'method': 'cat', params: ['Yoko', 5]}
Keyword arguments are also acceptable:
>>> Notification("cat", name="Yoko", age=5)
{'jsonrpc': '2.0', 'method': 'cat', 'params': {'name': 'Yoko', 'age': 5}}
If you prefer, call the method as though it was a class attribute:
>>> Notification.cat(name="Yoko", age=5)
{'jsonrpc': '2.0', 'method': 'cat', 'params': {'name': 'Yoko', 'age': 5}}
Args:
method: The method name.
args: Positional arguments.
kwargs: Keyword arguments.
Returns:
The JSON-RPC request in dictionary form.
"""
def __init__(self, method: str, *args: Any, **kwargs: Any) -> None:
super().__init__(jsonrpc="2.0", method=method)
# Add the params to self.
if args and kwargs:
# The 'params' can be *EITHER* "by-position" (a list) or "by-name" (a dict).
# Therefore, in this case it violates the JSON-RPC 2.0 specification.
# However, it provides the same behavior as the previous version of
# jsonrpcclient to keep compatibility.
# TODO: consider to raise a warning.
params_list = list(args)
params_list.append(kwargs)
self.update(params=params_list)
elif args:
self.update(params=list(args))
elif kwargs:
self.update(params=kwargs)
def __str__(self) -> str:
"""Wrapper around request, returning a string instead of a dict"""
return json.dumps(sort_request(self))
class Request(Notification):
"""
Create a JSON-RPC request object
http://www.jsonrpc.org/specification#request_object.
>>> Request("cat", name="Yoko")
{'jsonrpc': '2.0', 'method': 'cat', 'params': {'name': 'Yoko'}, 'id': 1}
Args:
method: The `method` name.
args: Positional arguments added to `params`.
kwargs: Keyword arguments added to `params`. Use request_id=x to force the
`id` to use.
Returns:
The JSON-RPC request in dictionary form.
"""
id_generator = id_generators.decimal()
def __init__(
self,
method: str,
*args: Any,
id_generator: Optional[Iterator[Any]] = None,
**kwargs: Any
) -> None:
# If 'request_id' is passed, use the specified id
if "request_id" in kwargs:
id_ = kwargs.pop("request_id", None)
else: # Get the next id from the generator
id_generator = (
id_generator if id_generator is not None else self.id_generator
)
id_ = next(id_generator)
# We call super last, after popping the request_id
super().__init__(method, *args, **kwargs)
self.update(id=id_)
|
bcb/jsonrpcclient | jsonrpcclient/client.py | Client.basic_logging | python | def basic_logging(self) -> None:
# Request handler
if len(request_log.handlers) == 0:
request_handler = logging.StreamHandler()
request_handler.setFormatter(
logging.Formatter(fmt=self.DEFAULT_REQUEST_LOG_FORMAT)
)
request_log.addHandler(request_handler)
request_log.setLevel(logging.INFO)
# Response handler
if len(response_log.handlers) == 0:
response_handler = logging.StreamHandler()
response_handler.setFormatter(
logging.Formatter(fmt=self.DEFAULT_RESPONSE_LOG_FORMAT)
)
response_log.addHandler(response_handler)
response_log.setLevel(logging.INFO) | Call this on the client object to create log handlers to output request and
response messages. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/client.py#L57-L77 | null | class Client(metaclass=ABCMeta):
"""
Protocol-agnostic base class for clients.
Subclasses must override `send_message` to transport the message.
"""
DEFAULT_REQUEST_LOG_FORMAT = "--> %(message)s"
DEFAULT_RESPONSE_LOG_FORMAT = "<-- %(message)s"
@apply_config(config, converters={"id_generator": "getcallable"})
def __init__(
self,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
basic_logging: bool = False,
) -> None:
"""
Args:
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
basic_logging: Will create log handlers to output request & response
messages.
"""
self.trim_log_values = trim_log_values
self.validate_against_schema = validate_against_schema
self.id_generator = id_generator
if basic_logging:
self.basic_logging()
@apply_self
def log_request(
self, request: str, trim_log_values: bool = False, **kwargs: Any
) -> None:
"""
Log a request.
Args:
request: The JSON-RPC request string.
trim_log_values: Log an abbreviated version of the request.
"""
return log_(request, request_log, "info", trim=trim_log_values, **kwargs)
@apply_self
def log_response(
self, response: Response, trim_log_values: bool = False, **kwargs: Any
) -> None:
"""
Log a response.
Note this is different to log_request, in that it takes a Response object, not a
string.
Args:
response: The Response object to log. Note this is different to log_request
which takes a string.
trim_log_values: Log an abbreviated version of the response.
"""
return log_(response.text, response_log, "info", trim=trim_log_values, **kwargs)
@abstractmethod
def send_message(
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
"""
Transport the message to the server and return the response.
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
A Response object.
"""
def validate_response(self, response: Response) -> None:
"""
Can be overridden for custom validation of the response.
Raise an exception to fail validation.
"""
pass
@apply_self
def send(
self,
request: Union[str, Dict, List],
trim_log_values: bool = False,
validate_against_schema: bool = True,
**kwargs: Any
) -> Response:
"""
Send a request, passing the whole JSON-RPC request object.
After sending, logs, validates and parses.
>>> client.send('{"jsonrpc": "2.0", "method": "ping", "id": 1}')
<Response[1]>
Args:
request: The JSON-RPC request. Can be either a JSON-encoded string or a
Request/Notification object.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
kwargs: Clients can use this to configure an single request. For example,
HTTPClient passes this through to `requests.Session.send()`.
in the case of a Notification.
"""
# We need both the serialized and deserialized version of the request
if isinstance(request, str):
request_text = request
request_deserialized = deserialize(request)
else:
request_text = serialize(request)
request_deserialized = request
batch = isinstance(request_deserialized, list)
response_expected = batch or "id" in request_deserialized
self.log_request(request_text, trim_log_values=trim_log_values)
response = self.send_message(
request_text, response_expected=response_expected, **kwargs
)
self.log_response(response, trim_log_values=trim_log_values)
self.validate_response(response)
response.data = parse(
response.text, batch=batch, validate_against_schema=validate_against_schema
)
# If received a single error response, raise
if isinstance(response.data, ErrorResponse):
raise ReceivedErrorResponseError(response.data)
return response
@apply_self
def notify(
self,
method_name: str,
*args: Any,
trim_log_values: Optional[bool] = None,
validate_against_schema: Optional[bool] = None,
**kwargs: Any
) -> Response:
"""
Send a JSON-RPC request, without expecting a response.
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
"""
return self.send(
Notification(method_name, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
)
@apply_self
def request(
self,
method_name: str,
*args: Any,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
**kwargs: Any
) -> Response:
"""
Send a request by passing the method and arguments.
>>> client.request("cat", name="Yoko")
<Response[1]
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
"""
return self.send(
Request(method_name, id_generator=id_generator, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
)
def __getattr__(self, name: str) -> Callable:
"""
This gives us an alternate way to make a request.
>>> client.cube(3)
--> {"jsonrpc": "2.0", "method": "cube", "params": [3], "id": 1}
That's the same as saying `client.request("cube", 3)`.
"""
def attr_handler(*args: Any, **kwargs: Any) -> Response:
return self.request(name, *args, **kwargs)
return attr_handler
|
bcb/jsonrpcclient | jsonrpcclient/client.py | Client.log_request | python | def log_request(
self, request: str, trim_log_values: bool = False, **kwargs: Any
) -> None:
return log_(request, request_log, "info", trim=trim_log_values, **kwargs) | Log a request.
Args:
request: The JSON-RPC request string.
trim_log_values: Log an abbreviated version of the request. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/client.py#L80-L90 | [
"def log_(\n message: str,\n logger: logging.Logger,\n level: str = \"info\",\n extra: Optional[Dict] = None,\n trim: bool = False,\n) -> None:\n \"\"\"\n Log a request or response\n\n Args:\n message: JSON-RPC request or response string.\n level: Log level.\n extra: More details to include in the log entry.\n trim: Abbreviate log messages.\n \"\"\"\n if extra is None:\n extra = {}\n # Clean up the message for logging\n if message:\n message = message.replace(\"\\n\", \"\").replace(\" \", \" \").replace(\"{ \", \"{\")\n if trim:\n message = _trim_message(message)\n # Log.\n getattr(logger, level)(message, extra=extra)\n"
] | class Client(metaclass=ABCMeta):
"""
Protocol-agnostic base class for clients.
Subclasses must override `send_message` to transport the message.
"""
DEFAULT_REQUEST_LOG_FORMAT = "--> %(message)s"
DEFAULT_RESPONSE_LOG_FORMAT = "<-- %(message)s"
@apply_config(config, converters={"id_generator": "getcallable"})
def __init__(
self,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
basic_logging: bool = False,
) -> None:
"""
Args:
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
basic_logging: Will create log handlers to output request & response
messages.
"""
self.trim_log_values = trim_log_values
self.validate_against_schema = validate_against_schema
self.id_generator = id_generator
if basic_logging:
self.basic_logging()
def basic_logging(self) -> None:
"""
Call this on the client object to create log handlers to output request and
response messages.
"""
# Request handler
if len(request_log.handlers) == 0:
request_handler = logging.StreamHandler()
request_handler.setFormatter(
logging.Formatter(fmt=self.DEFAULT_REQUEST_LOG_FORMAT)
)
request_log.addHandler(request_handler)
request_log.setLevel(logging.INFO)
# Response handler
if len(response_log.handlers) == 0:
response_handler = logging.StreamHandler()
response_handler.setFormatter(
logging.Formatter(fmt=self.DEFAULT_RESPONSE_LOG_FORMAT)
)
response_log.addHandler(response_handler)
response_log.setLevel(logging.INFO)
@apply_self
@apply_self
def log_response(
self, response: Response, trim_log_values: bool = False, **kwargs: Any
) -> None:
"""
Log a response.
Note this is different to log_request, in that it takes a Response object, not a
string.
Args:
response: The Response object to log. Note this is different to log_request
which takes a string.
trim_log_values: Log an abbreviated version of the response.
"""
return log_(response.text, response_log, "info", trim=trim_log_values, **kwargs)
@abstractmethod
def send_message(
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
"""
Transport the message to the server and return the response.
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
A Response object.
"""
def validate_response(self, response: Response) -> None:
"""
Can be overridden for custom validation of the response.
Raise an exception to fail validation.
"""
pass
@apply_self
def send(
self,
request: Union[str, Dict, List],
trim_log_values: bool = False,
validate_against_schema: bool = True,
**kwargs: Any
) -> Response:
"""
Send a request, passing the whole JSON-RPC request object.
After sending, logs, validates and parses.
>>> client.send('{"jsonrpc": "2.0", "method": "ping", "id": 1}')
<Response[1]>
Args:
request: The JSON-RPC request. Can be either a JSON-encoded string or a
Request/Notification object.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
kwargs: Clients can use this to configure an single request. For example,
HTTPClient passes this through to `requests.Session.send()`.
in the case of a Notification.
"""
# We need both the serialized and deserialized version of the request
if isinstance(request, str):
request_text = request
request_deserialized = deserialize(request)
else:
request_text = serialize(request)
request_deserialized = request
batch = isinstance(request_deserialized, list)
response_expected = batch or "id" in request_deserialized
self.log_request(request_text, trim_log_values=trim_log_values)
response = self.send_message(
request_text, response_expected=response_expected, **kwargs
)
self.log_response(response, trim_log_values=trim_log_values)
self.validate_response(response)
response.data = parse(
response.text, batch=batch, validate_against_schema=validate_against_schema
)
# If received a single error response, raise
if isinstance(response.data, ErrorResponse):
raise ReceivedErrorResponseError(response.data)
return response
@apply_self
def notify(
self,
method_name: str,
*args: Any,
trim_log_values: Optional[bool] = None,
validate_against_schema: Optional[bool] = None,
**kwargs: Any
) -> Response:
"""
Send a JSON-RPC request, without expecting a response.
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
"""
return self.send(
Notification(method_name, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
)
@apply_self
def request(
self,
method_name: str,
*args: Any,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
**kwargs: Any
) -> Response:
"""
Send a request by passing the method and arguments.
>>> client.request("cat", name="Yoko")
<Response[1]
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
"""
return self.send(
Request(method_name, id_generator=id_generator, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
)
def __getattr__(self, name: str) -> Callable:
"""
This gives us an alternate way to make a request.
>>> client.cube(3)
--> {"jsonrpc": "2.0", "method": "cube", "params": [3], "id": 1}
That's the same as saying `client.request("cube", 3)`.
"""
def attr_handler(*args: Any, **kwargs: Any) -> Response:
return self.request(name, *args, **kwargs)
return attr_handler
|
bcb/jsonrpcclient | jsonrpcclient/client.py | Client.log_response | python | def log_response(
self, response: Response, trim_log_values: bool = False, **kwargs: Any
) -> None:
return log_(response.text, response_log, "info", trim=trim_log_values, **kwargs) | Log a response.
Note this is different to log_request, in that it takes a Response object, not a
string.
Args:
response: The Response object to log. Note this is different to log_request
which takes a string.
trim_log_values: Log an abbreviated version of the response. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/client.py#L93-L107 | [
"def log_(\n message: str,\n logger: logging.Logger,\n level: str = \"info\",\n extra: Optional[Dict] = None,\n trim: bool = False,\n) -> None:\n \"\"\"\n Log a request or response\n\n Args:\n message: JSON-RPC request or response string.\n level: Log level.\n extra: More details to include in the log entry.\n trim: Abbreviate log messages.\n \"\"\"\n if extra is None:\n extra = {}\n # Clean up the message for logging\n if message:\n message = message.replace(\"\\n\", \"\").replace(\" \", \" \").replace(\"{ \", \"{\")\n if trim:\n message = _trim_message(message)\n # Log.\n getattr(logger, level)(message, extra=extra)\n"
class Client(metaclass=ABCMeta):
    """Protocol-agnostic base class for JSON-RPC clients.

    Concrete transports (HTTP, sockets, ZeroMQ, ...) subclass this and
    implement ``send_message`` to actually move the message to the server.
    """

    # Log-record formats used by basic_logging() for outgoing/incoming messages.
    DEFAULT_REQUEST_LOG_FORMAT = "--> %(message)s"
    DEFAULT_RESPONSE_LOG_FORMAT = "<-- %(message)s"

    @apply_config(config, converters={"id_generator": "getcallable"})
    def __init__(
        self,
        trim_log_values: bool = False,
        validate_against_schema: bool = True,
        id_generator: Optional[Iterator] = None,
        basic_logging: bool = False,
    ) -> None:
        """
        Args:
            trim_log_values: Abbreviate the log entries of requests and responses.
            validate_against_schema: Validate responses against the JSON-RPC schema.
            id_generator: Iterable supplying the "id" member of each request.
            basic_logging: Immediately install stream handlers for the
                request/response loggers.
        """
        self.trim_log_values = trim_log_values
        self.validate_against_schema = validate_against_schema
        self.id_generator = id_generator
        if basic_logging:
            self.basic_logging()

    def basic_logging(self) -> None:
        """Install stream handlers so request/response messages are printed."""
        # Only attach a handler when the logger has none, so repeated calls
        # do not duplicate output.
        if not request_log.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter(fmt=self.DEFAULT_REQUEST_LOG_FORMAT))
            request_log.addHandler(handler)
            request_log.setLevel(logging.INFO)
        if not response_log.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter(fmt=self.DEFAULT_RESPONSE_LOG_FORMAT))
            response_log.addHandler(handler)
            response_log.setLevel(logging.INFO)

    @apply_self
    def log_request(
        self, request: str, trim_log_values: bool = False, **kwargs: Any
    ) -> None:
        """Log an outgoing JSON-RPC request string.

        Args:
            request: The JSON-RPC request string.
            trim_log_values: Log an abbreviated version of the request.
        """
        return log_(request, request_log, "info", trim=trim_log_values, **kwargs)

    @apply_self
    @abstractmethod
    def send_message(
        self, request: str, response_expected: bool, **kwargs: Any
    ) -> Response:
        """Transport ``request`` to the server and return a Response.

        Args:
            request: The JSON-RPC request string.
            response_expected: Whether the request expects a response.

        Returns:
            A Response object.
        """

    def validate_response(self, response: Response) -> None:
        """Hook for transport-specific response validation; raise to fail."""
        pass

    @apply_self
    def send(
        self,
        request: Union[str, Dict, List],
        trim_log_values: bool = False,
        validate_against_schema: bool = True,
        **kwargs: Any
    ) -> Response:
        """Send a whole JSON-RPC request object, then log, validate and parse.

        Args:
            request: Either a JSON-encoded string or a Request/Notification
                object (a single one or a batch list).
            trim_log_values: Abbreviate the log entries of requests/responses.
            validate_against_schema: Validate the response against the schema.
            kwargs: Per-call transport options, forwarded to ``send_message``.

        Raises:
            ReceivedErrorResponseError: If a single error response comes back.
        """
        # Keep both representations: the text goes on the wire, the object
        # tells us whether this is a batch and whether a reply is expected.
        if isinstance(request, str):
            payload_text, payload_obj = request, deserialize(request)
        else:
            payload_text, payload_obj = serialize(request), request
        is_batch = isinstance(payload_obj, list)
        # A notification (no "id") does not expect a reply; batches always do.
        expects_reply = is_batch or "id" in payload_obj
        self.log_request(payload_text, trim_log_values=trim_log_values)
        response = self.send_message(
            payload_text, response_expected=expects_reply, **kwargs
        )
        self.log_response(response, trim_log_values=trim_log_values)
        self.validate_response(response)
        response.data = parse(
            response.text, batch=is_batch, validate_against_schema=validate_against_schema
        )
        if isinstance(response.data, ErrorResponse):
            raise ReceivedErrorResponseError(response.data)
        return response

    @apply_self
    def notify(
        self,
        method_name: str,
        *args: Any,
        trim_log_values: Optional[bool] = None,
        validate_against_schema: Optional[bool] = None,
        **kwargs: Any
    ) -> Response:
        """Send a JSON-RPC notification: no "id", no response expected.

        Args:
            method_name: The remote procedure's method name.
            args: Positional arguments passed to the remote procedure.
            kwargs: Keyword arguments passed to the remote procedure.
            trim_log_values: Abbreviate the log entries of requests/responses.
            validate_against_schema: Validate the response against the schema.
        """
        return self.send(
            Notification(method_name, *args, **kwargs),
            trim_log_values=trim_log_values,
            validate_against_schema=validate_against_schema,
        )

    @apply_self
    def request(
        self,
        method_name: str,
        *args: Any,
        trim_log_values: bool = False,
        validate_against_schema: bool = True,
        id_generator: Optional[Iterator] = None,
        **kwargs: Any
    ) -> Response:
        """Send a JSON-RPC request built from a method name and arguments.

        Args:
            method_name: The remote procedure's method name.
            args: Positional arguments passed to the remote procedure.
            kwargs: Keyword arguments passed to the remote procedure.
            trim_log_values: Abbreviate the log entries of requests/responses.
            validate_against_schema: Validate the response against the schema.
            id_generator: Iterable of values to use as the "id" of the request.
        """
        return self.send(
            Request(method_name, id_generator=id_generator, *args, **kwargs),
            trim_log_values=trim_log_values,
            validate_against_schema=validate_against_schema,
        )

    def __getattr__(self, name: str) -> Callable:
        """Make unknown attributes behave as remote methods.

        ``client.cube(3)`` is shorthand for ``client.request("cube", 3)``.
        """
        def proxy(*args: Any, **kwargs: Any) -> Response:
            return self.request(name, *args, **kwargs)
        return proxy
|
bcb/jsonrpcclient | jsonrpcclient/client.py | Client.notify | python | def notify(
self,
method_name: str,
*args: Any,
trim_log_values: Optional[bool] = None,
validate_against_schema: Optional[bool] = None,
**kwargs: Any
) -> Response:
return self.send(
Notification(method_name, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
) | Send a JSON-RPC request, without expecting a response.
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/client.py#L181-L203 | null | class Client(metaclass=ABCMeta):
"""
Protocol-agnostic base class for clients.
Subclasses must override `send_message` to transport the message.
"""
DEFAULT_REQUEST_LOG_FORMAT = "--> %(message)s"
DEFAULT_RESPONSE_LOG_FORMAT = "<-- %(message)s"
@apply_config(config, converters={"id_generator": "getcallable"})
def __init__(
self,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
basic_logging: bool = False,
) -> None:
"""
Args:
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
basic_logging: Will create log handlers to output request & response
messages.
"""
self.trim_log_values = trim_log_values
self.validate_against_schema = validate_against_schema
self.id_generator = id_generator
if basic_logging:
self.basic_logging()
def basic_logging(self) -> None:
"""
Call this on the client object to create log handlers to output request and
response messages.
"""
# Request handler
if len(request_log.handlers) == 0:
request_handler = logging.StreamHandler()
request_handler.setFormatter(
logging.Formatter(fmt=self.DEFAULT_REQUEST_LOG_FORMAT)
)
request_log.addHandler(request_handler)
request_log.setLevel(logging.INFO)
# Response handler
if len(response_log.handlers) == 0:
response_handler = logging.StreamHandler()
response_handler.setFormatter(
logging.Formatter(fmt=self.DEFAULT_RESPONSE_LOG_FORMAT)
)
response_log.addHandler(response_handler)
response_log.setLevel(logging.INFO)
@apply_self
def log_request(
self, request: str, trim_log_values: bool = False, **kwargs: Any
) -> None:
"""
Log a request.
Args:
request: The JSON-RPC request string.
trim_log_values: Log an abbreviated version of the request.
"""
return log_(request, request_log, "info", trim=trim_log_values, **kwargs)
@apply_self
def log_response(
self, response: Response, trim_log_values: bool = False, **kwargs: Any
) -> None:
"""
Log a response.
Note this is different to log_request, in that it takes a Response object, not a
string.
Args:
response: The Response object to log. Note this is different to log_request
which takes a string.
trim_log_values: Log an abbreviated version of the response.
"""
return log_(response.text, response_log, "info", trim=trim_log_values, **kwargs)
@abstractmethod
def send_message(
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
"""
Transport the message to the server and return the response.
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
A Response object.
"""
def validate_response(self, response: Response) -> None:
"""
Can be overridden for custom validation of the response.
Raise an exception to fail validation.
"""
pass
@apply_self
def send(
self,
request: Union[str, Dict, List],
trim_log_values: bool = False,
validate_against_schema: bool = True,
**kwargs: Any
) -> Response:
"""
Send a request, passing the whole JSON-RPC request object.
After sending, logs, validates and parses.
>>> client.send('{"jsonrpc": "2.0", "method": "ping", "id": 1}')
<Response[1]>
Args:
request: The JSON-RPC request. Can be either a JSON-encoded string or a
Request/Notification object.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
kwargs: Clients can use this to configure an single request. For example,
HTTPClient passes this through to `requests.Session.send()`.
in the case of a Notification.
"""
# We need both the serialized and deserialized version of the request
if isinstance(request, str):
request_text = request
request_deserialized = deserialize(request)
else:
request_text = serialize(request)
request_deserialized = request
batch = isinstance(request_deserialized, list)
response_expected = batch or "id" in request_deserialized
self.log_request(request_text, trim_log_values=trim_log_values)
response = self.send_message(
request_text, response_expected=response_expected, **kwargs
)
self.log_response(response, trim_log_values=trim_log_values)
self.validate_response(response)
response.data = parse(
response.text, batch=batch, validate_against_schema=validate_against_schema
)
# If received a single error response, raise
if isinstance(response.data, ErrorResponse):
raise ReceivedErrorResponseError(response.data)
return response
@apply_self
@apply_self
def request(
self,
method_name: str,
*args: Any,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
**kwargs: Any
) -> Response:
"""
Send a request by passing the method and arguments.
>>> client.request("cat", name="Yoko")
<Response[1]
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
"""
return self.send(
Request(method_name, id_generator=id_generator, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
)
def __getattr__(self, name: str) -> Callable:
"""
This gives us an alternate way to make a request.
>>> client.cube(3)
--> {"jsonrpc": "2.0", "method": "cube", "params": [3], "id": 1}
That's the same as saying `client.request("cube", 3)`.
"""
def attr_handler(*args: Any, **kwargs: Any) -> Response:
return self.request(name, *args, **kwargs)
return attr_handler
|
bcb/jsonrpcclient | jsonrpcclient/client.py | Client.request | python | def request(
self,
method_name: str,
*args: Any,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
**kwargs: Any
) -> Response:
return self.send(
Request(method_name, id_generator=id_generator, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
) | Send a request by passing the method and arguments.
>>> client.request("cat", name="Yoko")
<Response[1]
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request. | train | https://github.com/bcb/jsonrpcclient/blob/5b5abc28d1466d694c80b80c427a5dcb275382bb/jsonrpcclient/client.py#L206-L233 | null | class Client(metaclass=ABCMeta):
"""
Protocol-agnostic base class for clients.
Subclasses must override `send_message` to transport the message.
"""
DEFAULT_REQUEST_LOG_FORMAT = "--> %(message)s"
DEFAULT_RESPONSE_LOG_FORMAT = "<-- %(message)s"
@apply_config(config, converters={"id_generator": "getcallable"})
def __init__(
self,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
basic_logging: bool = False,
) -> None:
"""
Args:
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
basic_logging: Will create log handlers to output request & response
messages.
"""
self.trim_log_values = trim_log_values
self.validate_against_schema = validate_against_schema
self.id_generator = id_generator
if basic_logging:
self.basic_logging()
def basic_logging(self) -> None:
"""
Call this on the client object to create log handlers to output request and
response messages.
"""
# Request handler
if len(request_log.handlers) == 0:
request_handler = logging.StreamHandler()
request_handler.setFormatter(
logging.Formatter(fmt=self.DEFAULT_REQUEST_LOG_FORMAT)
)
request_log.addHandler(request_handler)
request_log.setLevel(logging.INFO)
# Response handler
if len(response_log.handlers) == 0:
response_handler = logging.StreamHandler()
response_handler.setFormatter(
logging.Formatter(fmt=self.DEFAULT_RESPONSE_LOG_FORMAT)
)
response_log.addHandler(response_handler)
response_log.setLevel(logging.INFO)
@apply_self
def log_request(
self, request: str, trim_log_values: bool = False, **kwargs: Any
) -> None:
"""
Log a request.
Args:
request: The JSON-RPC request string.
trim_log_values: Log an abbreviated version of the request.
"""
return log_(request, request_log, "info", trim=trim_log_values, **kwargs)
@apply_self
def log_response(
self, response: Response, trim_log_values: bool = False, **kwargs: Any
) -> None:
"""
Log a response.
Note this is different to log_request, in that it takes a Response object, not a
string.
Args:
response: The Response object to log. Note this is different to log_request
which takes a string.
trim_log_values: Log an abbreviated version of the response.
"""
return log_(response.text, response_log, "info", trim=trim_log_values, **kwargs)
@abstractmethod
def send_message(
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
"""
Transport the message to the server and return the response.
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
A Response object.
"""
def validate_response(self, response: Response) -> None:
"""
Can be overridden for custom validation of the response.
Raise an exception to fail validation.
"""
pass
@apply_self
def send(
self,
request: Union[str, Dict, List],
trim_log_values: bool = False,
validate_against_schema: bool = True,
**kwargs: Any
) -> Response:
"""
Send a request, passing the whole JSON-RPC request object.
After sending, logs, validates and parses.
>>> client.send('{"jsonrpc": "2.0", "method": "ping", "id": 1}')
<Response[1]>
Args:
request: The JSON-RPC request. Can be either a JSON-encoded string or a
Request/Notification object.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
kwargs: Clients can use this to configure an single request. For example,
HTTPClient passes this through to `requests.Session.send()`.
in the case of a Notification.
"""
# We need both the serialized and deserialized version of the request
if isinstance(request, str):
request_text = request
request_deserialized = deserialize(request)
else:
request_text = serialize(request)
request_deserialized = request
batch = isinstance(request_deserialized, list)
response_expected = batch or "id" in request_deserialized
self.log_request(request_text, trim_log_values=trim_log_values)
response = self.send_message(
request_text, response_expected=response_expected, **kwargs
)
self.log_response(response, trim_log_values=trim_log_values)
self.validate_response(response)
response.data = parse(
response.text, batch=batch, validate_against_schema=validate_against_schema
)
# If received a single error response, raise
if isinstance(response.data, ErrorResponse):
raise ReceivedErrorResponseError(response.data)
return response
@apply_self
def notify(
self,
method_name: str,
*args: Any,
trim_log_values: Optional[bool] = None,
validate_against_schema: Optional[bool] = None,
**kwargs: Any
) -> Response:
"""
Send a JSON-RPC request, without expecting a response.
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
"""
return self.send(
Notification(method_name, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
)
@apply_self
def __getattr__(self, name: str) -> Callable:
"""
This gives us an alternate way to make a request.
>>> client.cube(3)
--> {"jsonrpc": "2.0", "method": "cube", "params": [3], "id": 1}
That's the same as saying `client.request("cube", 3)`.
"""
def attr_handler(*args: Any, **kwargs: Any) -> Response:
return self.request(name, *args, **kwargs)
return attr_handler
|
def send_message(
    self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
    """Write ``request`` to the socket and read back one delimited reply.

    Method of SocketClient: relies on self.socket, self.encoding,
    self.delimiter and self.delimiter_length.

    Args:
        request: The JSON-RPC request string.
        response_expected: Whether the request expects a response.

    Returns:
        A Response object wrapping the text received before the delimiter.
    """
    outgoing = str(request) + self.delimiter
    self.socket.send(outgoing.encode(self.encoding))
    # TODO: skip waiting for a reply when the message is a notification.
    buffered = bytearray()
    text = ""
    while True:
        buffered += self.socket.recv(1024)
        text = buffered.decode(self.encoding)
        # TODO: guard against the delimiter appearing mid-stream.
        if len(text) >= self.delimiter_length and text.endswith(self.delimiter):
            break
    return Response(text[: -self.delimiter_length])
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
class SocketClient(Client):
    """JSON-RPC client speaking over an already-connected socket.

    Args:
        socket: A connected socket object.
        encoding: Charset used to encode and decode the wire data.
        delimiter: String marking the end of a request or response.
        *args: Passed through to the Client base class.
        **kwargs: Passed through to the Client base class.
    """

    def __init__(
        self,
        socket: socket.socket,
        *args: Any,
        encoding: str = "utf-8",
        delimiter: str = "\n",
        **kwargs: Any
    ) -> None:
        super().__init__(*args, **kwargs)
        self.socket = socket
        self.encoding = encoding
        self.delimiter = delimiter
        # Cached so the read loop does not recompute it per chunk.
        self.delimiter_length = len(delimiter)
|
def send_message(
    self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
    """POST the request to the endpoint and wrap the HTTP reply.

    Method of HTTPClient: uses self.session and self.endpoint; kwargs are
    forwarded to the requests Session.post call.

    Args:
        request: The JSON-RPC request string.
        response_expected: Whether the request expects a response.

    Returns:
        A Response object carrying the body text and the raw HTTP response.
    """
    http_reply = self.session.post(self.endpoint, data=request.encode(), **kwargs)
    return Response(http_reply.text, raw=http_reply)
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
class HTTPClient(Client):
    """JSON-RPC client over HTTP, built on a persistent requests Session."""

    # Default headers sent with every request.
    DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json"}
    # Response log line also records the HTTP status.
    DEFAULT_RESPONSE_LOG_FORMAT = "<-- %(message)s (%(http_code)s %(http_reason)s)"

    def __init__(self, endpoint: str, *args: Any, **kwargs: Any) -> None:
        """
        Args:
            endpoint: The server address.
        """
        super().__init__(*args, **kwargs)
        self.endpoint = endpoint
        # A Session gives connection reuse and shared headers across calls.
        self.session = Session()
        self.session.headers.update(self.DEFAULT_HEADERS)

    def log_response(self, response: Response, **kwargs: Any) -> None:
        """Log the response, adding HTTP status code/reason when available."""
        if response.raw is None:
            extra = {}
        else:
            extra = {
                "http_code": response.raw.status_code,
                "http_reason": response.raw.reason,
            }
        super().log_response(response, extra=extra, **kwargs)

    def validate_response(self, response: Response) -> None:
        """Reject any reply whose HTTP status is outside the 2xx range."""
        raw = response.raw
        if raw is not None and not 200 <= raw.status_code <= 299:
            raise ReceivedNon2xxResponseError(raw.status_code)
|
def send_message(
    self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
    """Send the request over the zmq socket and block for the reply.

    Method of ZeroMQClient: uses self.socket.

    Args:
        request: The JSON-RPC request string.
        response_expected: Whether the request expects a response.

    Returns:
        A Response object wrapping the decoded reply.
    """
    self.socket.send_string(request)
    reply_bytes = self.socket.recv()
    return Response(reply_bytes.decode())
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
class ZeroMQClient(Client):
    """JSON-RPC client over a ZeroMQ socket."""

    def __init__(
        self, endpoint: str, *args: Any, socket_type: int = zmq.REQ, **kwargs: Any
    ) -> None:
        """
        Args:
            endpoint: The server address.
            socket_type: The zeromq socket type (REQ by default).
        """
        super().__init__(*args, **kwargs)
        self.context = zmq.Context()
        # Create the socket and connect it straight away.
        sock = self.context.socket(socket_type)
        sock.connect(endpoint)
        self.socket = sock
|
def moderate_model(ParentModel, publication_date_field=None, enable_comments_field=None):
    """Register a parent model (e.g. ``Blog`` or ``Article``) for comment moderation.

    :param ParentModel: The parent model whose comments should be moderated.
    :param publication_date_field: Name of a ``DateTimeField`` on the parent
        model holding the publication date; it drives both auto-closing and
        auto-moderation of comments.
    :type publication_date_field: str
    :param enable_comments_field: Name of a ``BooleanField`` on the parent
        model that toggles whether comments are enabled.
    :type enable_comments_field: str
    """
    # Build a dedicated moderator subclass wired to the given field names.
    moderator_attrs = {
        'auto_close_field': publication_date_field,
        'auto_moderate_field': publication_date_field,
        'enable_field': enable_comments_field,
    }
    moderator_cls = type(
        '%sModerator' % ParentModel.__name__,
        (FluentCommentsModerator,),
        moderator_attrs,
    )
    moderator.register(ParentModel, moderator_cls)
:param ParentModel: The parent model, e.g. a ``Blog`` or ``Article`` model.
:param publication_date_field: The field name of a :class:`~django.db.models.DateTimeField` in the parent model which stores the publication date.
:type publication_date_field: str
:param enable_comments_field: The field name of a :class:`~django.db.models.BooleanField` in the parent model which stores the whether comments are enabled.
:type enable_comments_field: str | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/moderation.py#L154-L170 | null | from __future__ import absolute_import
import logging
from akismet import SpamStatus
from django_comments.moderation import moderator, CommentModerator
from fluent_comments import appsettings
from fluent_comments.akismet import akismet_check
from fluent_comments.email import send_comment_posted
from fluent_comments.utils import split_words
try:
from urllib.parse import urljoin # Python 3
except ImportError:
from urlparse import urljoin # Python 2
logger = logging.getLogger(__name__)
# Akismet code originally based on django-comments-spamfighter.
__all__ = (
'FluentCommentsModerator',
'moderate_model',
'get_model_moderator',
'comments_are_open',
'comments_are_moderated',
)
class FluentCommentsModerator(CommentModerator):
    """Moderation policy for fluent-comments.

    Extends django_comments' ``CommentModerator`` with a bad-word filter and
    optional Akismet spam checking.
    """

    auto_close_field = None
    auto_moderate_field = None
    enable_field = None
    close_after = appsettings.FLUENT_COMMENTS_CLOSE_AFTER_DAYS
    moderate_after = appsettings.FLUENT_COMMENTS_MODERATE_AFTER_DAYS
    email_notification = appsettings.FLUENT_COMMENTS_USE_EMAIL_NOTIFICATION
    # NOTE: this boolean shadows the imported akismet_check() function at
    # class level; inside the methods the bare name still resolves to the
    # module-level function.
    akismet_check = appsettings.FLUENT_CONTENTS_USE_AKISMET
    akismet_check_action = appsettings.FLUENT_COMMENTS_AKISMET_ACTION
    moderate_bad_words = set(appsettings.FLUENT_COMMENTS_MODERATE_BAD_WORDS)

    def allow(self, comment, content_object, request):
        """Return True when the comment may be posted on the object at all."""
        # Standard Django policy checks first (enable_field, close_after, ...).
        if not super(FluentCommentsModerator, self).allow(comment, content_object, request):
            return False
        if not self.akismet_check:
            return True
        # Reject outright only when configured to delete spam.
        verdict = akismet_check(comment, content_object, request)
        if self.akismet_check_action == 'delete' and verdict in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam):
            return False
        if self.akismet_check_action == 'auto' and verdict == SpamStatus.DefiniteSpam:
            return False
        return True

    def moderate(self, comment, content_object, request):
        """Return True when the comment must await approval (marked non-public)."""
        # Run Akismet before the date-based checks, so spam gets flagged as
        # spam instead of merely "posted after close_after" — staff then see
        # which comments actually need moderation.
        if self.akismet_check:
            verdict = akismet_check(comment, content_object, request)
            if verdict:
                # action=delete normally never reaches this point, unless the
                # service had problems earlier.
                if verdict in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam) and \
                        self.akismet_check_action in ('auto', 'soft_delete', 'delete'):
                    comment.is_removed = True  # extra soft-delete marker
                # SpamStatus.Unknown or action=moderate lands in the queue.
                return True
        # Date-based moderation from the parent class.
        if super(FluentCommentsModerator, self).moderate(comment, content_object, request):
            return True
        # Bad-word filter (short-circuits when no words are configured).
        if self.moderate_bad_words and self.moderate_bad_words.intersection(split_words(comment.comment)):
            return True
        # Final Akismet pass for actions that moderate instead of deleting.
        if self.akismet_check and self.akismet_check_action not in ('soft_delete', 'delete'):
            if akismet_check(comment, content_object, request):
                return True
        return False

    def email(self, comment, content_object, request):
        """Send the 'comment posted' notification e-mail, if enabled."""
        if self.email_notification:
            send_comment_posted(comment, request)
class NullModerator(FluentCommentsModerator):
    """Moderator that disables moderation entirely: every comment is allowed.

    Has the same effect as not registering a moderator at all; usable as
    ``FLUENT_COMMENTS_DEFAULT_MODERATOR``.
    """

    def allow(self, comment, content_object, request):
        # Never block a comment.
        return True

    def moderate(self, comment, content_object, request):
        # Never queue a comment for approval.
        logger.info("Unconditionally allow comment, no default moderation set.")
        return False
class AlwaysModerate(FluentCommentsModerator):
    """Moderator that queues every comment for approval.

    Usable as ``FLUENT_COMMENTS_DEFAULT_MODERATOR``.
    """

    def moderate(self, comment, content_object, request):
        # Still invoke the parent policy for its side effects (Akismet may
        # mark the comment as removed), but moderate unconditionally.
        super(AlwaysModerate, self).moderate(comment, content_object, request)
        return True
class AlwaysDeny(FluentCommentsModerator):
    """Moderator that rejects every comment outright.

    Usable as ``FLUENT_COMMENTS_DEFAULT_MODERATOR``.
    """

    def allow(self, comment, content_object, request):
        # Refuse the comment and leave a trace in the log.
        logger.warning(
            "Discarded comment on unregistered model '%s'",
            content_object.__class__.__name__
        )
        return False
def get_model_moderator(model):
    """Return the moderator class registered for ``model``, or None.

    :param model: The Django model registered with :func:`moderate_model`
    :type model: :class:`~django.db.models.Model`
    :return: The moderator class which holds the moderation policies.
    :rtype: :class:`~django_comments.moderation.CommentModerator` or None
    """
    # The registry is a plain dict, so .get() gives the same None-on-miss
    # behavior as the try/except KeyError idiom.
    return moderator._registry.get(model)
def comments_are_open(content_object):
    """Return whether comments are still open for a given target object."""
    # Local renamed so it does not shadow the imported `moderator` registry.
    model_moderator = get_model_moderator(content_object.__class__)
    if model_moderator is None:
        # No moderation registered: comments never close.
        return True
    # Reuse the plain Django policy (enable_field / auto_close_field /
    # close_after) without any subclass overrides.
    return CommentModerator.allow(model_moderator, None, content_object, None)
def comments_are_moderated(content_object):
    """Return whether comments are moderated for a given target object."""
    # Local renamed so it does not shadow the imported `moderator` registry.
    model_moderator = get_model_moderator(content_object.__class__)
    if model_moderator is None:
        # No moderation registered: comments are never held for approval.
        return False
    # Reuse the plain Django policy (auto_moderate_field / moderate_after)
    # without any subclass overrides.
    return CommentModerator.moderate(model_moderator, None, content_object, None)
|
django-fluent/django-fluent-comments | fluent_comments/moderation.py | comments_are_open | python | def comments_are_open(content_object):
moderator = get_model_moderator(content_object.__class__)
if moderator is None:
return True
# Check the 'enable_field', 'auto_close_field' and 'close_after',
# by reusing the basic Django policies.
return CommentModerator.allow(moderator, None, content_object, None) | Return whether comments are still open for a given target object. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/moderation.py#L189-L199 | [
"def get_model_moderator(model):\n \"\"\"\n Return the moderator class that is registered with a content object.\n If there is no associated moderator with a class, None is returned.\n\n :param model: The Django model registered with :func:`moderate_model`\n :type model: :class:`~django.db.models.Model`\n :return: The moderator class which holds the moderation policies.\n :rtype: :class:`~django_comments.moderation.CommentModerator`\n \"\"\"\n try:\n return moderator._registry[model]\n except KeyError:\n return None\n"
] | from __future__ import absolute_import
import logging
from akismet import SpamStatus
from django_comments.moderation import moderator, CommentModerator
from fluent_comments import appsettings
from fluent_comments.akismet import akismet_check
from fluent_comments.email import send_comment_posted
from fluent_comments.utils import split_words
try:
from urllib.parse import urljoin # Python 3
except ImportError:
from urlparse import urljoin # Python 2
logger = logging.getLogger(__name__)
# Akismet code originally based on django-comments-spamfighter.
__all__ = (
'FluentCommentsModerator',
'moderate_model',
'get_model_moderator',
'comments_are_open',
'comments_are_moderated',
)
class FluentCommentsModerator(CommentModerator):
    """
    Moderation policy for fluent-comments.

    Combines the stock django_comments checks (inherited from
    :class:`~django_comments.moderation.CommentModerator`) with optional
    Akismet spam detection and a bad-words filter, all driven by the
    ``FLUENT_COMMENTS_*`` settings.
    """
    # Per-model hooks; left as None here and filled in per model by
    # moderate_model() below (see the ``attrs`` dict it builds).
    auto_close_field = None
    auto_moderate_field = None
    enable_field = None

    # Policy knobs, all sourced from the project settings.
    close_after = appsettings.FLUENT_COMMENTS_CLOSE_AFTER_DAYS
    moderate_after = appsettings.FLUENT_COMMENTS_MODERATE_AFTER_DAYS
    email_notification = appsettings.FLUENT_COMMENTS_USE_EMAIL_NOTIFICATION
    # NOTE: the setting name really is FLUENT_CONTENTS_USE_AKISMET (not
    # *_COMMENTS_*) — presumably historical; confirm before "fixing".
    akismet_check = appsettings.FLUENT_CONTENTS_USE_AKISMET
    akismet_check_action = appsettings.FLUENT_COMMENTS_AKISMET_ACTION
    moderate_bad_words = set(appsettings.FLUENT_COMMENTS_MODERATE_BAD_WORDS)

    def allow(self, comment, content_object, request):
        """
        Determine whether a given comment is allowed to be posted on a given object.

        Returns ``True`` if the comment should be allowed, ``False`` otherwise.
        """
        # Parent class check (enable_field / close_after policies).
        if not super(FluentCommentsModerator, self).allow(comment, content_object, request):
            return False

        # Akismet check: hard-reject only for 'delete' (probable or definite
        # spam) or 'auto' (definite spam only).
        if self.akismet_check:
            akismet_result = akismet_check(comment, content_object, request)
            if self.akismet_check_action == 'delete' and akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam):
                return False  # Akismet marked the comment as spam.
            elif self.akismet_check_action == 'auto' and akismet_result == SpamStatus.DefiniteSpam:
                return False  # Clearly spam

        return True

    def moderate(self, comment, content_object, request):
        """
        Determine whether a given comment on a given object should be allowed to show up immediately,
        or should be marked non-public and await approval.

        Returns ``True`` if the comment should be moderated (marked non-public), ``False`` otherwise.
        """
        # Soft delete checks are done first, so these comments are not mistakenly "just moderated"
        # for expiring the `close_after` date, but correctly get marked as spam instead.
        # This helps staff to quickly see which comments need real moderation.
        if self.akismet_check:
            # akismet_check() caches its verdict on the comment object, so the
            # second call further down does not contact the service again.
            akismet_result = akismet_check(comment, content_object, request)
            if akismet_result:
                # Typically action=delete never gets here, unless the service was having problems.
                if akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam) and \
                   self.akismet_check_action in ('auto', 'soft_delete', 'delete'):
                    comment.is_removed = True  # Set extra marker

                # SpamStatus.Unknown or action=moderate will end up in the moderation queue
                return True

        # Parent class check (auto_moderate_field / moderate_after policies).
        if super(FluentCommentsModerator, self).moderate(comment, content_object, request):
            return True

        # Bad words check against the configured word set.
        if self.moderate_bad_words:
            input_words = split_words(comment.comment)
            if self.moderate_bad_words.intersection(input_words):
                return True

        # Akismet check
        if self.akismet_check and self.akismet_check_action not in ('soft_delete', 'delete'):
            # Return True if akismet marks this comment as spam and we want to moderate it.
            if akismet_check(comment, content_object, request):
                return True

        return False

    def email(self, comment, content_object, request):
        """
        Overwritten for a better email notification.
        """
        if not self.email_notification:
            return
        send_comment_posted(comment, request)
class NullModerator(FluentCommentsModerator):
    """
    Moderation policy that behaves as if no moderator were registered at all:
    every comment is accepted and nothing is ever queued for approval.
    Intended for use as ``FLUENT_COMMENTS_DEFAULT_MODERATOR``.
    """

    def allow(self, comment, content_object, request):
        # Accept unconditionally; skip Akismet and all parent checks.
        return True

    def moderate(self, comment, content_object, request):
        # Leave a trace for operators, then publish immediately.
        logger.info("Unconditionally allow comment, no default moderation set.")
        return False
class AlwaysModerate(FluentCommentsModerator):
    """
    Moderation policy that sends every comment into the moderation queue.
    Intended for use as ``FLUENT_COMMENTS_DEFAULT_MODERATOR``.
    """

    def moderate(self, comment, content_object, request):
        # Run the regular checks first so Akismet can still flag spam
        # (it may set ``comment.is_removed``), then force moderation anyway.
        super(AlwaysModerate, self).moderate(comment, content_object, request)
        return True
class AlwaysDeny(FluentCommentsModerator):
    """
    Moderation policy that rejects every comment outright.
    Intended for use as ``FLUENT_COMMENTS_DEFAULT_MODERATOR``.
    """

    def allow(self, comment, content_object, request):
        model_name = content_object.__class__.__name__
        logger.warning(
            "Discarded comment on unregistered model '%s'",
            model_name
        )
        return False
def moderate_model(ParentModel, publication_date_field=None, enable_comments_field=None):
    """
    Register a parent model (e.g. ``Blog`` or ``Article``) that should receive comment moderation.

    :param ParentModel: The parent model, e.g. a ``Blog`` or ``Article`` model.
    :param publication_date_field: Name of the :class:`~django.db.models.DateTimeField` on the
        parent model which stores the publication date.
    :type publication_date_field: str
    :param enable_comments_field: Name of the :class:`~django.db.models.BooleanField` on the
        parent model which stores whether comments are enabled.
    :type enable_comments_field: str
    """
    # Create a model-specific moderator subclass on the fly and register it
    # with django_comments' global moderation registry.
    policy_attrs = {
        'auto_close_field': publication_date_field,
        'auto_moderate_field': publication_date_field,
        'enable_field': enable_comments_field,
    }
    moderation_class = type(
        ParentModel.__name__ + 'Moderator', (FluentCommentsModerator,), policy_attrs
    )
    moderator.register(ParentModel, moderation_class)
def get_model_moderator(model):
    """
    Return the moderator class that is registered with a content object.
    If there is no associated moderator with a class, None is returned.

    :param model: The Django model registered with :func:`moderate_model`
    :type model: :class:`~django.db.models.Model`
    :return: The moderator class which holds the moderation policies.
    :rtype: :class:`~django_comments.moderation.CommentModerator`
    """
    # dict.get() yields the same "missing -> None" semantics as the
    # previous try/except KeyError lookup.
    return moderator._registry.get(model)
def comments_are_moderated(content_object):
    """
    Return whether comments are moderated for a given target object.
    """
    # Local renamed from ``moderator`` to avoid shadowing the imported
    # django_comments registry module of the same name.
    moderator_class = get_model_moderator(content_object.__class__)
    if moderator_class is None:
        return False

    # Reuse the basic Django policies ('auto_moderate_field',
    # 'moderate_after') via the unbound base implementation.
    return CommentModerator.moderate(moderator_class, None, content_object, None)
|
django-fluent/django-fluent-comments | fluent_comments/moderation.py | comments_are_moderated | python | def comments_are_moderated(content_object):
moderator = get_model_moderator(content_object.__class__)
if moderator is None:
return False
# Check the 'auto_moderate_field', 'moderate_after',
# by reusing the basic Django policies.
return CommentModerator.moderate(moderator, None, content_object, None) | Return whether comments are moderated for a given target object. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/moderation.py#L202-L212 | [
"def get_model_moderator(model):\n \"\"\"\n Return the moderator class that is registered with a content object.\n If there is no associated moderator with a class, None is returned.\n\n :param model: The Django model registered with :func:`moderate_model`\n :type model: :class:`~django.db.models.Model`\n :return: The moderator class which holds the moderation policies.\n :rtype: :class:`~django_comments.moderation.CommentModerator`\n \"\"\"\n try:\n return moderator._registry[model]\n except KeyError:\n return None\n"
] | from __future__ import absolute_import
import logging
from akismet import SpamStatus
from django_comments.moderation import moderator, CommentModerator
from fluent_comments import appsettings
from fluent_comments.akismet import akismet_check
from fluent_comments.email import send_comment_posted
from fluent_comments.utils import split_words
try:
from urllib.parse import urljoin # Python 3
except ImportError:
from urlparse import urljoin # Python 2
logger = logging.getLogger(__name__)
# Akismet code originally based on django-comments-spamfighter.
__all__ = (
'FluentCommentsModerator',
'moderate_model',
'get_model_moderator',
'comments_are_open',
'comments_are_moderated',
)
class FluentCommentsModerator(CommentModerator):
    """
    Moderation policy for fluent-comments.

    Layers Akismet spam detection and a configurable bad-words filter on top
    of the default django_comments moderation checks.
    """
    # Model-field hooks consumed by the base class; populated per model by
    # moderate_model().
    auto_close_field = None
    auto_moderate_field = None
    enable_field = None

    # Settings-driven configuration.
    close_after = appsettings.FLUENT_COMMENTS_CLOSE_AFTER_DAYS
    moderate_after = appsettings.FLUENT_COMMENTS_MODERATE_AFTER_DAYS
    email_notification = appsettings.FLUENT_COMMENTS_USE_EMAIL_NOTIFICATION
    # Class attribute shadows the imported akismet_check() function only via
    # ``self.``; bare calls inside the methods still reach the module function.
    akismet_check = appsettings.FLUENT_CONTENTS_USE_AKISMET
    akismet_check_action = appsettings.FLUENT_COMMENTS_AKISMET_ACTION
    moderate_bad_words = set(appsettings.FLUENT_COMMENTS_MODERATE_BAD_WORDS)

    def allow(self, comment, content_object, request):
        """
        Determine whether a given comment is allowed to be posted on a given object.

        Returns ``True`` if the comment should be allowed, ``False`` otherwise.
        """
        # Parent class check
        if not super(FluentCommentsModerator, self).allow(comment, content_object, request):
            return False

        # Akismet check — only the harsher actions reject at posting time.
        if self.akismet_check:
            akismet_result = akismet_check(comment, content_object, request)
            if self.akismet_check_action == 'delete' and akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam):
                return False  # Akismet marked the comment as spam.
            elif self.akismet_check_action == 'auto' and akismet_result == SpamStatus.DefiniteSpam:
                return False  # Clearly spam

        return True

    def moderate(self, comment, content_object, request):
        """
        Determine whether a given comment on a given object should be allowed to show up immediately,
        or should be marked non-public and await approval.

        Returns ``True`` if the comment should be moderated (marked non-public), ``False`` otherwise.
        """
        # Soft delete checks are done first, so these comments are not mistakenly "just moderated"
        # for expiring the `close_after` date, but correctly get marked as spam instead.
        # This helps staff to quickly see which comments need real moderation.
        if self.akismet_check:
            akismet_result = akismet_check(comment, content_object, request)
            if akismet_result:
                # Typically action=delete never gets here, unless the service was having problems.
                if akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam) and \
                   self.akismet_check_action in ('auto', 'soft_delete', 'delete'):
                    comment.is_removed = True  # Set extra marker

                # SpamStatus.Unknown or action=moderate will end up in the moderation queue
                return True

        # Parent class check
        if super(FluentCommentsModerator, self).moderate(comment, content_object, request):
            return True

        # Bad words check
        if self.moderate_bad_words:
            input_words = split_words(comment.comment)
            if self.moderate_bad_words.intersection(input_words):
                return True

        # Akismet check (result was cached on the comment by the first call).
        if self.akismet_check and self.akismet_check_action not in ('soft_delete', 'delete'):
            # Return True if akismet marks this comment as spam and we want to moderate it.
            if akismet_check(comment, content_object, request):
                return True

        return False

    def email(self, comment, content_object, request):
        """
        Overwritten for a better email notification.
        """
        if not self.email_notification:
            return
        send_comment_posted(comment, request)
class NullModerator(FluentCommentsModerator):
    """
    A no-op moderation policy: equivalent to having no moderator registered.
    Every comment is allowed and none is queued for approval.
    Usable as ``FLUENT_COMMENTS_DEFAULT_MODERATOR``.
    """

    def allow(self, comment, content_object, request):
        return True  # never reject

    def moderate(self, comment, content_object, request):
        logger.info("Unconditionally allow comment, no default moderation set.")
        return False  # never queue for moderation
class AlwaysModerate(FluentCommentsModerator):
    """
    Policy that always marks comments as requiring moderation.
    Usable as ``FLUENT_COMMENTS_DEFAULT_MODERATOR``.
    """

    def moderate(self, comment, content_object, request):
        # Still invoke the inherited checks so Akismet can tag spam first;
        # the outcome is forced to "moderate" regardless.
        super(AlwaysModerate, self).moderate(comment, content_object, request)
        return True
class AlwaysDeny(FluentCommentsModerator):
    """
    Policy that refuses every comment.
    Usable as ``FLUENT_COMMENTS_DEFAULT_MODERATOR``.
    """

    def allow(self, comment, content_object, request):
        target_model = content_object.__class__.__name__
        logger.warning(
            "Discarded comment on unregistered model '%s'",
            target_model
        )
        return False
def moderate_model(ParentModel, publication_date_field=None, enable_comments_field=None):
    """
    Register a parent model (e.g. ``Blog`` or ``Article``) that should receive comment moderation.

    :param ParentModel: The parent model, e.g. a ``Blog`` or ``Article`` model.
    :param publication_date_field: Name of the :class:`~django.db.models.DateTimeField` on the
        parent model which stores the publication date.
    :type publication_date_field: str
    :param enable_comments_field: Name of the :class:`~django.db.models.BooleanField` on the
        parent model which stores whether comments are enabled.
    :type enable_comments_field: str
    """
    # Derive a dedicated moderator subclass for this model and hand it to the
    # django_comments registry.
    field_hooks = {
        'auto_close_field': publication_date_field,
        'auto_moderate_field': publication_date_field,
        'enable_field': enable_comments_field,
    }
    moderation_class = type(
        ParentModel.__name__ + 'Moderator', (FluentCommentsModerator,), field_hooks
    )
    moderator.register(ParentModel, moderation_class)
def get_model_moderator(model):
    """
    Return the moderator class that is registered with a content object.
    If there is no associated moderator with a class, None is returned.

    :param model: The Django model registered with :func:`moderate_model`
    :type model: :class:`~django.db.models.Model`
    :return: The moderator class which holds the moderation policies.
    :rtype: :class:`~django_comments.moderation.CommentModerator`
    """
    # Equivalent to the try/except KeyError form: missing keys map to None.
    return moderator._registry.get(model)
def comments_are_open(content_object):
    """
    Return whether comments are still open for a given target object.
    """
    # Local renamed from ``moderator`` so it no longer shadows the imported
    # registry module.
    moderator_class = get_model_moderator(content_object.__class__)
    if moderator_class is None:
        # Unregistered models never close their comments.
        return True

    # Reuse the basic Django policies ('enable_field', 'auto_close_field',
    # 'close_after') via the unbound base implementation.
    return CommentModerator.allow(moderator_class, None, content_object, None)
|
django-fluent/django-fluent-comments | fluent_comments/moderation.py | FluentCommentsModerator.allow | python | def allow(self, comment, content_object, request):
# Parent class check
if not super(FluentCommentsModerator, self).allow(comment, content_object, request):
return False
# Akismet check
if self.akismet_check:
akismet_result = akismet_check(comment, content_object, request)
if self.akismet_check_action == 'delete' and akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam):
return False # Akismet marked the comment as spam.
elif self.akismet_check_action == 'auto' and akismet_result == SpamStatus.DefiniteSpam:
return False # Clearly spam
return True | Determine whether a given comment is allowed to be posted on a given object.
Returns ``True`` if the comment should be allowed, ``False`` otherwise. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/moderation.py#L46-L64 | null | class FluentCommentsModerator(CommentModerator):
"""
Moderation policy for fluent-comments.
"""
auto_close_field = None
auto_moderate_field = None
enable_field = None
close_after = appsettings.FLUENT_COMMENTS_CLOSE_AFTER_DAYS
moderate_after = appsettings.FLUENT_COMMENTS_MODERATE_AFTER_DAYS
email_notification = appsettings.FLUENT_COMMENTS_USE_EMAIL_NOTIFICATION
akismet_check = appsettings.FLUENT_CONTENTS_USE_AKISMET
akismet_check_action = appsettings.FLUENT_COMMENTS_AKISMET_ACTION
moderate_bad_words = set(appsettings.FLUENT_COMMENTS_MODERATE_BAD_WORDS)
    def moderate(self, comment, content_object, request):
        """
        Determine whether a given comment on a given object should be allowed to show up immediately,
        or should be marked non-public and await approval.

        Returns ``True`` if the comment should be moderated (marked non-public), ``False`` otherwise.
        """
        # Soft delete checks are done first, so these comments are not mistakenly "just moderated"
        # for expiring the `close_after` date, but correctly get marked as spam instead.
        # This helps staff to quickly see which comments need real moderation.
        if self.akismet_check:
            # akismet_check() caches its verdict on the comment, so the second
            # call further down will not hit the service again.
            akismet_result = akismet_check(comment, content_object, request)
            if akismet_result:
                # Typically action=delete never gets here, unless the service was having problems.
                if akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam) and \
                   self.akismet_check_action in ('auto', 'soft_delete', 'delete'):
                    comment.is_removed = True  # Set extra marker

                # SpamStatus.Unknown or action=moderate will end up in the moderation queue
                return True

        # Parent class check (auto_moderate_field / moderate_after).
        if super(FluentCommentsModerator, self).moderate(comment, content_object, request):
            return True

        # Bad words check against the configured word set.
        if self.moderate_bad_words:
            input_words = split_words(comment.comment)
            if self.moderate_bad_words.intersection(input_words):
                return True

        # Akismet check
        if self.akismet_check and self.akismet_check_action not in ('soft_delete', 'delete'):
            # Return True if akismet marks this comment as spam and we want to moderate it.
            if akismet_check(comment, content_object, request):
                return True

        return False
def email(self, comment, content_object, request):
"""
Overwritten for a better email notification.
"""
if not self.email_notification:
return
send_comment_posted(comment, request)
|
django-fluent/django-fluent-comments | fluent_comments/moderation.py | FluentCommentsModerator.moderate | python | def moderate(self, comment, content_object, request):
# Soft delete checks are done first, so these comments are not mistakenly "just moderated"
# for expiring the `close_after` date, but correctly get marked as spam instead.
# This helps staff to quickly see which comments need real moderation.
if self.akismet_check:
akismet_result = akismet_check(comment, content_object, request)
if akismet_result:
# Typically action=delete never gets here, unless the service was having problems.
if akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam) and \
self.akismet_check_action in ('auto', 'soft_delete', 'delete'):
comment.is_removed = True # Set extra marker
# SpamStatus.Unknown or action=moderate will end up in the moderation queue
return True
# Parent class check
if super(FluentCommentsModerator, self).moderate(comment, content_object, request):
return True
# Bad words check
if self.moderate_bad_words:
input_words = split_words(comment.comment)
if self.moderate_bad_words.intersection(input_words):
return True
# Akismet check
if self.akismet_check and self.akismet_check_action not in ('soft_delete', 'delete'):
# Return True if akismet marks this comment as spam and we want to moderate it.
if akismet_check(comment, content_object, request):
return True
return False | Determine whether a given comment on a given object should be allowed to show up immediately,
or should be marked non-public and await approval.
Returns ``True`` if the comment should be moderated (marked non-public), ``False`` otherwise. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/moderation.py#L66-L104 | [
"def akismet_check(comment, content_object, request):\n \"\"\"\n Connects to Akismet and evaluates to True if Akismet marks this comment as spam.\n\n :rtype: akismet.SpamStatus\n \"\"\"\n # Return previously cached response\n akismet_result = getattr(comment, '_akismet_result_', None)\n if akismet_result is not None:\n return akismet_result\n\n # Get Akismet data\n AKISMET_API_KEY = appsettings.AKISMET_API_KEY\n if not AKISMET_API_KEY:\n raise ImproperlyConfigured('You must set AKISMET_API_KEY to use comment moderation with Akismet.')\n\n current_domain = get_current_site(request).domain\n auto_blog_url = '{0}://{1}/'.format(request.is_secure() and 'https' or 'http', current_domain)\n blog_url = appsettings.AKISMET_BLOG_URL or auto_blog_url\n\n akismet = Akismet(\n AKISMET_API_KEY,\n blog=blog_url,\n is_test=int(bool(appsettings.AKISMET_IS_TEST)),\n application_user_agent='django-fluent-comments/{0}'.format(fluent_comments.__version__),\n )\n\n akismet_data = _get_akismet_data(blog_url, comment, content_object, request)\n akismet_result = akismet.check(**akismet_data) # raises AkismetServerError when key is invalid\n setattr(comment, \"_akismet_result_\", akismet_result)\n return akismet_result\n"
] | class FluentCommentsModerator(CommentModerator):
"""
Moderation policy for fluent-comments.
"""
auto_close_field = None
auto_moderate_field = None
enable_field = None
close_after = appsettings.FLUENT_COMMENTS_CLOSE_AFTER_DAYS
moderate_after = appsettings.FLUENT_COMMENTS_MODERATE_AFTER_DAYS
email_notification = appsettings.FLUENT_COMMENTS_USE_EMAIL_NOTIFICATION
akismet_check = appsettings.FLUENT_CONTENTS_USE_AKISMET
akismet_check_action = appsettings.FLUENT_COMMENTS_AKISMET_ACTION
moderate_bad_words = set(appsettings.FLUENT_COMMENTS_MODERATE_BAD_WORDS)
def allow(self, comment, content_object, request):
"""
Determine whether a given comment is allowed to be posted on a given object.
Returns ``True`` if the comment should be allowed, ``False`` otherwise.
"""
# Parent class check
if not super(FluentCommentsModerator, self).allow(comment, content_object, request):
return False
# Akismet check
if self.akismet_check:
akismet_result = akismet_check(comment, content_object, request)
if self.akismet_check_action == 'delete' and akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam):
return False # Akismet marked the comment as spam.
elif self.akismet_check_action == 'auto' and akismet_result == SpamStatus.DefiniteSpam:
return False # Clearly spam
return True
def email(self, comment, content_object, request):
"""
Overwritten for a better email notification.
"""
if not self.email_notification:
return
send_comment_posted(comment, request)
|
django-fluent/django-fluent-comments | fluent_comments/moderation.py | FluentCommentsModerator.email | python | def email(self, comment, content_object, request):
if not self.email_notification:
return
send_comment_posted(comment, request) | Overwritten for a better email notification. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/moderation.py#L106-L113 | [
"def send_comment_posted(comment, request):\n \"\"\"\n Send the email to staff that an comment was posted.\n\n While the django_comments module has email support,\n it doesn't pass the 'request' to the context.\n This also changes the subject to show the page title.\n \"\"\"\n recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]\n site = get_current_site(request)\n content_object = comment.content_object\n content_title = force_text(content_object)\n\n if comment.is_removed:\n subject = u'[{0}] Spam comment on \"{1}\"'.format(site.name, content_title)\n elif not comment.is_public:\n subject = u'[{0}] Moderated comment on \"{1}\"'.format(site.name, content_title)\n else:\n subject = u'[{0}] New comment posted on \"{1}\"'.format(site.name, content_title)\n\n context = {\n 'site': site,\n 'comment': comment,\n 'content_object': content_object\n }\n\n message = render_to_string(\"comments/comment_notification_email.txt\", context, request=request)\n if appsettings.FLUENT_COMMENTS_MULTIPART_EMAILS:\n html_message = render_to_string(\"comments/comment_notification_email.html\", context, request=request)\n else:\n html_message = None\n\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,\n recipient_list, fail_silently=True, html_message=html_message)\n"
] | class FluentCommentsModerator(CommentModerator):
"""
Moderation policy for fluent-comments.
"""
auto_close_field = None
auto_moderate_field = None
enable_field = None
close_after = appsettings.FLUENT_COMMENTS_CLOSE_AFTER_DAYS
moderate_after = appsettings.FLUENT_COMMENTS_MODERATE_AFTER_DAYS
email_notification = appsettings.FLUENT_COMMENTS_USE_EMAIL_NOTIFICATION
akismet_check = appsettings.FLUENT_CONTENTS_USE_AKISMET
akismet_check_action = appsettings.FLUENT_COMMENTS_AKISMET_ACTION
moderate_bad_words = set(appsettings.FLUENT_COMMENTS_MODERATE_BAD_WORDS)
    def allow(self, comment, content_object, request):
        """
        Determine whether a given comment is allowed to be posted on a given object.

        Returns ``True`` if the comment should be allowed, ``False`` otherwise.
        """
        # Parent class check (inherited django_comments policies).
        if not super(FluentCommentsModerator, self).allow(comment, content_object, request):
            return False

        # Akismet check: only 'delete' and 'auto' actions reject at post time.
        if self.akismet_check:
            akismet_result = akismet_check(comment, content_object, request)
            if self.akismet_check_action == 'delete' and akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam):
                return False  # Akismet marked the comment as spam.
            elif self.akismet_check_action == 'auto' and akismet_result == SpamStatus.DefiniteSpam:
                return False  # Clearly spam

        return True
    def moderate(self, comment, content_object, request):
        """
        Determine whether a given comment on a given object should be allowed to show up immediately,
        or should be marked non-public and await approval.

        Returns ``True`` if the comment should be moderated (marked non-public), ``False`` otherwise.
        """
        # Soft delete checks are done first, so these comments are not mistakenly "just moderated"
        # for expiring the `close_after` date, but correctly get marked as spam instead.
        # This helps staff to quickly see which comments need real moderation.
        if self.akismet_check:
            akismet_result = akismet_check(comment, content_object, request)
            if akismet_result:
                # Typically action=delete never gets here, unless the service was having problems.
                if akismet_result in (SpamStatus.ProbableSpam, SpamStatus.DefiniteSpam) and \
                   self.akismet_check_action in ('auto', 'soft_delete', 'delete'):
                    comment.is_removed = True  # Set extra marker

                # SpamStatus.Unknown or action=moderate will end up in the moderation queue
                return True

        # Parent class check
        if super(FluentCommentsModerator, self).moderate(comment, content_object, request):
            return True

        # Bad words check
        if self.moderate_bad_words:
            input_words = split_words(comment.comment)
            if self.moderate_bad_words.intersection(input_words):
                return True

        # Akismet check (uses the verdict cached on the comment above).
        if self.akismet_check and self.akismet_check_action not in ('soft_delete', 'delete'):
            # Return True if akismet marks this comment as spam and we want to moderate it.
            if akismet_check(comment, content_object, request):
                return True

        return False
|
django-fluent/django-fluent-comments | fluent_comments/email.py | send_comment_posted | python | def send_comment_posted(comment, request):
recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]
site = get_current_site(request)
content_object = comment.content_object
content_title = force_text(content_object)
if comment.is_removed:
subject = u'[{0}] Spam comment on "{1}"'.format(site.name, content_title)
elif not comment.is_public:
subject = u'[{0}] Moderated comment on "{1}"'.format(site.name, content_title)
else:
subject = u'[{0}] New comment posted on "{1}"'.format(site.name, content_title)
context = {
'site': site,
'comment': comment,
'content_object': content_object
}
message = render_to_string("comments/comment_notification_email.txt", context, request=request)
if appsettings.FLUENT_COMMENTS_MULTIPART_EMAILS:
html_message = render_to_string("comments/comment_notification_email.html", context, request=request)
else:
html_message = None
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,
    recipient_list, fail_silently=True, html_message=html_message) | Send the email to staff that a comment was posted.
While the django_comments module has email support,
it doesn't pass the 'request' to the context.
This also changes the subject to show the page title. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/email.py#L9-L42 | null | from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from fluent_comments import appsettings
|
django-fluent/django-fluent-comments | fluent_comments/templatetags/fluent_comments_tags.py | AjaxCommentTags.parse | python | def parse(cls, parser, token):
# Process the template line.
tag_name, args, kwargs = parse_token_kwargs(
parser, token,
allowed_kwargs=cls.allowed_kwargs,
compile_args=False, # Only overrule here, keep at render() phase.
compile_kwargs=cls.compile_kwargs
)
# remove "for" keyword, so all other args can be resolved in render().
if args[0] == 'for':
args.pop(0)
# And apply the compilation afterwards
for i in range(len(args)):
args[i] = parser.compile_filter(args[i])
cls.validate_args(tag_name, *args, **kwargs)
return cls(tag_name, *args, **kwargs) | Custom parsing for the ``{% ajax_comment_tags for ... %}`` tag. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/templatetags/fluent_comments_tags.py#L26-L47 | null | class AjaxCommentTags(BaseInclusionNode):
"""
Custom inclusion node with some special parsing features.
Using the ``@register.inclusion_tag`` is not sufficient,
because some keywords require custom parsing.
"""
template_name = "fluent_comments/templatetags/ajax_comment_tags.html"
min_args = 1
max_args = 1
@classmethod
def get_context_data(self, parent_context, *tag_args, **tag_kwargs):
"""
The main logic for the inclusion node, analogous to ``@register.inclusion_node``.
"""
target_object = tag_args[0] # moved one spot due to .pop(0)
new_context = {
'STATIC_URL': parent_context.get('STATIC_URL', None),
'USE_THREADEDCOMMENTS': appsettings.USE_THREADEDCOMMENTS,
'target_object': target_object,
}
# Be configuration independent:
if new_context['STATIC_URL'] is None:
try:
request = parent_context['request']
except KeyError:
new_context.update({'STATIC_URL': settings.STATIC_URL})
else:
new_context.update(context_processors.static(request))
return new_context
|
django-fluent/django-fluent-comments | fluent_comments/templatetags/fluent_comments_tags.py | AjaxCommentTags.get_context_data | python | def get_context_data(self, parent_context, *tag_args, **tag_kwargs):
target_object = tag_args[0] # moved one spot due to .pop(0)
new_context = {
'STATIC_URL': parent_context.get('STATIC_URL', None),
'USE_THREADEDCOMMENTS': appsettings.USE_THREADEDCOMMENTS,
'target_object': target_object,
}
# Be configuration independent:
if new_context['STATIC_URL'] is None:
try:
request = parent_context['request']
except KeyError:
new_context.update({'STATIC_URL': settings.STATIC_URL})
else:
new_context.update(context_processors.static(request))
return new_context | The main logic for the inclusion node, analogous to ``@register.inclusion_node``. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/templatetags/fluent_comments_tags.py#L49-L69 | null | class AjaxCommentTags(BaseInclusionNode):
"""
Custom inclusion node with some special parsing features.
Using the ``@register.inclusion_tag`` is not sufficient,
because some keywords require custom parsing.
"""
template_name = "fluent_comments/templatetags/ajax_comment_tags.html"
min_args = 1
max_args = 1
@classmethod
def parse(cls, parser, token):
"""
Custom parsing for the ``{% ajax_comment_tags for ... %}`` tag.
"""
# Process the template line.
tag_name, args, kwargs = parse_token_kwargs(
parser, token,
allowed_kwargs=cls.allowed_kwargs,
compile_args=False, # Only overrule here, keep at render() phase.
compile_kwargs=cls.compile_kwargs
)
# remove "for" keyword, so all other args can be resolved in render().
if args[0] == 'for':
args.pop(0)
# And apply the compilation afterwards
for i in range(len(args)):
args[i] = parser.compile_filter(args[i])
cls.validate_args(tag_name, *args, **kwargs)
return cls(tag_name, *args, **kwargs)
|
django-fluent/django-fluent-comments | fluent_comments/utils.py | get_comment_template_name | python | def get_comment_template_name(comment):
ctype = ContentType.objects.get_for_id(comment.content_type_id)
return [
"comments/%s/%s/comment.html" % (ctype.app_label, ctype.model),
"comments/%s/comment.html" % ctype.app_label,
"comments/comment.html"
] | Internal function for the rendering of comments. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/utils.py#L14-L23 | null | """
Internal utils
"""
import re
from django.contrib.contenttypes.models import ContentType
from fluent_comments import appsettings
RE_INTERPUNCTION = re.compile(r'\W+')
def get_comment_context_data(comment, action=None):
"""
Internal function for the rendering of comments.
"""
return {
'comment': comment,
'action': action,
'preview': (action == 'preview'),
'USE_THREADEDCOMMENTS': appsettings.USE_THREADEDCOMMENTS,
}
def split_words(comment):
"""
Internal function to split words
"""
return set(RE_INTERPUNCTION.sub(' ', comment).split())
|
django-fluent/django-fluent-comments | fluent_comments/forms/helper.py | CompactLabelsCommentFormHelper.render_layout | python | def render_layout(self, form, context, template_pack=TEMPLATE_PACK):
# Writing the label values into the field placeholders.
# This is done at rendering time, so the Form.__init__() could update any labels before.
# Django 1.11 no longer lets EmailInput or URLInput inherit from TextInput,
# so checking for `Input` instead while excluding `HiddenInput`.
for field in form.fields.values():
if field.label and \
isinstance(field.widget, (Input, forms.Textarea)) and \
not isinstance(field.widget, forms.HiddenInput):
field.widget.attrs['placeholder'] = u"{0}:".format(field.label)
return super(CompactLabelsCommentFormHelper, self).render_layout(form, context, template_pack=template_pack) | Copy any field label to the ``placeholder`` attribute.
Note, this method is called when :attr:`layout` is defined. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/forms/helper.py#L67-L82 | null | class CompactLabelsCommentFormHelper(CommentFormHelper):
"""
Compact labels in the form, show them as placeholder text instead.
.. note::
Make sure that the :attr:`layout` attribute is defined and
it has fields added to it, otherwise the placeholders don't appear.
The text input can easily be resized using CSS like:
.. code-block: css
@media only screen and (min-width: 768px) {
form.comments-form input.form-control {
width: 50%;
}
}
"""
form_class = CommentFormHelper.form_class.replace('form-horizontal', 'form-vertical') + ' comments-form-compact'
label_class = 'sr-only'
field_class = ''
|
django-fluent/django-fluent-comments | fluent_comments/models.py | get_comments_for_model | python | def get_comments_for_model(content_object, include_moderated=False):
qs = get_comments_model().objects.for_model(content_object)
if not include_moderated:
qs = qs.filter(is_public=True, is_removed=False)
return qs | Return the QuerySet with all comments for a given model. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/models.py#L36-L45 | null | from django.contrib.contenttypes.fields import GenericRelation
from django.utils.translation import ugettext_lazy as _
from django_comments import get_model as get_comments_model
from django_comments.managers import CommentManager
from fluent_comments import appsettings
if appsettings.USE_THREADEDCOMMENTS:
from threadedcomments.models import ThreadedComment as BaseModel
else:
from django_comments.models import Comment as BaseModel
class FluentCommentManager(CommentManager):
"""
Manager to optimize SQL queries for comments.
"""
def get_queryset(self):
return super(CommentManager, self).get_queryset().select_related('user')
class FluentComment(BaseModel):
"""
Proxy model to make sure that a ``select_related()`` is performed on the ``user`` field.
"""
objects = FluentCommentManager()
class Meta:
verbose_name = _("Comment")
verbose_name_plural = _("Comments")
proxy = True
managed = False
class CommentsRelation(GenericRelation):
"""
A :class:`~django.contrib.contenttypes.generic.GenericRelation` which can be applied to a parent model that
is expected to have comments. For example:
.. code-block:: python
class Article(models.Model):
comments_set = CommentsRelation()
"""
def __init__(self, *args, **kwargs):
super(CommentsRelation, self).__init__(
to=get_comments_model(),
content_type_field='content_type',
object_id_field='object_pk',
**kwargs
)
|
django-fluent/django-fluent-comments | fluent_comments/forms/_captcha.py | CaptchaFormMixin._reorder_fields | python | def _reorder_fields(self, ordering):
if 'captcha' not in ordering:
raise ImproperlyConfigured(
"When using 'FLUENT_COMMENTS_FIELD_ORDER', "
"make sure the 'captcha' field included too to use '{}' form. ".format(
self.__class__.__name__
)
)
super(CaptchaFormMixin, self)._reorder_fields(ordering)
# Avoid making captcha required for previews.
if self.is_preview:
self.fields.pop('captcha') | Test that the 'captcha' field is really present.
This could be broken by a bad FLUENT_COMMENTS_FIELD_ORDER configuration. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/forms/_captcha.py#L5-L21 | null | class CaptchaFormMixin(object):
|
django-fluent/django-fluent-comments | fluent_comments/views.py | post_comment_ajax | python | def post_comment_ajax(request, using=None):
if not request.is_ajax():
return HttpResponseBadRequest("Expecting Ajax call")
# This is copied from django_comments.
# Basically that view does too much, and doesn't offer a hook to change the rendering.
# The request object is not passed to next_redirect for example.
#
# This is a separate view to integrate both features. Previously this used django-ajaxcomments
# which is unfortunately not thread-safe (it it changes the comment view per request).
# Fill out some initial data fields from an authenticated user, if present
data = request.POST.copy()
if request.user.is_authenticated:
if not data.get('name', ''):
data["name"] = request.user.get_full_name() or request.user.username
if not data.get('email', ''):
data["email"] = request.user.email
# Look up the object we're trying to comment about
ctype = data.get("content_type")
object_pk = data.get("object_pk")
if ctype is None or object_pk is None:
return CommentPostBadRequest("Missing content_type or object_pk field.")
try:
model = apps.get_model(*ctype.split(".", 1))
target = model._default_manager.using(using).get(pk=object_pk)
except ValueError:
return CommentPostBadRequest("Invalid object_pk value: {0}".format(escape(object_pk)))
except (TypeError, LookupError):
return CommentPostBadRequest("Invalid content_type value: {0}".format(escape(ctype)))
except AttributeError:
return CommentPostBadRequest("The given content-type {0} does not resolve to a valid model.".format(escape(ctype)))
except ObjectDoesNotExist:
return CommentPostBadRequest("No object matching content-type {0} and object PK {1} exists.".format(escape(ctype), escape(object_pk)))
except (ValueError, ValidationError) as e:
return CommentPostBadRequest("Attempting go get content-type {0!r} and object PK {1!r} exists raised {2}".format(escape(ctype), escape(object_pk), e.__class__.__name__))
# Do we want to preview the comment?
is_preview = "preview" in data
# Construct the comment form
form = django_comments.get_form()(target, data=data, is_preview=is_preview)
# Check security information
if form.security_errors():
return CommentPostBadRequest("The comment form failed security verification: {0}".format(form.security_errors()))
# If there are errors or if we requested a preview show the comment
if is_preview:
comment = form.get_comment_object() if not form.errors else None
return _ajax_result(request, form, "preview", comment, object_id=object_pk)
if form.errors:
return _ajax_result(request, form, "post", object_id=object_pk)
# Otherwise create the comment
comment = form.get_comment_object()
comment.ip_address = request.META.get("REMOTE_ADDR", None)
if request.user.is_authenticated:
comment.user = request.user
# Signal that the comment is about to be saved
responses = signals.comment_will_be_posted.send(
sender=comment.__class__,
comment=comment,
request=request
)
for (receiver, response) in responses:
if response is False:
return CommentPostBadRequest("comment_will_be_posted receiver {0} killed the comment".format(receiver.__name__))
# Save the comment and signal that it was saved
comment.save()
signals.comment_was_posted.send(
sender = comment.__class__,
comment = comment,
request = request
)
return _ajax_result(request, form, "post", comment, object_id=object_pk) | Post a comment, via an Ajax call. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/views.py#L24-L107 | null | import json
import sys
import django_comments
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.http import HttpResponse, HttpResponseBadRequest
from django.template.loader import render_to_string
from django.utils.html import escape
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.http import require_POST
from django_comments import signals
from django_comments.views.comments import CommentPostBadRequest
from fluent_comments.utils import get_comment_template_name, get_comment_context_data
from fluent_comments import appsettings
if sys.version_info[0] >= 3:
long = int
@csrf_protect
@require_POST
def _ajax_result(request, form, action, comment=None, object_id=None):
# Based on django-ajaxcomments, BSD licensed.
# Copyright (c) 2009 Brandon Konkle and individual contributors.
#
# This code was extracted out of django-ajaxcomments because
# django-ajaxcomments is not threadsafe, and it was refactored afterwards.
success = True
json_errors = {}
if form.errors:
for field_name in form.errors:
field = form[field_name]
json_errors[field_name] = _render_errors(field)
success = False
json_return = {
'success': success,
'action': action,
'errors': json_errors,
'object_id': object_id,
'use_threadedcomments': bool(appsettings.USE_THREADEDCOMMENTS),
}
if comment is not None:
# Render the comment, like {% render_comment comment %} does
context = get_comment_context_data(comment, action)
context['request'] = request
template_name = get_comment_template_name(comment)
comment_html = render_to_string(template_name, context, request=request)
json_return.update({
'html': comment_html,
'comment_id': comment.id,
'parent_id': None,
'is_moderated': not comment.is_public, # is_public flags changes in comment_will_be_posted
})
if appsettings.USE_THREADEDCOMMENTS:
json_return['parent_id'] = comment.parent_id
json_response = json.dumps(json_return)
return HttpResponse(json_response, content_type="application/json")
def _render_errors(field):
"""
Render form errors in crispy-forms style.
"""
template = '{0}/layout/field_errors.html'.format(appsettings.CRISPY_TEMPLATE_PACK)
return render_to_string(template, {
'field': field,
'form_show_errors': True,
})
|
django-fluent/django-fluent-comments | fluent_comments/views.py | _render_errors | python | def _render_errors(field):
template = '{0}/layout/field_errors.html'.format(appsettings.CRISPY_TEMPLATE_PACK)
return render_to_string(template, {
'field': field,
'form_show_errors': True,
}) | Render form errors in crispy-forms style. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/views.py#L154-L162 | null | import json
import sys
import django_comments
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.http import HttpResponse, HttpResponseBadRequest
from django.template.loader import render_to_string
from django.utils.html import escape
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.http import require_POST
from django_comments import signals
from django_comments.views.comments import CommentPostBadRequest
from fluent_comments.utils import get_comment_template_name, get_comment_context_data
from fluent_comments import appsettings
if sys.version_info[0] >= 3:
long = int
@csrf_protect
@require_POST
def post_comment_ajax(request, using=None):
"""
Post a comment, via an Ajax call.
"""
if not request.is_ajax():
return HttpResponseBadRequest("Expecting Ajax call")
# This is copied from django_comments.
# Basically that view does too much, and doesn't offer a hook to change the rendering.
# The request object is not passed to next_redirect for example.
#
# This is a separate view to integrate both features. Previously this used django-ajaxcomments
# which is unfortunately not thread-safe (it it changes the comment view per request).
# Fill out some initial data fields from an authenticated user, if present
data = request.POST.copy()
if request.user.is_authenticated:
if not data.get('name', ''):
data["name"] = request.user.get_full_name() or request.user.username
if not data.get('email', ''):
data["email"] = request.user.email
# Look up the object we're trying to comment about
ctype = data.get("content_type")
object_pk = data.get("object_pk")
if ctype is None or object_pk is None:
return CommentPostBadRequest("Missing content_type or object_pk field.")
try:
model = apps.get_model(*ctype.split(".", 1))
target = model._default_manager.using(using).get(pk=object_pk)
except ValueError:
return CommentPostBadRequest("Invalid object_pk value: {0}".format(escape(object_pk)))
except (TypeError, LookupError):
return CommentPostBadRequest("Invalid content_type value: {0}".format(escape(ctype)))
except AttributeError:
return CommentPostBadRequest("The given content-type {0} does not resolve to a valid model.".format(escape(ctype)))
except ObjectDoesNotExist:
return CommentPostBadRequest("No object matching content-type {0} and object PK {1} exists.".format(escape(ctype), escape(object_pk)))
except (ValueError, ValidationError) as e:
return CommentPostBadRequest("Attempting go get content-type {0!r} and object PK {1!r} exists raised {2}".format(escape(ctype), escape(object_pk), e.__class__.__name__))
# Do we want to preview the comment?
is_preview = "preview" in data
# Construct the comment form
form = django_comments.get_form()(target, data=data, is_preview=is_preview)
# Check security information
if form.security_errors():
return CommentPostBadRequest("The comment form failed security verification: {0}".format(form.security_errors()))
# If there are errors or if we requested a preview show the comment
if is_preview:
comment = form.get_comment_object() if not form.errors else None
return _ajax_result(request, form, "preview", comment, object_id=object_pk)
if form.errors:
return _ajax_result(request, form, "post", object_id=object_pk)
# Otherwise create the comment
comment = form.get_comment_object()
comment.ip_address = request.META.get("REMOTE_ADDR", None)
if request.user.is_authenticated:
comment.user = request.user
# Signal that the comment is about to be saved
responses = signals.comment_will_be_posted.send(
sender=comment.__class__,
comment=comment,
request=request
)
for (receiver, response) in responses:
if response is False:
return CommentPostBadRequest("comment_will_be_posted receiver {0} killed the comment".format(receiver.__name__))
# Save the comment and signal that it was saved
comment.save()
signals.comment_was_posted.send(
sender = comment.__class__,
comment = comment,
request = request
)
return _ajax_result(request, form, "post", comment, object_id=object_pk)
def _ajax_result(request, form, action, comment=None, object_id=None):
# Based on django-ajaxcomments, BSD licensed.
# Copyright (c) 2009 Brandon Konkle and individual contributors.
#
# This code was extracted out of django-ajaxcomments because
# django-ajaxcomments is not threadsafe, and it was refactored afterwards.
success = True
json_errors = {}
if form.errors:
for field_name in form.errors:
field = form[field_name]
json_errors[field_name] = _render_errors(field)
success = False
json_return = {
'success': success,
'action': action,
'errors': json_errors,
'object_id': object_id,
'use_threadedcomments': bool(appsettings.USE_THREADEDCOMMENTS),
}
if comment is not None:
# Render the comment, like {% render_comment comment %} does
context = get_comment_context_data(comment, action)
context['request'] = request
template_name = get_comment_template_name(comment)
comment_html = render_to_string(template_name, context, request=request)
json_return.update({
'html': comment_html,
'comment_id': comment.id,
'parent_id': None,
'is_moderated': not comment.is_public, # is_public flags changes in comment_will_be_posted
})
if appsettings.USE_THREADEDCOMMENTS:
json_return['parent_id'] = comment.parent_id
json_response = json.dumps(json_return)
return HttpResponse(json_response, content_type="application/json")
|
django-fluent/django-fluent-comments | fluent_comments/__init__.py | get_form | python | def get_form():
global form_class
from fluent_comments import appsettings
if form_class is None:
if appsettings.FLUENT_COMMENTS_FORM_CLASS:
from django.utils.module_loading import import_string
form_class = import_string(appsettings.FLUENT_COMMENTS_FORM_CLASS)
else:
from fluent_comments.forms import FluentCommentForm
form_class = FluentCommentForm
return form_class | Return the form to use for commenting. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/__init__.py#L26-L40 | null | """
API for :ref:`custom-comment-app-api`
"""
default_app_config = 'fluent_comments.apps.FluentCommentsApp'
form_class = None
model_class = None
# following PEP 440
__version__ = "2.1"
def get_model():
"""
Return the model to use for commenting.
"""
global model_class
if model_class is None:
from fluent_comments.models import FluentComment
# Our proxy model that performs select_related('user') for the comments
model_class = FluentComment
return model_class
|
django-fluent/django-fluent-comments | fluent_comments/receivers.py | load_default_moderator | python | def load_default_moderator():
if appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR == 'default':
# Perform spam checks
return moderation.FluentCommentsModerator(None)
elif appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR == 'deny':
# Deny all comments not from known registered models.
return moderation.AlwaysDeny(None)
elif str(appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR).lower() == 'none':
# Disables default moderator
return moderation.NullModerator(None)
elif '.' in appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR:
return import_string(appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR)(None)
else:
raise ImproperlyConfigured(
"Bad FLUENT_COMMENTS_DEFAULT_MODERATOR value. Provide default/deny/none or a dotted path"
) | Find a moderator object | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/receivers.py#L24-L42 | null | """
The comment signals are handled to fallback to a default moderator.
This avoids not checking for spam or sending email notifications
for comments that bypassed the moderator registration
(e.g. posting a comment on a different page).
This is especially useful when a django-fluent-contents "CommentsAreaItem"
element is added to a random page subclass (which is likely not registered).
"""
import logging
import django_comments
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.utils.module_loading import import_string
from django_comments import signals
from fluent_comments import appsettings, moderation
logger = logging.getLogger(__name__)
default_moderator = load_default_moderator()
CommentModel = django_comments.get_model()
@receiver(signals.comment_will_be_posted)
def on_comment_will_be_posted(sender, comment, request, **kwargs):
"""
Make sure both the Ajax and regular comments are checked for moderation.
This signal is also used to link moderators to the comment posting.
"""
content_object = comment.content_object
moderator = moderation.get_model_moderator(content_object.__class__)
if moderator and comment.__class__ is not CommentModel:
# Help with some hard to diagnose problems. The default Django moderator connects
# to the configured comment model. When this model differs from the signal sender,
# the the form stores a different model then COMMENTS_APP provides.
moderator = None
logger.warning(
"Comment of type '%s' was not moderated by '%s', "
"because the parent '%s' has a moderator installed for '%s' instead",
comment.__class__.__name__, moderator.__class__.__name__,
content_object.__class__.__name__, CommentModel.__name__
)
if moderator is None:
logger.info(
"Using default moderator for comment '%s' on parent '%s'",
comment.__class__.__name__, content_object.__class__.__name__
)
_run_default_moderator(comment, content_object, request)
def _run_default_moderator(comment, content_object, request):
"""
Run the default moderator
"""
# The default moderator will likely not check things like "auto close".
# It can still provide akismet and bad word checking.
if not default_moderator.allow(comment, content_object, request):
# Comment will be disallowed outright (HTTP 403 response)
return False
if default_moderator.moderate(comment, content_object, request):
comment.is_public = False
@receiver(signals.comment_was_posted)
def on_comment_posted(sender, comment, request, **kwargs):
"""
Send email notification of a new comment to site staff when email notifications have been requested.
"""
content_object = comment.content_object
moderator = moderation.get_model_moderator(content_object.__class__)
if moderator is None or comment.__class__ is not CommentModel:
# No custom moderator means no email would be sent.
# This still pass the comment to the default moderator.
default_moderator.email(comment, content_object, request)
|
django-fluent/django-fluent-comments | fluent_comments/receivers.py | on_comment_will_be_posted | python | def on_comment_will_be_posted(sender, comment, request, **kwargs):
content_object = comment.content_object
moderator = moderation.get_model_moderator(content_object.__class__)
if moderator and comment.__class__ is not CommentModel:
# Help with some hard to diagnose problems. The default Django moderator connects
# to the configured comment model. When this model differs from the signal sender,
# the the form stores a different model then COMMENTS_APP provides.
moderator = None
logger.warning(
"Comment of type '%s' was not moderated by '%s', "
"because the parent '%s' has a moderator installed for '%s' instead",
comment.__class__.__name__, moderator.__class__.__name__,
content_object.__class__.__name__, CommentModel.__name__
)
if moderator is None:
logger.info(
"Using default moderator for comment '%s' on parent '%s'",
comment.__class__.__name__, content_object.__class__.__name__
)
_run_default_moderator(comment, content_object, request) | Make sure both the Ajax and regular comments are checked for moderation.
This signal is also used to link moderators to the comment posting. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/receivers.py#L50-L74 | [
"def get_model_moderator(model):\n \"\"\"\n Return the moderator class that is registered with a content object.\n If there is no associated moderator with a class, None is returned.\n\n :param model: The Django model registered with :func:`moderate_model`\n :type model: :class:`~django.db.models.Model`\n :return: The moderator class which holds the moderation policies.\n :rtype: :class:`~django_comments.moderation.CommentModerator`\n \"\"\"\n try:\n return moderator._registry[model]\n except KeyError:\n return None\n",
"def _run_default_moderator(comment, content_object, request):\n \"\"\"\n Run the default moderator\n \"\"\"\n # The default moderator will likely not check things like \"auto close\".\n # It can still provide akismet and bad word checking.\n if not default_moderator.allow(comment, content_object, request):\n # Comment will be disallowed outright (HTTP 403 response)\n return False\n\n if default_moderator.moderate(comment, content_object, request):\n comment.is_public = False\n"
] | """
The comment signals are handled to fallback to a default moderator.
This avoids not checking for spam or sending email notifications
for comments that bypassed the moderator registration
(e.g. posting a comment on a different page).
This is especially useful when a django-fluent-contents "CommentsAreaItem"
element is added to a random page subclass (which is likely not registered).
"""
import logging
import django_comments
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.utils.module_loading import import_string
from django_comments import signals
from fluent_comments import appsettings, moderation
logger = logging.getLogger(__name__)
def load_default_moderator():
"""
Find a moderator object
"""
if appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR == 'default':
# Perform spam checks
return moderation.FluentCommentsModerator(None)
elif appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR == 'deny':
# Deny all comments not from known registered models.
return moderation.AlwaysDeny(None)
elif str(appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR).lower() == 'none':
# Disables default moderator
return moderation.NullModerator(None)
elif '.' in appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR:
return import_string(appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR)(None)
else:
raise ImproperlyConfigured(
"Bad FLUENT_COMMENTS_DEFAULT_MODERATOR value. Provide default/deny/none or a dotted path"
)
default_moderator = load_default_moderator()
CommentModel = django_comments.get_model()
@receiver(signals.comment_will_be_posted)
def _run_default_moderator(comment, content_object, request):
"""
Run the default moderator
"""
# The default moderator will likely not check things like "auto close".
# It can still provide akismet and bad word checking.
if not default_moderator.allow(comment, content_object, request):
# Comment will be disallowed outright (HTTP 403 response)
return False
if default_moderator.moderate(comment, content_object, request):
comment.is_public = False
@receiver(signals.comment_was_posted)
def on_comment_posted(sender, comment, request, **kwargs):
"""
Send email notification of a new comment to site staff when email notifications have been requested.
"""
content_object = comment.content_object
moderator = moderation.get_model_moderator(content_object.__class__)
if moderator is None or comment.__class__ is not CommentModel:
# No custom moderator means no email would be sent.
# This still pass the comment to the default moderator.
default_moderator.email(comment, content_object, request)
|
django-fluent/django-fluent-comments | fluent_comments/receivers.py | _run_default_moderator | python | def _run_default_moderator(comment, content_object, request):
# The default moderator will likely not check things like "auto close".
# It can still provide akismet and bad word checking.
if not default_moderator.allow(comment, content_object, request):
# Comment will be disallowed outright (HTTP 403 response)
return False
if default_moderator.moderate(comment, content_object, request):
comment.is_public = False | Run the default moderator | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/receivers.py#L77-L88 | null | """
The comment signals are handled to fallback to a default moderator.
This avoids not checking for spam or sending email notifications
for comments that bypassed the moderator registration
(e.g. posting a comment on a different page).
This is especially useful when a django-fluent-contents "CommentsAreaItem"
element is added to a random page subclass (which is likely not registered).
"""
import logging
import django_comments
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.utils.module_loading import import_string
from django_comments import signals
from fluent_comments import appsettings, moderation
logger = logging.getLogger(__name__)
def load_default_moderator():
"""
Find a moderator object
"""
if appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR == 'default':
# Perform spam checks
return moderation.FluentCommentsModerator(None)
elif appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR == 'deny':
# Deny all comments not from known registered models.
return moderation.AlwaysDeny(None)
elif str(appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR).lower() == 'none':
# Disables default moderator
return moderation.NullModerator(None)
elif '.' in appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR:
return import_string(appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR)(None)
else:
raise ImproperlyConfigured(
"Bad FLUENT_COMMENTS_DEFAULT_MODERATOR value. Provide default/deny/none or a dotted path"
)
default_moderator = load_default_moderator()
CommentModel = django_comments.get_model()
@receiver(signals.comment_will_be_posted)
def on_comment_will_be_posted(sender, comment, request, **kwargs):
"""
Make sure both the Ajax and regular comments are checked for moderation.
This signal is also used to link moderators to the comment posting.
"""
content_object = comment.content_object
moderator = moderation.get_model_moderator(content_object.__class__)
if moderator and comment.__class__ is not CommentModel:
# Help with some hard to diagnose problems. The default Django moderator connects
# to the configured comment model. When this model differs from the signal sender,
# the the form stores a different model then COMMENTS_APP provides.
moderator = None
logger.warning(
"Comment of type '%s' was not moderated by '%s', "
"because the parent '%s' has a moderator installed for '%s' instead",
comment.__class__.__name__, moderator.__class__.__name__,
content_object.__class__.__name__, CommentModel.__name__
)
if moderator is None:
logger.info(
"Using default moderator for comment '%s' on parent '%s'",
comment.__class__.__name__, content_object.__class__.__name__
)
_run_default_moderator(comment, content_object, request)
@receiver(signals.comment_was_posted)
def on_comment_posted(sender, comment, request, **kwargs):
"""
Send email notification of a new comment to site staff when email notifications have been requested.
"""
content_object = comment.content_object
moderator = moderation.get_model_moderator(content_object.__class__)
if moderator is None or comment.__class__ is not CommentModel:
# No custom moderator means no email would be sent.
# This still pass the comment to the default moderator.
default_moderator.email(comment, content_object, request)
|
django-fluent/django-fluent-comments | fluent_comments/receivers.py | on_comment_posted | python | def on_comment_posted(sender, comment, request, **kwargs):
content_object = comment.content_object
moderator = moderation.get_model_moderator(content_object.__class__)
if moderator is None or comment.__class__ is not CommentModel:
# No custom moderator means no email would be sent.
# This still pass the comment to the default moderator.
default_moderator.email(comment, content_object, request) | Send email notification of a new comment to site staff when email notifications have been requested. | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/receivers.py#L92-L102 | [
"def get_model_moderator(model):\n \"\"\"\n Return the moderator class that is registered with a content object.\n If there is no associated moderator with a class, None is returned.\n\n :param model: The Django model registered with :func:`moderate_model`\n :type model: :class:`~django.db.models.Model`\n :return: The moderator class which holds the moderation policies.\n :rtype: :class:`~django_comments.moderation.CommentModerator`\n \"\"\"\n try:\n return moderator._registry[model]\n except KeyError:\n return None\n"
] | """
The comment signals are handled to fallback to a default moderator.
This avoids not checking for spam or sending email notifications
for comments that bypassed the moderator registration
(e.g. posting a comment on a different page).
This is especially useful when a django-fluent-contents "CommentsAreaItem"
element is added to a random page subclass (which is likely not registered).
"""
import logging
import django_comments
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.utils.module_loading import import_string
from django_comments import signals
from fluent_comments import appsettings, moderation
logger = logging.getLogger(__name__)
def load_default_moderator():
"""
Find a moderator object
"""
if appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR == 'default':
# Perform spam checks
return moderation.FluentCommentsModerator(None)
elif appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR == 'deny':
# Deny all comments not from known registered models.
return moderation.AlwaysDeny(None)
elif str(appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR).lower() == 'none':
# Disables default moderator
return moderation.NullModerator(None)
elif '.' in appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR:
return import_string(appsettings.FLUENT_COMMENTS_DEFAULT_MODERATOR)(None)
else:
raise ImproperlyConfigured(
"Bad FLUENT_COMMENTS_DEFAULT_MODERATOR value. Provide default/deny/none or a dotted path"
)
default_moderator = load_default_moderator()
CommentModel = django_comments.get_model()
@receiver(signals.comment_will_be_posted)
def on_comment_will_be_posted(sender, comment, request, **kwargs):
"""
Make sure both the Ajax and regular comments are checked for moderation.
This signal is also used to link moderators to the comment posting.
"""
content_object = comment.content_object
moderator = moderation.get_model_moderator(content_object.__class__)
if moderator and comment.__class__ is not CommentModel:
# Help with some hard to diagnose problems. The default Django moderator connects
# to the configured comment model. When this model differs from the signal sender,
# the the form stores a different model then COMMENTS_APP provides.
moderator = None
logger.warning(
"Comment of type '%s' was not moderated by '%s', "
"because the parent '%s' has a moderator installed for '%s' instead",
comment.__class__.__name__, moderator.__class__.__name__,
content_object.__class__.__name__, CommentModel.__name__
)
if moderator is None:
logger.info(
"Using default moderator for comment '%s' on parent '%s'",
comment.__class__.__name__, content_object.__class__.__name__
)
_run_default_moderator(comment, content_object, request)
def _run_default_moderator(comment, content_object, request):
"""
Run the default moderator
"""
# The default moderator will likely not check things like "auto close".
# It can still provide akismet and bad word checking.
if not default_moderator.allow(comment, content_object, request):
# Comment will be disallowed outright (HTTP 403 response)
return False
if default_moderator.moderate(comment, content_object, request):
comment.is_public = False
@receiver(signals.comment_was_posted)
|
django-fluent/django-fluent-comments | fluent_comments/akismet.py | akismet_check | python | def akismet_check(comment, content_object, request):
# Return previously cached response
akismet_result = getattr(comment, '_akismet_result_', None)
if akismet_result is not None:
return akismet_result
# Get Akismet data
AKISMET_API_KEY = appsettings.AKISMET_API_KEY
if not AKISMET_API_KEY:
raise ImproperlyConfigured('You must set AKISMET_API_KEY to use comment moderation with Akismet.')
current_domain = get_current_site(request).domain
auto_blog_url = '{0}://{1}/'.format(request.is_secure() and 'https' or 'http', current_domain)
blog_url = appsettings.AKISMET_BLOG_URL or auto_blog_url
akismet = Akismet(
AKISMET_API_KEY,
blog=blog_url,
is_test=int(bool(appsettings.AKISMET_IS_TEST)),
application_user_agent='django-fluent-comments/{0}'.format(fluent_comments.__version__),
)
akismet_data = _get_akismet_data(blog_url, comment, content_object, request)
akismet_result = akismet.check(**akismet_data) # raises AkismetServerError when key is invalid
setattr(comment, "_akismet_result_", akismet_result)
return akismet_result | Connects to Akismet and evaluates to True if Akismet marks this comment as spam.
:rtype: akismet.SpamStatus | train | https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/akismet.py#L17-L47 | [
"def _get_akismet_data(blog_url, comment, content_object, request):\n # Field documentation:\n # http://akismet.com/development/api/#comment-check\n data = {\n # Comment info\n 'permalink': urljoin(blog_url, content_object.get_absolute_url()),\n # see http://blog.akismet.com/2012/06/19/pro-tip-tell-us-your-comment_type/\n 'comment_type': 'comment', # comment, trackback, pingback\n 'comment_author': getattr(comment, 'name', ''),\n 'comment_author_email': getattr(comment, 'email', ''),\n 'comment_author_url': getattr(comment, 'url', ''),\n 'comment_content': smart_str(comment.comment),\n 'comment_date': comment.submit_date,\n\n # Request info\n 'referrer': request.META.get('HTTP_REFERER', ''),\n 'user_agent': request.META.get('HTTP_USER_AGENT', ''),\n 'user_ip': comment.ip_address,\n }\n\n if comment.user_id and comment.user.is_superuser:\n data['user_role'] = 'administrator' # always passes test\n\n # If the language is known, provide it.\n language = _get_article_language(content_object)\n if language:\n data['blog_lang'] = language\n\n return data\n"
] | from __future__ import absolute_import
from akismet import Akismet, SpamStatus
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import smart_str
import fluent_comments
from fluent_comments import appsettings
try:
from urllib.parse import urljoin # Python 3
except ImportError:
from urlparse import urljoin # Python 2
def _get_akismet_data(blog_url, comment, content_object, request):
# Field documentation:
# http://akismet.com/development/api/#comment-check
data = {
# Comment info
'permalink': urljoin(blog_url, content_object.get_absolute_url()),
# see http://blog.akismet.com/2012/06/19/pro-tip-tell-us-your-comment_type/
'comment_type': 'comment', # comment, trackback, pingback
'comment_author': getattr(comment, 'name', ''),
'comment_author_email': getattr(comment, 'email', ''),
'comment_author_url': getattr(comment, 'url', ''),
'comment_content': smart_str(comment.comment),
'comment_date': comment.submit_date,
# Request info
'referrer': request.META.get('HTTP_REFERER', ''),
'user_agent': request.META.get('HTTP_USER_AGENT', ''),
'user_ip': comment.ip_address,
}
if comment.user_id and comment.user.is_superuser:
data['user_role'] = 'administrator' # always passes test
# If the language is known, provide it.
language = _get_article_language(content_object)
if language:
data['blog_lang'] = language
return data
def _get_article_language(article):
try:
# django-parler uses this attribute
return article.get_current_language()
except AttributeError:
pass
try:
return article.language_code
except AttributeError:
pass
return None
|
mattjj/pylds | pylds/util.py | symm_block_tridiag_matmul | python | def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T-1, D, D)
assert v.shape == (T, D)
out = np.matmul(H_diag, v[:, :, None])[:, :, 0]
out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]
out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1), v[:-1][:, :, None])[:, :, 0]
return out | Compute matrix-vector product with a symmetric block
tridiagonal matrix H and vector v.
:param H_diag: block diagonal terms of H
:param H_upper_diag: upper block diagonal terms of H
:param v: vector to multiple
:return: H * v | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L21-L39 | null | import autograd.numpy as np
from pylds.lds_messages_interface import info_E_step, kalman_info_filter, info_sample
def random_rotation(n, theta=None):
if theta is None:
# Sample a random, slow rotation
theta = 0.5 * np.pi * np.random.rand()
if n == 1:
return np.random.rand() * np.eye(1)
rot = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
out = np.zeros((n, n))
out[:2, :2] = rot
q = np.linalg.qr(np.random.randn(n, n))[0]
return q.dot(out).dot(q.T)
def solve_symm_block_tridiag(H_diag, H_upper_diag, v):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
assert v.shape == (T, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = v
_, y, _, _ = info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return y
def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):
"""
convert blocks to banded matrix representation required for scipy.
we are using the "lower form."
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)
ab = np.zeros((2 * D, T * D))
# Fill in blocks along the diagonal
for d in range(D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_diag[:, i, j], np.zeros((T, d))))
ab[d] = h.ravel()
# Fill in lower left corner of blocks below the diagonal
for d in range(0, D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_lower_diag[:, i, j], np.zeros((T - 1, d))))
ab[D + d, :D * (T - 1)] = h.ravel()
# Fill in upper corner of blocks below the diagonal
for d in range(1, D):
# Get indices of (+d)-th diagonal of H_lower_diag
i = np.arange(0, D - d)
j = np.arange(d, D)
h = np.column_stack((np.zeros((T - 1, d)), H_lower_diag[:, i, j]))
ab[D - d, :D * (T - 1)] += h.ravel()
return ab if lower else transpose_lower_banded_matrix(ab)
def transpose_lower_banded_matrix(Lab):
# This is painful
Uab = np.flipud(Lab)
u = Uab.shape[0] - 1
for i in range(1,u+1):
Uab[-(i+1), i:] = Uab[-(i+1), :-i]
Uab[-(i + 1), :i] = 0
return Uab
def scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v, ab=None):
"""
use scipy.linalg.solve_banded to solve a symmetric block tridiagonal system
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
"""
from scipy.linalg import solveh_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag) \
if ab is None else ab
x = solveh_banded(ab, v.ravel(), lower=True)
return x.reshape(v.shape)
def scipy_sample_block_tridiag(H_diag, H_upper_diag, size=1, ab=None, z=None):
from scipy.linalg import cholesky_banded, solve_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=False) \
if ab is None else ab
Uab = cholesky_banded(ab, lower=False)
z = np.random.randn(ab.shape[1], size) if z is None else z
# If lower = False, we have (U^T U)^{-1} = U^{-1} U^{-T} = AA^T = Sigma
# where A = U^{-1}. Samples are Az = U^{-1}z = x, or equivalently Ux = z.
return solve_banded((0, Uab.shape[0]-1), Uab, z)
def sample_block_tridiag(H_diag, H_upper_diag):
"""
helper function for sampling block tridiag gaussians.
this is only for speed comparison with the solve approach.
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T,D))
y = info_sample(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return y
def logdet_symm_block_tridiag(H_diag, H_upper_diag):
"""
compute the log determinant of a positive definite,
symmetric block tridiag matrix. Use the Kalman
info filter to do so. Specifically, the KF computes
the normalizer:
log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi
We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from
this we solve for log |J|.
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
log_Z_init = 0
J_21 = np.swapaxes(H_upper_diag, -1, -2)
log_Z_pair = 0
J_node = H_diag
h_node = np.zeros((T, D))
log_Z_node = 0
logZ, _, _ = kalman_info_filter(J_init, h_init, log_Z_init,
J_11, J_21, J_22, h_1, h_2, log_Z_pair,
J_node, h_node, log_Z_node)
# logZ = -1/2 log |J| + n/2 log 2 \pi
logdetJ = -2 * (logZ - (T*D) / 2 * np.log(2 * np.pi))
return logdetJ
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T, D))
_, _, sigmas, E_xt_xtp1 = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return sigmas, E_xt_xtp1
|
mattjj/pylds | pylds/util.py | convert_block_tridiag_to_banded | python | def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)
ab = np.zeros((2 * D, T * D))
# Fill in blocks along the diagonal
for d in range(D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_diag[:, i, j], np.zeros((T, d))))
ab[d] = h.ravel()
# Fill in lower left corner of blocks below the diagonal
for d in range(0, D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_lower_diag[:, i, j], np.zeros((T - 1, d))))
ab[D + d, :D * (T - 1)] = h.ravel()
# Fill in upper corner of blocks below the diagonal
for d in range(1, D):
# Get indices of (+d)-th diagonal of H_lower_diag
i = np.arange(0, D - d)
j = np.arange(d, D)
h = np.column_stack((np.zeros((T - 1, d)), H_lower_diag[:, i, j]))
ab[D - d, :D * (T - 1)] += h.ravel()
return ab if lower else transpose_lower_banded_matrix(ab) | convert blocks to banded matrix representation required for scipy.
we are using the "lower form."
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L64-L101 | [
"def transpose_lower_banded_matrix(Lab):\n # This is painful\n Uab = np.flipud(Lab)\n u = Uab.shape[0] - 1\n for i in range(1,u+1):\n Uab[-(i+1), i:] = Uab[-(i+1), :-i]\n Uab[-(i + 1), :i] = 0\n return Uab\n"
] | import autograd.numpy as np
from pylds.lds_messages_interface import info_E_step, kalman_info_filter, info_sample
def random_rotation(n, theta=None):
if theta is None:
# Sample a random, slow rotation
theta = 0.5 * np.pi * np.random.rand()
if n == 1:
return np.random.rand() * np.eye(1)
rot = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
out = np.zeros((n, n))
out[:2, :2] = rot
q = np.linalg.qr(np.random.randn(n, n))[0]
return q.dot(out).dot(q.T)
def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
"""
Compute matrix-vector product with a symmetric block
tridiagonal matrix H and vector v.
:param H_diag: block diagonal terms of H
:param H_upper_diag: upper block diagonal terms of H
:param v: vector to multiple
:return: H * v
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T-1, D, D)
assert v.shape == (T, D)
out = np.matmul(H_diag, v[:, :, None])[:, :, 0]
out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]
out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1), v[:-1][:, :, None])[:, :, 0]
return out
def solve_symm_block_tridiag(H_diag, H_upper_diag, v):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
assert v.shape == (T, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = v
_, y, _, _ = info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return y
def transpose_lower_banded_matrix(Lab):
# This is painful
Uab = np.flipud(Lab)
u = Uab.shape[0] - 1
for i in range(1,u+1):
Uab[-(i+1), i:] = Uab[-(i+1), :-i]
Uab[-(i + 1), :i] = 0
return Uab
def scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v, ab=None):
"""
use scipy.linalg.solve_banded to solve a symmetric block tridiagonal system
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
"""
from scipy.linalg import solveh_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag) \
if ab is None else ab
x = solveh_banded(ab, v.ravel(), lower=True)
return x.reshape(v.shape)
def scipy_sample_block_tridiag(H_diag, H_upper_diag, size=1, ab=None, z=None):
from scipy.linalg import cholesky_banded, solve_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=False) \
if ab is None else ab
Uab = cholesky_banded(ab, lower=False)
z = np.random.randn(ab.shape[1], size) if z is None else z
# If lower = False, we have (U^T U)^{-1} = U^{-1} U^{-T} = AA^T = Sigma
# where A = U^{-1}. Samples are Az = U^{-1}z = x, or equivalently Ux = z.
return solve_banded((0, Uab.shape[0]-1), Uab, z)
def sample_block_tridiag(H_diag, H_upper_diag):
"""
helper function for sampling block tridiag gaussians.
this is only for speed comparison with the solve approach.
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T,D))
y = info_sample(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return y
def logdet_symm_block_tridiag(H_diag, H_upper_diag):
"""
compute the log determinant of a positive definite,
symmetric block tridiag matrix. Use the Kalman
info filter to do so. Specifically, the KF computes
the normalizer:
log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi
We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from
this we solve for log |J|.
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
log_Z_init = 0
J_21 = np.swapaxes(H_upper_diag, -1, -2)
log_Z_pair = 0
J_node = H_diag
h_node = np.zeros((T, D))
log_Z_node = 0
logZ, _, _ = kalman_info_filter(J_init, h_init, log_Z_init,
J_11, J_21, J_22, h_1, h_2, log_Z_pair,
J_node, h_node, log_Z_node)
# logZ = -1/2 log |J| + n/2 log 2 \pi
logdetJ = -2 * (logZ - (T*D) / 2 * np.log(2 * np.pi))
return logdetJ
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T, D))
_, _, sigmas, E_xt_xtp1 = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return sigmas, E_xt_xtp1
|
mattjj/pylds | pylds/util.py | scipy_solve_symm_block_tridiag | python | def scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v, ab=None):
from scipy.linalg import solveh_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag) \
if ab is None else ab
x = solveh_banded(ab, v.ravel(), lower=True)
return x.reshape(v.shape) | use scipy.linalg.solve_banded to solve a symmetric block tridiagonal system
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L114-L124 | [
"def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):\n \"\"\"\n convert blocks to banded matrix representation required for scipy.\n we are using the \"lower form.\"\n see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html\n \"\"\"\n T, D, _ = H_diag.shape\n assert H_diag.ndim == 3 and H_diag.shape[2] == D\n assert H_upper_diag.shape == (T - 1, D, D)\n H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)\n\n ab = np.zeros((2 * D, T * D))\n\n # Fill in blocks along the diagonal\n for d in range(D):\n # Get indices of (-d)-th diagonal of H_diag\n i = np.arange(d, D)\n j = np.arange(0, D - d)\n h = np.column_stack((H_diag[:, i, j], np.zeros((T, d))))\n ab[d] = h.ravel()\n\n # Fill in lower left corner of blocks below the diagonal\n for d in range(0, D):\n # Get indices of (-d)-th diagonal of H_diag\n i = np.arange(d, D)\n j = np.arange(0, D - d)\n h = np.column_stack((H_lower_diag[:, i, j], np.zeros((T - 1, d))))\n ab[D + d, :D * (T - 1)] = h.ravel()\n\n # Fill in upper corner of blocks below the diagonal\n for d in range(1, D):\n # Get indices of (+d)-th diagonal of H_lower_diag\n i = np.arange(0, D - d)\n j = np.arange(d, D)\n h = np.column_stack((np.zeros((T - 1, d)), H_lower_diag[:, i, j]))\n ab[D - d, :D * (T - 1)] += h.ravel()\n\n return ab if lower else transpose_lower_banded_matrix(ab)\n"
] | import autograd.numpy as np
from pylds.lds_messages_interface import info_E_step, kalman_info_filter, info_sample
def random_rotation(n, theta=None):
if theta is None:
# Sample a random, slow rotation
theta = 0.5 * np.pi * np.random.rand()
if n == 1:
return np.random.rand() * np.eye(1)
rot = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
out = np.zeros((n, n))
out[:2, :2] = rot
q = np.linalg.qr(np.random.randn(n, n))[0]
return q.dot(out).dot(q.T)
def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
"""
Compute matrix-vector product with a symmetric block
tridiagonal matrix H and vector v.
:param H_diag: block diagonal terms of H
:param H_upper_diag: upper block diagonal terms of H
:param v: vector to multiple
:return: H * v
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T-1, D, D)
assert v.shape == (T, D)
out = np.matmul(H_diag, v[:, :, None])[:, :, 0]
out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]
out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1), v[:-1][:, :, None])[:, :, 0]
return out
def solve_symm_block_tridiag(H_diag, H_upper_diag, v):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
assert v.shape == (T, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = v
_, y, _, _ = info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return y
def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):
"""
convert blocks to banded matrix representation required for scipy.
we are using the "lower form."
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)
ab = np.zeros((2 * D, T * D))
# Fill in blocks along the diagonal
for d in range(D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_diag[:, i, j], np.zeros((T, d))))
ab[d] = h.ravel()
# Fill in lower left corner of blocks below the diagonal
for d in range(0, D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_lower_diag[:, i, j], np.zeros((T - 1, d))))
ab[D + d, :D * (T - 1)] = h.ravel()
# Fill in upper corner of blocks below the diagonal
for d in range(1, D):
# Get indices of (+d)-th diagonal of H_lower_diag
i = np.arange(0, D - d)
j = np.arange(d, D)
h = np.column_stack((np.zeros((T - 1, d)), H_lower_diag[:, i, j]))
ab[D - d, :D * (T - 1)] += h.ravel()
return ab if lower else transpose_lower_banded_matrix(ab)
def transpose_lower_banded_matrix(Lab):
# This is painful
Uab = np.flipud(Lab)
u = Uab.shape[0] - 1
for i in range(1,u+1):
Uab[-(i+1), i:] = Uab[-(i+1), :-i]
Uab[-(i + 1), :i] = 0
return Uab
def scipy_sample_block_tridiag(H_diag, H_upper_diag, size=1, ab=None, z=None):
from scipy.linalg import cholesky_banded, solve_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=False) \
if ab is None else ab
Uab = cholesky_banded(ab, lower=False)
z = np.random.randn(ab.shape[1], size) if z is None else z
# If lower = False, we have (U^T U)^{-1} = U^{-1} U^{-T} = AA^T = Sigma
# where A = U^{-1}. Samples are Az = U^{-1}z = x, or equivalently Ux = z.
return solve_banded((0, Uab.shape[0]-1), Uab, z)
def sample_block_tridiag(H_diag, H_upper_diag):
"""
helper function for sampling block tridiag gaussians.
this is only for speed comparison with the solve approach.
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T,D))
y = info_sample(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return y
def logdet_symm_block_tridiag(H_diag, H_upper_diag):
"""
compute the log determinant of a positive definite,
symmetric block tridiag matrix. Use the Kalman
info filter to do so. Specifically, the KF computes
the normalizer:
log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi
We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from
this we solve for log |J|.
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
log_Z_init = 0
J_21 = np.swapaxes(H_upper_diag, -1, -2)
log_Z_pair = 0
J_node = H_diag
h_node = np.zeros((T, D))
log_Z_node = 0
logZ, _, _ = kalman_info_filter(J_init, h_init, log_Z_init,
J_11, J_21, J_22, h_1, h_2, log_Z_pair,
J_node, h_node, log_Z_node)
# logZ = -1/2 log |J| + n/2 log 2 \pi
logdetJ = -2 * (logZ - (T*D) / 2 * np.log(2 * np.pi))
return logdetJ
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T, D))
_, _, sigmas, E_xt_xtp1 = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return sigmas, E_xt_xtp1
|
mattjj/pylds | pylds/util.py | sample_block_tridiag | python | def sample_block_tridiag(H_diag, H_upper_diag):
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T,D))
y = info_sample(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return y | helper function for sampling block tridiag gaussians.
this is only for speed comparison with the solve approach. | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L141-L160 | null | import autograd.numpy as np
from pylds.lds_messages_interface import info_E_step, kalman_info_filter, info_sample
def random_rotation(n, theta=None):
if theta is None:
# Sample a random, slow rotation
theta = 0.5 * np.pi * np.random.rand()
if n == 1:
return np.random.rand() * np.eye(1)
rot = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
out = np.zeros((n, n))
out[:2, :2] = rot
q = np.linalg.qr(np.random.randn(n, n))[0]
return q.dot(out).dot(q.T)
def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
"""
Compute matrix-vector product with a symmetric block
tridiagonal matrix H and vector v.
:param H_diag: block diagonal terms of H
:param H_upper_diag: upper block diagonal terms of H
:param v: vector to multiple
:return: H * v
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T-1, D, D)
assert v.shape == (T, D)
out = np.matmul(H_diag, v[:, :, None])[:, :, 0]
out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]
out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1), v[:-1][:, :, None])[:, :, 0]
return out
def solve_symm_block_tridiag(H_diag, H_upper_diag, v):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
assert v.shape == (T, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = v
_, y, _, _ = info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return y
def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):
"""
convert blocks to banded matrix representation required for scipy.
we are using the "lower form."
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)
ab = np.zeros((2 * D, T * D))
# Fill in blocks along the diagonal
for d in range(D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_diag[:, i, j], np.zeros((T, d))))
ab[d] = h.ravel()
# Fill in lower left corner of blocks below the diagonal
for d in range(0, D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_lower_diag[:, i, j], np.zeros((T - 1, d))))
ab[D + d, :D * (T - 1)] = h.ravel()
# Fill in upper corner of blocks below the diagonal
for d in range(1, D):
# Get indices of (+d)-th diagonal of H_lower_diag
i = np.arange(0, D - d)
j = np.arange(d, D)
h = np.column_stack((np.zeros((T - 1, d)), H_lower_diag[:, i, j]))
ab[D - d, :D * (T - 1)] += h.ravel()
return ab if lower else transpose_lower_banded_matrix(ab)
def transpose_lower_banded_matrix(Lab):
# This is painful
Uab = np.flipud(Lab)
u = Uab.shape[0] - 1
for i in range(1,u+1):
Uab[-(i+1), i:] = Uab[-(i+1), :-i]
Uab[-(i + 1), :i] = 0
return Uab
def scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v, ab=None):
"""
use scipy.linalg.solve_banded to solve a symmetric block tridiagonal system
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
"""
from scipy.linalg import solveh_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag) \
if ab is None else ab
x = solveh_banded(ab, v.ravel(), lower=True)
return x.reshape(v.shape)
def scipy_sample_block_tridiag(H_diag, H_upper_diag, size=1, ab=None, z=None):
from scipy.linalg import cholesky_banded, solve_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=False) \
if ab is None else ab
Uab = cholesky_banded(ab, lower=False)
z = np.random.randn(ab.shape[1], size) if z is None else z
# If lower = False, we have (U^T U)^{-1} = U^{-1} U^{-T} = AA^T = Sigma
# where A = U^{-1}. Samples are Az = U^{-1}z = x, or equivalently Ux = z.
return solve_banded((0, Uab.shape[0]-1), Uab, z)
def logdet_symm_block_tridiag(H_diag, H_upper_diag):
"""
compute the log determinant of a positive definite,
symmetric block tridiag matrix. Use the Kalman
info filter to do so. Specifically, the KF computes
the normalizer:
log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi
We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from
this we solve for log |J|.
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
log_Z_init = 0
J_21 = np.swapaxes(H_upper_diag, -1, -2)
log_Z_pair = 0
J_node = H_diag
h_node = np.zeros((T, D))
log_Z_node = 0
logZ, _, _ = kalman_info_filter(J_init, h_init, log_Z_init,
J_11, J_21, J_22, h_1, h_2, log_Z_pair,
J_node, h_node, log_Z_node)
# logZ = -1/2 log |J| + n/2 log 2 \pi
logdetJ = -2 * (logZ - (T*D) / 2 * np.log(2 * np.pi))
return logdetJ
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
    """Marginal (co)variances of a Gaussian with symmetric block
    tridiagonal precision given by (H_diag, H_upper_diag), computed
    with the info-form smoother.

    Returns ``(sigmas, E_xt_xtp1)``: the per-time covariance blocks and
    the expected pairwise products returned by ``info_E_step``.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    # No explicit dynamics: the whole precision enters through node
    # potentials (diagonal blocks) and pairwise potentials (off-diagonal).
    zero_J = np.zeros((D, D))
    zero_h = np.zeros((D,))
    pair_J = np.swapaxes(H_upper_diag, -1, -2)
    _, _, sigmas, E_xt_xtp1 = \
        info_E_step(zero_J, zero_h, 0,
                    zero_J, pair_J, zero_J, zero_h, zero_h, np.zeros((T - 1)),
                    H_diag, np.zeros((T, D)), np.zeros(T))
    return sigmas, E_xt_xtp1
|
mattjj/pylds | pylds/util.py | logdet_symm_block_tridiag | python | def logdet_symm_block_tridiag(H_diag, H_upper_diag):
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
log_Z_init = 0
J_21 = np.swapaxes(H_upper_diag, -1, -2)
log_Z_pair = 0
J_node = H_diag
h_node = np.zeros((T, D))
log_Z_node = 0
logZ, _, _ = kalman_info_filter(J_init, h_init, log_Z_init,
J_11, J_21, J_22, h_1, h_2, log_Z_pair,
J_node, h_node, log_Z_node)
# logZ = -1/2 log |J| + n/2 log 2 \pi
logdetJ = -2 * (logZ - (T*D) / 2 * np.log(2 * np.pi))
return logdetJ | compute the log determinant of a positive definite,
symmetric block tridiag matrix. Use the Kalman
info filter to do so. Specifically, the KF computes
the normalizer:
log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi
We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from
this we solve for log |J|. | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L163-L196 | null | import autograd.numpy as np
from pylds.lds_messages_interface import info_E_step, kalman_info_filter, info_sample
def random_rotation(n, theta=None):
    """Return an n x n matrix that rotates by ``theta`` within a random
    two-dimensional subspace (and annihilates the orthogonal complement
    for n > 2, since only the leading 2x2 block is populated).

    If ``theta`` is None, a random slow angle in [0, pi/2) is drawn.
    """
    if theta is None:
        # Sample a random, slow rotation angle.
        theta = 0.5 * np.pi * np.random.rand()

    if n == 1:
        # Degenerate case: a random scalar in [0, 1).
        return np.random.rand() * np.eye(1)

    cos_t, sin_t = np.cos(theta), np.sin(theta)
    planar = np.array([[cos_t, -sin_t],
                       [sin_t, cos_t]])
    embedded = np.zeros((n, n))
    embedded[:2, :2] = planar

    # Conjugate by a random orthogonal basis from a QR factorization.
    basis = np.linalg.qr(np.random.randn(n, n))[0]
    return basis.dot(embedded).dot(basis.T)
def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
    """
    Compute the matrix-vector product H v where H is symmetric block
    tridiagonal with diagonal blocks ``H_diag`` (T, D, D) and upper
    diagonal blocks ``H_upper_diag`` (T-1, D, D), and v is (T, D).

    :return: H * v, shape (T, D)
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T-1, D, D)
    assert v.shape == (T, D)

    # Diagonal blocks act on their own time step.
    result = np.einsum('tij,tj->ti', H_diag, v)
    # Upper diagonal blocks couple step t to step t+1 ...
    result[:-1] += np.einsum('tij,tj->ti', H_upper_diag, v[1:])
    # ... and their transposes (the lower diagonal) couple t+1 to t.
    result[1:] += np.einsum('tji,tj->ti', H_upper_diag, v[:-1])
    return result
def solve_symm_block_tridiag(H_diag, H_upper_diag, v):
    """Solve H x = v for a symmetric block tridiagonal H using the
    info-form smoother: with H as the precision and v as the linear
    potential, the smoothed means are exactly H^{-1} v.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)
    assert v.shape == (T, D)

    # No dynamics terms: H goes in via node/pair potentials only.
    zero_J = np.zeros((D, D))
    zero_h = np.zeros((D,))
    pair_J = np.swapaxes(H_upper_diag, -1, -2)
    _, means, _, _ = info_E_step(zero_J, zero_h, 0,
                                 zero_J, pair_J, zero_J, zero_h, zero_h, np.zeros((T-1)),
                                 H_diag, v, np.zeros(T))
    return means
def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):
    """
    convert blocks to banded matrix representation required for scipy.
    we are using the "lower form."
    see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html

    :param H_diag:        (T, D, D) diagonal blocks
    :param H_upper_diag:  (T-1, D, D) upper diagonal blocks
    :param lower: if True return lower banded storage ab[i, j] = A[i+j, j];
        if False convert to upper banded form via
        transpose_lower_banded_matrix.
    :return: (2D, TD) banded array
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)
    H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)

    # The full matrix is (TD, TD) with bandwidth 2D - 1; lower banded
    # storage therefore needs 2D rows (diagonal + 2D - 1 subdiagonals).
    ab = np.zeros((2 * D, T * D))

    # Fill in blocks along the diagonal
    for d in range(D):
        # Get indices of (-d)-th diagonal of H_diag
        i = np.arange(d, D)
        j = np.arange(0, D - d)
        # Each block contributes D - d entries; pad with d zeros so each
        # time step occupies exactly D columns after ravel().
        h = np.column_stack((H_diag[:, i, j], np.zeros((T, d))))
        ab[d] = h.ravel()

    # Fill in lower left corner of blocks below the diagonal
    for d in range(0, D):
        # Get indices of (-d)-th diagonal of H_diag
        i = np.arange(d, D)
        j = np.arange(0, D - d)
        h = np.column_stack((H_lower_diag[:, i, j], np.zeros((T - 1, d))))
        ab[D + d, :D * (T - 1)] = h.ravel()

    # Fill in upper corner of blocks below the diagonal
    for d in range(1, D):
        # Get indices of (+d)-th diagonal of H_lower_diag
        i = np.arange(0, D - d)
        j = np.arange(d, D)
        # Zeros are prepended here (not appended) because these entries
        # sit above the block's main diagonal within the band.
        h = np.column_stack((np.zeros((T - 1, d)), H_lower_diag[:, i, j]))
        ab[D - d, :D * (T - 1)] += h.ravel()

    return ab if lower else transpose_lower_banded_matrix(ab)
def transpose_lower_banded_matrix(Lab):
    """Convert a banded matrix from lower-diagonal storage to
    upper-diagonal storage (scipy's two banded conventions).

    :param Lab: (nbands, N) lower banded array; row 0 is the main
        diagonal, row i the i-th subdiagonal.
    :return: new (nbands, N) upper banded array; the input is left
        unmodified.
    """
    # BUG FIX: np.flipud returns a *view* of Lab, so the in-place row
    # shifts below used to corrupt the caller's array. Copy first.
    Uab = np.flipud(Lab).copy()
    u = Uab.shape[0] - 1
    for i in range(1, u + 1):
        # Row -(i+1) now holds the i-th superdiagonal; shift it right by
        # i columns to match upper banded storage, zero-filling the gap.
        Uab[-(i + 1), i:] = Uab[-(i + 1), :-i]
        Uab[-(i + 1), :i] = 0
    return Uab
def scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v, ab=None):
"""
use scipy.linalg.solve_banded to solve a symmetric block tridiagonal system
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
"""
from scipy.linalg import solveh_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag) \
if ab is None else ab
x = solveh_banded(ab, v.ravel(), lower=True)
return x.reshape(v.shape)
def scipy_sample_block_tridiag(H_diag, H_upper_diag, size=1, ab=None, z=None):
from scipy.linalg import cholesky_banded, solve_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=False) \
if ab is None else ab
Uab = cholesky_banded(ab, lower=False)
z = np.random.randn(ab.shape[1], size) if z is None else z
# If lower = False, we have (U^T U)^{-1} = U^{-1} U^{-T} = AA^T = Sigma
# where A = U^{-1}. Samples are Az = U^{-1}z = x, or equivalently Ux = z.
return solve_banded((0, Uab.shape[0]-1), Uab, z)
def sample_block_tridiag(H_diag, H_upper_diag):
    """
    helper function for sampling block tridiag gaussians.
    this is only for speed comparison with the solve approach.

    Presumably draws x ~ N(0, H^{-1}) where H is the symmetric block
    tridiagonal precision given by (H_diag, H_upper_diag) — depends on
    info_sample's semantics; confirm against lds_messages_interface.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    # Encode the precision purely through node potentials (diagonal
    # blocks) and pairwise potentials (off-diagonal blocks); all linear
    # terms and dynamics terms are zero.
    J_init = J_11 = J_22 = np.zeros((D, D))
    h_init = h_1 = h_2 = np.zeros((D,))
    J_21 = np.swapaxes(H_upper_diag, -1, -2)
    J_node = H_diag
    h_node = np.zeros((T,D))

    y = info_sample(J_init, h_init, 0,
                    J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
                    J_node, h_node, np.zeros(T))
    return y
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T, D))
_, _, sigmas, E_xt_xtp1 = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return sigmas, E_xt_xtp1
|
mattjj/pylds | pylds/util.py | compute_symm_block_tridiag_covariances | python | def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T, D))
_, _, sigmas, E_xt_xtp1 = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return sigmas, E_xt_xtp1 | use the info smoother to solve a symmetric block tridiagonal system | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/util.py#L199-L218 | null | import autograd.numpy as np
from pylds.lds_messages_interface import info_E_step, kalman_info_filter, info_sample
def random_rotation(n, theta=None):
if theta is None:
# Sample a random, slow rotation
theta = 0.5 * np.pi * np.random.rand()
if n == 1:
return np.random.rand() * np.eye(1)
rot = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
out = np.zeros((n, n))
out[:2, :2] = rot
q = np.linalg.qr(np.random.randn(n, n))[0]
return q.dot(out).dot(q.T)
def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
"""
Compute matrix-vector product with a symmetric block
tridiagonal matrix H and vector v.
:param H_diag: block diagonal terms of H
:param H_upper_diag: upper block diagonal terms of H
:param v: vector to multiple
:return: H * v
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T-1, D, D)
assert v.shape == (T, D)
out = np.matmul(H_diag, v[:, :, None])[:, :, 0]
out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]
out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1), v[:-1][:, :, None])[:, :, 0]
return out
def solve_symm_block_tridiag(H_diag, H_upper_diag, v):
"""
use the info smoother to solve a symmetric block tridiagonal system
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
assert v.shape == (T, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = v
_, y, _, _ = info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return y
def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):
"""
convert blocks to banded matrix representation required for scipy.
we are using the "lower form."
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)
ab = np.zeros((2 * D, T * D))
# Fill in blocks along the diagonal
for d in range(D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_diag[:, i, j], np.zeros((T, d))))
ab[d] = h.ravel()
# Fill in lower left corner of blocks below the diagonal
for d in range(0, D):
# Get indices of (-d)-th diagonal of H_diag
i = np.arange(d, D)
j = np.arange(0, D - d)
h = np.column_stack((H_lower_diag[:, i, j], np.zeros((T - 1, d))))
ab[D + d, :D * (T - 1)] = h.ravel()
# Fill in upper corner of blocks below the diagonal
for d in range(1, D):
# Get indices of (+d)-th diagonal of H_lower_diag
i = np.arange(0, D - d)
j = np.arange(d, D)
h = np.column_stack((np.zeros((T - 1, d)), H_lower_diag[:, i, j]))
ab[D - d, :D * (T - 1)] += h.ravel()
return ab if lower else transpose_lower_banded_matrix(ab)
def transpose_lower_banded_matrix(Lab):
# This is painful
Uab = np.flipud(Lab)
u = Uab.shape[0] - 1
for i in range(1,u+1):
Uab[-(i+1), i:] = Uab[-(i+1), :-i]
Uab[-(i + 1), :i] = 0
return Uab
def scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v, ab=None):
"""
use scipy.linalg.solve_banded to solve a symmetric block tridiagonal system
see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
"""
from scipy.linalg import solveh_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag) \
if ab is None else ab
x = solveh_banded(ab, v.ravel(), lower=True)
return x.reshape(v.shape)
def scipy_sample_block_tridiag(H_diag, H_upper_diag, size=1, ab=None, z=None):
from scipy.linalg import cholesky_banded, solve_banded
ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=False) \
if ab is None else ab
Uab = cholesky_banded(ab, lower=False)
z = np.random.randn(ab.shape[1], size) if z is None else z
# If lower = False, we have (U^T U)^{-1} = U^{-1} U^{-T} = AA^T = Sigma
# where A = U^{-1}. Samples are Az = U^{-1}z = x, or equivalently Ux = z.
return solve_banded((0, Uab.shape[0]-1), Uab, z)
def sample_block_tridiag(H_diag, H_upper_diag):
"""
helper function for sampling block tridiag gaussians.
this is only for speed comparison with the solve approach.
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
J_21 = np.swapaxes(H_upper_diag, -1, -2)
J_node = H_diag
h_node = np.zeros((T,D))
y = info_sample(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)),
J_node, h_node, np.zeros(T))
return y
def logdet_symm_block_tridiag(H_diag, H_upper_diag):
"""
compute the log determinant of a positive definite,
symmetric block tridiag matrix. Use the Kalman
info filter to do so. Specifically, the KF computes
the normalizer:
log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi
We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from
this we solve for log |J|.
"""
T, D, _ = H_diag.shape
assert H_diag.ndim == 3 and H_diag.shape[2] == D
assert H_upper_diag.shape == (T - 1, D, D)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
log_Z_init = 0
J_21 = np.swapaxes(H_upper_diag, -1, -2)
log_Z_pair = 0
J_node = H_diag
h_node = np.zeros((T, D))
log_Z_node = 0
logZ, _, _ = kalman_info_filter(J_init, h_init, log_Z_init,
J_11, J_21, J_22, h_1, h_2, log_Z_pair,
J_node, h_node, log_Z_node)
# logZ = -1/2 log |J| + n/2 log 2 \pi
logdetJ = -2 * (logZ - (T*D) / 2 * np.log(2 * np.pi))
return logdetJ
|
mattjj/pylds | pylds/states.py | LDSStatesZeroInflatedCountData.resample_zeroinflation_variables | python | def resample_zeroinflation_variables(self):
# TODO: move this to cython?
T, N, C, D, b = self.T, self.D_emission, self.C, self.D, self.emission_distn.b
indptr = [0]
indices = []
vals = []
offset = 0
X = np.hstack((self.gaussian_states, self.inputs))
for t in range(T):
# Evaluate probability of data
y_t = np.zeros(N)
ns_t = self.data.indices[self.data.indptr[t]:self.data.indptr[t+1]]
y_t[ns_t] = self.data.data[self.data.indptr[t]:self.data.indptr[t+1]]
ll = self.emission_distn._elementwise_log_likelihood((X[t], y_t))
ll = ll.ravel()
# Evaluate the probability that each emission was "exposed",
# i.e. p(z_tn = 1 | y_tn, x_tn)
log_p_exposed = np.log(self.rho) + ll
log_p_exposed -= np.log(np.exp(log_p_exposed) + (1-self.rho) * (y_t == 0))
# Sample zero inflation mask
z_t = np.random.rand(N) < np.exp(log_p_exposed)
# Construct the sparse matrix
t_inds = np.where(z_t)[0]
indices.append(t_inds)
vals.append(y_t[t_inds])
offset += t_inds.size
indptr.append(offset)
# Construct a sparse matrix
vals = np.concatenate(vals)
indices = np.concatenate(indices)
indptr = np.array(indptr)
self.masked_data = csr_matrix((vals, indices, indptr), shape=(T, N)) | There's no way around the fact that we have to look at every
data point, even the zeros here. | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/states.py#L905-L944 | null | class LDSStatesZeroInflatedCountData(LDSStatesMissingData, _LDSStatesGibbs):
"""
In many cases, the observation dimension is so large and so sparse
that a Bernoulli, Poisson, etc. is not a good model. Moreover, it
is computationally demanding to compute the likelihood for so many
terms. Zero-inflated models address both concerns. Let,
z_{t,n} ~ Bern(rho)
y_{t,n} ~ p(y_{t,n} | c_n.dot(x_t) + d_n)) if z_{t,n} = 1
= 0 o.w.
If z_{t,n} = 1, we say that datapoint was "exposed." That is, the
observation y_{t,n} reflects the underlying latent state. The
observation may be zero, but that is still informative. However,
if the datapoint was not exposed (which can only happen if y_{t,n}=0),
then this term does not reflect the underlying state.
Thus, Z is effectively a mask on the data, and the likelihood only
depends on places where z_{t,n} = 1. Moreover, we only have to
introduce auxiliary variables for the entries that are unmasked.
"""
def __init__(self,model, data=None, **kwargs):
# The data must be provided in sparse row format
# This makes it easy to iterate over rows. Basically,
# for each row, t, it is easy to get the output dimensions, n,
# such that y_{t,n} > 0.
super(LDSStatesZeroInflatedCountData, self).\
__init__(model, data=data, **kwargs)
# Initialize the Polya-gamma samplers
num_threads = ppg.get_omp_num_threads()
seeds = np.random.randint(2 ** 16, size=num_threads)
self.ppgs = [ppg.PyPolyaGamma(seed) for seed in seeds]
# Initialize the masked data
if data is not None:
assert isinstance(data, csr_matrix), "Data must be a sparse row matrix for zero-inflated models"
# Initialize a sparse matrix of masked data. The mask
# specifies which observations were "exposed" and which
# were determinisitcally zero. In other words, the mask
# gives the data values at the places where z_{t,n} = 1.
T, N, C, D, b = self.T, self.D_emission, self.C, self.D, self.emission_distn.b
indptr = [0]
indices = []
vals = []
offset = 0
for t in range(T):
# Get the nonzero entries in the t-th row
ns_t = data.indices[data.indptr[t]:data.indptr[t + 1]]
y_t = np.zeros(N)
y_t[ns_t] = data.data[data.indptr[t]:data.indptr[t + 1]]
# Sample zero inflation mask
z_t = np.random.rand(N) < self.rho
z_t[ns_t] = True
# Construct the sparse matrix
t_inds = np.where(z_t)[0]
indices.append(t_inds)
vals.append(y_t[t_inds])
offset += t_inds.size
indptr.append(offset)
# Construct a sparse matrix
vals = np.concatenate(vals)
indices = np.concatenate(indices)
indptr = np.array(indptr)
self.masked_data = csr_matrix((vals, indices, indptr), shape=(T, N))
# DEBUG: Start with all the data
# dense_data = data.toarray()
# values = dense_data.ravel()
# indices = np.tile(np.arange(self.D_emission), (self.T,))
# indptrs = np.arange(self.T+1) * self.D_emission
# self.masked_data = csr_matrix((values, indices, indptrs), (self.T, self.D_emission))
# assert np.allclose(self.masked_data.toarray(), dense_data)
self.resample_auxiliary_variables()
else:
self.masked_data = None
self.omega = None
@property
def rho(self):
return self.model.rho
@property
def sigma_obs(self):
raise Exception("Count data does not have sigma_obs")
def generate_obs(self):
# Go through each time bin, get the discrete latent state,
# use that to index into the emission_distns to get samples
T, p = self.T, self.D_emission
ed = self.emission_distn
gss = self.gaussian_states
data = np.empty((T,p),dtype='double')
# TODO: Do this sparsely
for t in range(self.T):
data[t] = \
ed.rvs(x=np.hstack((gss[t][None, :], self.inputs[t][None,:])),
return_xy=False)
# Zero out data
zeros = np.random.rand(p) > self.rho
data[t][zeros] = 0
data = csr_matrix(data)
return data
@property
def info_emission_params(self):
T, D_latent, D_emission = self.T, self.D_latent, self.D_emission
masked_data, inputs, omega = self.masked_data, self.inputs, self.omega
emission_distn = self.emission_distn
C = emission_distn.A[:, :D_latent]
CCT = np.array([np.outer(c, c) for c in C]).reshape((D_emission, D_latent**2))
D = emission_distn.A[:,D_latent:]
b = emission_distn.b
J_node = omega.dot(CCT).reshape((T, D_latent, D_latent))
kappa = emission_distn.kappa_func(masked_data.data)
kappa = csr_matrix((kappa, masked_data.indices, masked_data.indptr), shape=masked_data.shape)
h_node = kappa.dot(C)
# Unfortunately, the following operations would require dense arrays of size (TxD_emisison)
# h_node += -(omega * b.T).dot(C)
# h_node += -(omega * inputs.dot(D.T)).dot(C)
# This might not be much faster, but it should avoid making
# dense arrays
for t in range(T):
ns_t = masked_data.indices[masked_data.indptr[t]:masked_data.indptr[t+1]]
om_t = omega.data[omega.indptr[t]:omega.indptr[t+1]]
h_node[t] -= (om_t * b[ns_t][:,0]).dot(C[ns_t])
h_node[t] -= (om_t * inputs[t].dot(D[ns_t].T)).dot(C[ns_t])
# TODO: See comment in _LDSStatesCountData for info on the log normalizers
# The same applies to this zero-inflated data
log_Z_node = np.zeros(self.T)
return J_node, h_node, log_Z_node
@property
def expected_info_emission_params(self):
raise NotImplementedError("Mean field with count observations is not yet supported")
@property
def expected_extra_info_params(self):
raise NotImplementedError("Mean field with count observations is not yet supported")
@property
def psi(self):
T, C, D, ed = self.T, self.C, self.D, self.emission_distn
data, size, indices, indptr \
= self.masked_data, self.masked_data.size, \
self.masked_data.indices, self.masked_data.indptr
psi = np.zeros(size)
offset = 0
for t in range(T):
for n in indices[indptr[t]:indptr[t + 1]]:
psi[offset] = self.gaussian_states[t].dot(C[n])
psi[offset] += self.inputs[t].dot(D[n])
psi[offset] += ed.b[n]
offset += 1
return csr_matrix((psi, indices, indptr), shape=data.shape)
def resample(self, niter=1):
    """One Gibbs sweep over the local latent variables."""
    # Order matters: the zero-inflation mask defines masked_data, which
    # the Polya-gamma auxiliary update reads, and omega in turn enters
    # the Gaussian-state conditional via info_emission_params.
    # NOTE(review): `niter` is accepted but never used — exactly one
    # sweep is performed; confirm whether callers expect repetition.
    self.resample_zeroinflation_variables()
    self.resample_auxiliary_variables()
    self.resample_gaussian_states()
def resample_auxiliary_variables(self):
# TODO: move this to cython
T, C, D, ed = self.T, self.C, self.D, self.emission_distn
data, size, indices, indptr \
= self.masked_data, self.masked_data.size, \
self.masked_data.indices, self.masked_data.indptr
psi = np.zeros(size)
offset = 0
for t in range(T):
for n in indices[indptr[t]:indptr[t+1]]:
psi[offset] = self.gaussian_states[t].dot(C[n])
psi[offset] += self.inputs[t].dot(D[n])
psi[offset] += ed.b[n]
offset += 1
psi = csr_matrix((psi, indices, indptr), shape=data.shape)
b = ed.b_func(data)
# Allocate vector for omega
self.omega = np.zeros(size)
ppg.pgdrawvpar(self.ppgs, b.data, psi.data, self.omega)
self.omega = csr_matrix((self.omega, indices, indptr), shape=data.shape)
def smooth(self):
# TODO: By assumption, the data is too large to construct
# TODO: a dense smoothing matrix. Let's support a column-wise
# TODO: smoothing operation instead.
warn("Zero inflated smoothing is instantiating a dense matrix!")
X = np.column_stack((self.gaussian_states, self.inputs))
mean = self.rho * self.emission_distn.mean(X)
return mean
|
mattjj/pylds | pylds/distributions.py | PoissonRegression.expected_log_likelihood | python | def expected_log_likelihood(self, mus, sigmas, y):
def expected_log_likelihood(self, mus, sigmas, y):
    """
    Compute the expected log likelihood for a mean and
    covariance of x and an observed value of y.

    For y_n ~ Poisson(exp(a_n^T x)) with x ~ N(mu, sigma), per entry:
        E[log p(y_n | x)] = y_n * a_n^T mu - exp(a_n^T mu + 1/2 a_n^T sigma a_n)
    (dropping the -log y_n! constant, which is independent of A).
    """
    # Flatten the covariance so the quadratic form is a single dot product.
    T = mus.shape[0]
    D = self.D_in
    sigs_vec = sigmas.reshape((T, D ** 2))

    # Compute the log likelihood of each column
    ll = np.zeros((T, self.D_out))
    for n in range(self.D_out):
        an = self.A[n]

        E_loglmbda = np.dot(mus, an)
        ll[:,n] += y[:,n] * E_loglmbda

        # Vectorized log likelihood calculation of the expected rate,
        # E[exp(a^T x)] = exp(a^T mu + 1/2 a^T sigma a).
        aa_vec = np.outer(an, an).reshape((D ** 2,))
        # BUG FIX: was 'll[:,n] = -np.exp(...)', which overwrote (rather
        # than subtracted from) the data term accumulated above.
        ll[:,n] -= np.exp(E_loglmbda + 0.5 * np.dot(sigs_vec, aa_vec))

    return ll
return ll | Compute the expected log likelihood for a mean and
covariance of x and an observed value of y. | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/distributions.py#L55-L79 | null | class PoissonRegression(Regression):
"""
Poisson regression with Gaussian distributed inputs and exp link:
y ~ Poisson(exp(Ax))
where x ~ N(mu, sigma)
Currently, we only support maximum likelihood estimation of the
parameters A given the distribution over inputs, x, and
the observed outputs, y.
We compute the expected log likelihood in closed form (since
we can do this with the exp link function), and we use Autograd
to compute its gradients.
"""
def __init__(self, D_out, D_in, A=None, verbose=False):
self._D_out, self._D_in = D_out, D_in
self.verbose = verbose
if A is not None:
assert A.shape == (D_out, D_in)
self.A = A.copy()
else:
self.A = 0.01 * np.random.randn(D_out, D_in)
self.sigma = None
@property
def D_in(self):
return self._D_in
@property
def D_out(self):
return self._D_out
def log_likelihood(self,xy):
assert isinstance(xy, tuple)
x, y = xy
loglmbda = x.dot(self.A.T)
lmbda = np.exp(loglmbda)
return -gammaln(y+1) - lmbda + y * loglmbda
def predict(self, x):
    """Mean of the Poisson observations: lambda = exp(x A^T)."""
    return np.exp(np.dot(x, self.A.T))
def rvs(self,x=None,size=1,return_xy=True):
x = np.random.normal(size=(size, self.D_in)) if x is None else x
y = np.random.poisson(self.predict(x))
return np.hstack((x, y)) if return_xy else y
def max_likelihood(self, data, weights=None,stats=None):
"""
Maximize the likelihood for a given value of x
:param data:
:param weights:
:param stats:
:return:
"""
raise NotImplementedError
def max_expected_likelihood(self, stats, verbose=False):
# These aren't really "sufficient" statistics, since we
# need the mean and covariance for each time bin.
EyxuT = np.sum([s[0] for s in stats], axis=0)
mus = np.vstack([s[1] for s in stats])
sigmas = np.vstack([s[2] for s in stats])
inputs = np.vstack([s[3] for s in stats])
masks = np.vstack(s[4] for s in stats)
T = mus.shape[0]
D_latent = mus.shape[1]
sigmas_vec = sigmas.reshape((T, D_latent**2))
# Optimize each row of A independently
ns = progprint_xrange(self.D_out) if verbose else range(self.D_out)
for n in ns:
# Flatten the covariance to enable vectorized calculations
def ll_vec(an):
ll = 0
ll += np.dot(an, EyxuT[n])
# Vectorized log likelihood calculation
loglmbda = np.dot(mus, an)
aa_vec = np.outer(an[:D_latent], an[:D_latent]).reshape((D_latent ** 2,))
trms = np.exp(loglmbda + 0.5 * np.dot(sigmas_vec, aa_vec))
ll -= np.sum(trms[masks[:, n]])
if not np.isfinite(ll):
return -np.inf
return ll / T
obj = lambda x: -ll_vec(x)
itr = [0]
def cbk(x):
itr[0] += 1
print("M_step iteration ", itr[0])
res = minimize(value_and_grad(obj), self.A[n],
jac=True,
callback=cbk if verbose else None)
assert res.success
self.A[n] = res.x
|
mattjj/pylds | pylds/distributions.py | BernoulliRegression.max_likelihood | python | def max_likelihood(self, data, weights=None, stats=None):
if isinstance(data, list):
x = np.vstack([d[0] for d in data])
y = np.vstack([d[1] for d in data])
elif isinstance(data, tuple):
assert len(data) == 2
elif isinstance(data, np.ndarray):
x, y = data[:,:self.D_in], data[:, self.D_in:]
else:
raise Exception("Invalid data type")
from sklearn.linear_model import LogisticRegression
for n in progprint_xrange(self.D_out):
lr = LogisticRegression(fit_intercept=False)
lr.fit(x, y[:,n])
self.A[n] = lr.coef_ | Maximize the likelihood for given data
:param data:
:param weights:
:param stats:
:return: | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/distributions.py#L204-L226 | null | class BernoulliRegression(Regression):
"""
Bernoulli regression with Gaussian distributed inputs and logistic link:
y ~ Bernoulli(logistic(Ax))
where x ~ N(mu, sigma)
Currently, we only support maximum likelihood estimation of the
parameter A given the distribution over inputs, x, and
the observed outputs, y.
We approximate the expected log likelihood with Monte Carlo.
"""
def __init__(self, D_out, D_in, A=None, verbose=False):
self._D_out, self._D_in = D_out, D_in
self.verbose = verbose
if A is not None:
assert A.shape == (D_out, D_in)
self.A = A.copy()
else:
self.A = 0.01 * np.random.randn(D_out, D_in)
self.sigma = None
@property
def D_in(self):
return self._D_in
@property
def D_out(self):
return self._D_out
def log_likelihood(self,xy):
    """Total Bernoulli log likelihood of the pair (x, y).

    :param xy: tuple of inputs x (T, D_in) and binary outputs y (T, D_out)
    :return: scalar sum_{t,n} [ y psi - log(1 + exp(psi)) ] with
        psi = x A^T, computed stably via the log-sum-exp trick.
    """
    assert isinstance(xy, tuple)
    x, y = xy
    psi = x.dot(self.A.T)

    # First term is linear in psi. BUG FIX: this was the elementwise
    # array 'y * psi', but the two normalizer terms below subtract
    # np.sum(...) scalars — broadcasting the *total* log normalizer
    # into every entry. Summing here makes the result a coherent
    # scalar total log likelihood.
    ll = np.sum(y * psi)

    # Compute second term with log-sum-exp trick (see above)
    logm = np.maximum(0, psi)
    ll -= np.sum(logm)
    ll -= np.sum(np.log(np.exp(-logm) + np.exp(psi - logm)))
    return ll
def predict(self, x):
return 1 / (1 + np.exp(-x.dot(self.A.T)))
def rvs(self, x=None, size=1, return_xy=True):
x = np.random.normal(size=(size, self.D_in)) if x is None else x
y = np.random.rand(x.shape[0], self.D_out) < self.predict(x)
return np.hstack((x, y)) if return_xy else y
def max_expected_likelihood(self, stats, verbose=False, n_smpls=1):
# These aren't really "sufficient" statistics, since we
# need the mean and covariance for each time bin.
EyxuT = np.sum([s[0] for s in stats], axis=0)
mus = np.vstack([s[1] for s in stats])
sigmas = np.vstack([s[2] for s in stats])
inputs = np.vstack([s[3] for s in stats])
T = mus.shape[0]
D_latent = mus.shape[1]
# Draw Monte Carlo samples of x
sigmas_chol = np.linalg.cholesky(sigmas)
x_smpls = mus[:, :, None] + np.matmul(sigmas_chol, np.random.randn(T, D_latent, n_smpls))
# Optimize each row of A independently
ns = progprint_xrange(self.D_out) if verbose else range(self.D_out)
for n in ns:
def ll_vec(an):
ll = 0
# todo include mask
# First term is linear in psi
ll += np.dot(an, EyxuT[n])
# Second term depends only on x and cannot be computed in closed form
# Instead, Monte Carlo sample x
psi_smpls = np.einsum('tdm, d -> tm', x_smpls, an[:D_latent])
psi_smpls = psi_smpls + np.dot(inputs, an[D_latent:])[:, None]
logm = np.maximum(0, psi_smpls)
trm2_smpls = logm + np.log(np.exp(-logm) + np.exp(psi_smpls - logm))
ll -= np.sum(trm2_smpls) / n_smpls
if not np.isfinite(ll):
return -np.inf
return ll / T
obj = lambda x: -ll_vec(x)
itr = [0]
def cbk(x):
itr[0] += 1
print("M_step iteration ", itr[0])
res = minimize(value_and_grad(obj), self.A[n],
jac=True,
# callback=cbk if verbose else None)
callback=None)
assert res.success
self.A[n] = res.x
|
mattjj/pylds | pylds/laplace.py | _LaplaceApproxLDSStatesBase.log_conditional_likelihood | python | def log_conditional_likelihood(self, x):
T, D = self.T, self.D_latent
assert x.shape == (T, D)
ll = 0
for t in range(self.T):
ll += self.local_log_likelihood(x[t], self.data[t], self.inputs[t])
return ll | likelihood \sum_t log p(y_t | x_t)
Optionally override this in base classes | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L26-L37 | [
"def local_log_likelihood(self, xt, yt, ut):\n \"\"\"\n Return log p(yt | xt). Implement this in base classes.\n \"\"\"\n raise NotImplementedError\n"
] | class _LaplaceApproxLDSStatesBase(_LDSStates):
"""
Support variational inference via Laplace approximation.
The key is a definition of the log likelihood,
log p(y_t | x_t, \theta)
Combining this with a Gaussian LDS prior on the states,
we can compute the gradient and Hessian of the log likelihood.
"""
def local_log_likelihood(self, xt, yt, ut):
"""
Return log p(yt | xt). Implement this in base classes.
"""
raise NotImplementedError
def grad_local_log_likelihood(self, x):
"""
return d/dxt log p(yt | xt) evaluated at xt
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
gfun = grad(self.local_log_likelihood)
g = np.zeros((T, D))
for t in range(T):
g[t] += gfun(x[t], self.data[t], self.inputs[t])
return g
def hessian_local_log_likelihood(self, x):
"""
return d^2/dxt^2 log p(y | x) for each time bin
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
hfun = hessian(self.local_log_likelihood)
H_diag = np.zeros((T, D, D))
for t in range(T):
H_diag[t] = hfun(x[t], self.data[t], self.inputs[t])
return H_diag
@property
def sparse_J_prior(self):
T, D = self.T, self.D_latent
J_init, _, _ = self.info_init_params
J_11, J_21, J_22, _, _, _ = self.info_dynamics_params
# Collect the Gaussian LDS prior terms
J_diag = np.zeros((T, D, D))
J_diag[0] += J_init
J_diag[:-1] += J_11
J_diag[1:] += J_22
J_upper_diag = np.repeat(J_21.T[None, :, :], T - 1, axis=0)
return J_diag, J_upper_diag
def log_joint(self, x):
"""
Compute the log joint probability p(x, y)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# prior log p(x) -- quadratic terms
J_diag, J_upper_diag = self.sparse_J_prior
lp = -0.5 * np.sum(x * symm_block_tridiag_matmul(J_diag, J_upper_diag, x))
# prior log p(x) -- linear terms
_, h_init, log_Z_init = self.info_init_params
_, _, _, h1, h2, log_Z_dyn = self.info_dynamics_params
lp += x[0].dot(h_init)
lp += np.sum(x[:-1] * h1)
lp += np.sum(x[1:] * h2)
# prior log p(x) -- normalization constants
lp += log_Z_init
lp += np.sum(log_Z_dyn)
# likelihood log p(y | x)
lp += self.log_conditional_likelihood(x)
return lp
def sparse_hessian_log_joint(self, x):
"""
The Hessian includes the quadratic terms of the Gaussian LDS prior
as well as the Hessian of the local log likelihood.
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
J_diag, J_upper_diag = self.sparse_J_prior
H_diag, H_upper_diag = -J_diag, -J_upper_diag
# Collect the likelihood terms
H_diag += self.hessian_local_log_likelihood(x)
# Subtract a little bit to ensure negative definiteness
H_diag -= 1e-8 * np.eye(D)
return H_diag, H_upper_diag
def hessian_vector_product_log_joint(self, x, v):
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
return symm_block_tridiag_matmul(H_diag, H_upper_diag, v)
def gradient_log_joint(self, x):
"""
The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
_, h_init, _ = self.info_init_params
_, _, _, h1, h2, _ = self.info_dynamics_params
H_diag, H_upper_diag = self.sparse_J_prior
# Compute the gradient from the prior
g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
g[0] += h_init
g[:-1] += h1
g[1:] += h2
# Compute gradient from the likelihood terms
g += self.grad_local_log_likelihood(x)
return g
def laplace_approximation(self, method="newton", verbose=False, tol=1e-7, **kwargs):
if method.lower() == "newton":
return self._laplace_approximation_newton(verbose=verbose, tol=tol, **kwargs)
elif method.lower() == "bfgs":
return self._laplace_approximation_bfgs(verbose=verbose, tol=tol, **kwargs)
else:
raise Exception("Invalid method: {}".format(method))
def _laplace_approximation_bfgs(self, tol=1e-7, verbose=False):
from scipy.optimize import minimize
# Gradient ascent on the log joint probability to get mu
T, D = self.T, self.D_latent
scale = self.T * self.D_emission
obj = lambda xflat: -self.log_joint(xflat.reshape((T, D))) / scale
jac = lambda xflat: -self.gradient_log_joint(xflat.reshape((T, D))).ravel() / scale
hvp = lambda xflat, v: -self.hessian_vector_product_log_joint(
xflat.reshape((T, D)), v.reshape((T, D))).ravel() / scale
x0 = self.gaussian_states.reshape((T * D,))
# Make callback
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", obj(x).round(2),
"\tAvg Grad: ", jac(x).mean().round(2))
itr[0] += 1
# Second order method
if verbose:
print("Fitting Laplace approximation")
res = minimize(obj, x0,
tol=tol,
method="Newton-CG",
jac=jac,
hessp=hvp,
callback=cbk if verbose else None)
assert res.success
mu = res.x
assert np.all(np.isfinite(mu))
if verbose: print("Done")
# Unflatten and compute the expected sufficient statistics
return mu.reshape((T, D))
def _laplace_approximation_newton(self, tol=1e-6, stepsz=0.9, verbose=False):
"""
Solve a block tridiagonal system with message passing.
"""
from pylds.util import solve_symm_block_tridiag, scipy_solve_symm_block_tridiag
scale = self.T * self.D_emission
def newton_step(x, stepsz):
assert 0 <= stepsz <= 1
g = self.gradient_log_joint(x)
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,
-H_upper_diag / scale,
g / scale)
return x - stepsz * Hinv_g
if verbose:
print("Fitting Laplace approximation")
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", (self.log_joint(x) / scale).round(4),
"\tAvg Grad: ", (self.gradient_log_joint(x).mean() / scale).round(4))
itr[0] += 1
# Solve for optimal x with Newton's method
x = self.gaussian_states
dx = np.inf
while dx >= tol:
xnew = newton_step(x, stepsz)
dx = np.mean(abs(xnew - x))
x = xnew
if verbose:
cbk(x)
assert np.all(np.isfinite(x))
if verbose:
print("Done")
return x
def log_likelihood(self):
if self._normalizer is None:
self.E_step()
return self._normalizer
def E_step(self, verbose=False):
self.gaussian_states = self.laplace_approximation(verbose=verbose)
# Compute normalizer and covariances with E step
T, D = self.T, self.D_latent
H_diag, H_upper_diag = self.sparse_hessian_log_joint(self.gaussian_states)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
# Negate the Hessian since precision is -H
J_21 = np.swapaxes(-H_upper_diag, -1, -2)
J_node = -H_diag
h_node = np.zeros((T, D))
logZ, _, self.smoothed_sigmas, E_xtp1_xtT = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T - 1)),
J_node, h_node, np.zeros(T))
# Laplace approximation -- normalizer is the joint times
# the normalizer from the Gaussian approx.
self._normalizer = self.log_joint(self.gaussian_states) + logZ
self._set_expected_stats(self.gaussian_states, self.smoothed_sigmas, E_xtp1_xtT)
def _set_expected_stats(self, mu, sigmas, E_xtp1_xtT):
# Get the emission stats
p, n, d, T, inputs, y = \
self.D_emission, self.D_latent, self.D_input, self.T, \
self.inputs, self.data
E_x_xT = sigmas + mu[:, :, None] * mu[:, None, :]
E_x_uT = mu[:, :, None] * self.inputs[:, None, :]
E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1].sum(0)
E_xtp1_xtp1T = E_x_xT[1:].sum(0)
E_xtp1_xtT = E_xtp1_xtT.sum(0)
E_xtp1_utT = (mu[1:, :, None] * inputs[:-1, None, :]).sum(0)
E_xtp1_xutT = np.hstack((E_xtp1_xtT, E_xtp1_utT))
self.E_dynamics_stats = np.array(
[E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, self.T - 1])
# Compute the expectations for the observations
E_yxT = np.sum(y[:, :, None] * mu[:, None, :], axis=0)
E_yuT = y.T.dot(inputs)
E_yxuT = np.hstack((E_yxT, E_yuT))
self.E_emission_stats = np.array([E_yxuT, mu, sigmas, inputs, np.ones_like(y, dtype=bool)])
def smooth(self):
return self.emission_distn.predict(np.hstack((self.gaussian_states, self.inputs)))
|
mattjj/pylds | pylds/laplace.py | _LaplaceApproxLDSStatesBase.grad_local_log_likelihood | python | def grad_local_log_likelihood(self, x):
T, D = self.T, self.D_latent
assert x.shape == (T, D)
gfun = grad(self.local_log_likelihood)
g = np.zeros((T, D))
for t in range(T):
g[t] += gfun(x[t], self.data[t], self.inputs[t])
return g | return d/dxt log p(yt | xt) evaluated at xt
Optionally override this in base classes | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L39-L51 | null | class _LaplaceApproxLDSStatesBase(_LDSStates):
"""
Support variational inference via Laplace approximation.
The key is a definition of the log likelihood,
log p(y_t | x_t, \theta)
Combining this with a Gaussian LDS prior on the states,
we can compute the gradient and Hessian of the log likelihood.
"""
def local_log_likelihood(self, xt, yt, ut):
"""
Return log p(yt | xt). Implement this in base classes.
"""
raise NotImplementedError
def log_conditional_likelihood(self, x):
"""
likelihood \sum_t log p(y_t | x_t)
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
ll = 0
for t in range(self.T):
ll += self.local_log_likelihood(x[t], self.data[t], self.inputs[t])
return ll
def hessian_local_log_likelihood(self, x):
"""
return d^2/dxt^2 log p(y | x) for each time bin
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
hfun = hessian(self.local_log_likelihood)
H_diag = np.zeros((T, D, D))
for t in range(T):
H_diag[t] = hfun(x[t], self.data[t], self.inputs[t])
return H_diag
@property
def sparse_J_prior(self):
T, D = self.T, self.D_latent
J_init, _, _ = self.info_init_params
J_11, J_21, J_22, _, _, _ = self.info_dynamics_params
# Collect the Gaussian LDS prior terms
J_diag = np.zeros((T, D, D))
J_diag[0] += J_init
J_diag[:-1] += J_11
J_diag[1:] += J_22
J_upper_diag = np.repeat(J_21.T[None, :, :], T - 1, axis=0)
return J_diag, J_upper_diag
def log_joint(self, x):
"""
Compute the log joint probability p(x, y)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# prior log p(x) -- quadratic terms
J_diag, J_upper_diag = self.sparse_J_prior
lp = -0.5 * np.sum(x * symm_block_tridiag_matmul(J_diag, J_upper_diag, x))
# prior log p(x) -- linear terms
_, h_init, log_Z_init = self.info_init_params
_, _, _, h1, h2, log_Z_dyn = self.info_dynamics_params
lp += x[0].dot(h_init)
lp += np.sum(x[:-1] * h1)
lp += np.sum(x[1:] * h2)
# prior log p(x) -- normalization constants
lp += log_Z_init
lp += np.sum(log_Z_dyn)
# likelihood log p(y | x)
lp += self.log_conditional_likelihood(x)
return lp
def sparse_hessian_log_joint(self, x):
"""
The Hessian includes the quadratic terms of the Gaussian LDS prior
as well as the Hessian of the local log likelihood.
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
J_diag, J_upper_diag = self.sparse_J_prior
H_diag, H_upper_diag = -J_diag, -J_upper_diag
# Collect the likelihood terms
H_diag += self.hessian_local_log_likelihood(x)
# Subtract a little bit to ensure negative definiteness
H_diag -= 1e-8 * np.eye(D)
return H_diag, H_upper_diag
def hessian_vector_product_log_joint(self, x, v):
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
return symm_block_tridiag_matmul(H_diag, H_upper_diag, v)
def gradient_log_joint(self, x):
"""
The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
_, h_init, _ = self.info_init_params
_, _, _, h1, h2, _ = self.info_dynamics_params
H_diag, H_upper_diag = self.sparse_J_prior
# Compute the gradient from the prior
g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
g[0] += h_init
g[:-1] += h1
g[1:] += h2
# Compute gradient from the likelihood terms
g += self.grad_local_log_likelihood(x)
return g
def laplace_approximation(self, method="newton", verbose=False, tol=1e-7, **kwargs):
if method.lower() == "newton":
return self._laplace_approximation_newton(verbose=verbose, tol=tol, **kwargs)
elif method.lower() == "bfgs":
return self._laplace_approximation_bfgs(verbose=verbose, tol=tol, **kwargs)
else:
raise Exception("Invalid method: {}".format(method))
def _laplace_approximation_bfgs(self, tol=1e-7, verbose=False):
from scipy.optimize import minimize
# Gradient ascent on the log joint probability to get mu
T, D = self.T, self.D_latent
scale = self.T * self.D_emission
obj = lambda xflat: -self.log_joint(xflat.reshape((T, D))) / scale
jac = lambda xflat: -self.gradient_log_joint(xflat.reshape((T, D))).ravel() / scale
hvp = lambda xflat, v: -self.hessian_vector_product_log_joint(
xflat.reshape((T, D)), v.reshape((T, D))).ravel() / scale
x0 = self.gaussian_states.reshape((T * D,))
# Make callback
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", obj(x).round(2),
"\tAvg Grad: ", jac(x).mean().round(2))
itr[0] += 1
# Second order method
if verbose:
print("Fitting Laplace approximation")
res = minimize(obj, x0,
tol=tol,
method="Newton-CG",
jac=jac,
hessp=hvp,
callback=cbk if verbose else None)
assert res.success
mu = res.x
assert np.all(np.isfinite(mu))
if verbose: print("Done")
# Unflatten and compute the expected sufficient statistics
return mu.reshape((T, D))
def _laplace_approximation_newton(self, tol=1e-6, stepsz=0.9, verbose=False):
"""
Solve a block tridiagonal system with message passing.
"""
from pylds.util import solve_symm_block_tridiag, scipy_solve_symm_block_tridiag
scale = self.T * self.D_emission
def newton_step(x, stepsz):
assert 0 <= stepsz <= 1
g = self.gradient_log_joint(x)
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,
-H_upper_diag / scale,
g / scale)
return x - stepsz * Hinv_g
if verbose:
print("Fitting Laplace approximation")
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", (self.log_joint(x) / scale).round(4),
"\tAvg Grad: ", (self.gradient_log_joint(x).mean() / scale).round(4))
itr[0] += 1
# Solve for optimal x with Newton's method
x = self.gaussian_states
dx = np.inf
while dx >= tol:
xnew = newton_step(x, stepsz)
dx = np.mean(abs(xnew - x))
x = xnew
if verbose:
cbk(x)
assert np.all(np.isfinite(x))
if verbose:
print("Done")
return x
def log_likelihood(self):
if self._normalizer is None:
self.E_step()
return self._normalizer
def E_step(self, verbose=False):
self.gaussian_states = self.laplace_approximation(verbose=verbose)
# Compute normalizer and covariances with E step
T, D = self.T, self.D_latent
H_diag, H_upper_diag = self.sparse_hessian_log_joint(self.gaussian_states)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
# Negate the Hessian since precision is -H
J_21 = np.swapaxes(-H_upper_diag, -1, -2)
J_node = -H_diag
h_node = np.zeros((T, D))
logZ, _, self.smoothed_sigmas, E_xtp1_xtT = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T - 1)),
J_node, h_node, np.zeros(T))
# Laplace approximation -- normalizer is the joint times
# the normalizer from the Gaussian approx.
self._normalizer = self.log_joint(self.gaussian_states) + logZ
self._set_expected_stats(self.gaussian_states, self.smoothed_sigmas, E_xtp1_xtT)
def _set_expected_stats(self, mu, sigmas, E_xtp1_xtT):
# Get the emission stats
p, n, d, T, inputs, y = \
self.D_emission, self.D_latent, self.D_input, self.T, \
self.inputs, self.data
E_x_xT = sigmas + mu[:, :, None] * mu[:, None, :]
E_x_uT = mu[:, :, None] * self.inputs[:, None, :]
E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1].sum(0)
E_xtp1_xtp1T = E_x_xT[1:].sum(0)
E_xtp1_xtT = E_xtp1_xtT.sum(0)
E_xtp1_utT = (mu[1:, :, None] * inputs[:-1, None, :]).sum(0)
E_xtp1_xutT = np.hstack((E_xtp1_xtT, E_xtp1_utT))
self.E_dynamics_stats = np.array(
[E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, self.T - 1])
# Compute the expectations for the observations
E_yxT = np.sum(y[:, :, None] * mu[:, None, :], axis=0)
E_yuT = y.T.dot(inputs)
E_yxuT = np.hstack((E_yxT, E_yuT))
self.E_emission_stats = np.array([E_yxuT, mu, sigmas, inputs, np.ones_like(y, dtype=bool)])
def smooth(self):
return self.emission_distn.predict(np.hstack((self.gaussian_states, self.inputs)))
|
mattjj/pylds | pylds/laplace.py | _LaplaceApproxLDSStatesBase.hessian_local_log_likelihood | python | def hessian_local_log_likelihood(self, x):
T, D = self.T, self.D_latent
assert x.shape == (T, D)
hfun = hessian(self.local_log_likelihood)
H_diag = np.zeros((T, D, D))
for t in range(T):
H_diag[t] = hfun(x[t], self.data[t], self.inputs[t])
return H_diag | return d^2/dxt^2 log p(y | x) for each time bin
Optionally override this in base classes | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L53-L65 | null | class _LaplaceApproxLDSStatesBase(_LDSStates):
"""
Support variational inference via Laplace approximation.
The key is a definition of the log likelihood,
log p(y_t | x_t, \theta)
Combining this with a Gaussian LDS prior on the states,
we can compute the gradient and Hessian of the log likelihood.
"""
def local_log_likelihood(self, xt, yt, ut):
"""
Return log p(yt | xt). Implement this in base classes.
"""
raise NotImplementedError
def log_conditional_likelihood(self, x):
"""
likelihood \sum_t log p(y_t | x_t)
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
ll = 0
for t in range(self.T):
ll += self.local_log_likelihood(x[t], self.data[t], self.inputs[t])
return ll
def grad_local_log_likelihood(self, x):
"""
return d/dxt log p(yt | xt) evaluated at xt
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
gfun = grad(self.local_log_likelihood)
g = np.zeros((T, D))
for t in range(T):
g[t] += gfun(x[t], self.data[t], self.inputs[t])
return g
@property
def sparse_J_prior(self):
T, D = self.T, self.D_latent
J_init, _, _ = self.info_init_params
J_11, J_21, J_22, _, _, _ = self.info_dynamics_params
# Collect the Gaussian LDS prior terms
J_diag = np.zeros((T, D, D))
J_diag[0] += J_init
J_diag[:-1] += J_11
J_diag[1:] += J_22
J_upper_diag = np.repeat(J_21.T[None, :, :], T - 1, axis=0)
return J_diag, J_upper_diag
def log_joint(self, x):
"""
Compute the log joint probability p(x, y)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# prior log p(x) -- quadratic terms
J_diag, J_upper_diag = self.sparse_J_prior
lp = -0.5 * np.sum(x * symm_block_tridiag_matmul(J_diag, J_upper_diag, x))
# prior log p(x) -- linear terms
_, h_init, log_Z_init = self.info_init_params
_, _, _, h1, h2, log_Z_dyn = self.info_dynamics_params
lp += x[0].dot(h_init)
lp += np.sum(x[:-1] * h1)
lp += np.sum(x[1:] * h2)
# prior log p(x) -- normalization constants
lp += log_Z_init
lp += np.sum(log_Z_dyn)
# likelihood log p(y | x)
lp += self.log_conditional_likelihood(x)
return lp
def sparse_hessian_log_joint(self, x):
"""
The Hessian includes the quadratic terms of the Gaussian LDS prior
as well as the Hessian of the local log likelihood.
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
J_diag, J_upper_diag = self.sparse_J_prior
H_diag, H_upper_diag = -J_diag, -J_upper_diag
# Collect the likelihood terms
H_diag += self.hessian_local_log_likelihood(x)
# Subtract a little bit to ensure negative definiteness
H_diag -= 1e-8 * np.eye(D)
return H_diag, H_upper_diag
def hessian_vector_product_log_joint(self, x, v):
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
return symm_block_tridiag_matmul(H_diag, H_upper_diag, v)
def gradient_log_joint(self, x):
"""
The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
_, h_init, _ = self.info_init_params
_, _, _, h1, h2, _ = self.info_dynamics_params
H_diag, H_upper_diag = self.sparse_J_prior
# Compute the gradient from the prior
g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
g[0] += h_init
g[:-1] += h1
g[1:] += h2
# Compute gradient from the likelihood terms
g += self.grad_local_log_likelihood(x)
return g
def laplace_approximation(self, method="newton", verbose=False, tol=1e-7, **kwargs):
if method.lower() == "newton":
return self._laplace_approximation_newton(verbose=verbose, tol=tol, **kwargs)
elif method.lower() == "bfgs":
return self._laplace_approximation_bfgs(verbose=verbose, tol=tol, **kwargs)
else:
raise Exception("Invalid method: {}".format(method))
def _laplace_approximation_bfgs(self, tol=1e-7, verbose=False):
from scipy.optimize import minimize
# Gradient ascent on the log joint probability to get mu
T, D = self.T, self.D_latent
scale = self.T * self.D_emission
obj = lambda xflat: -self.log_joint(xflat.reshape((T, D))) / scale
jac = lambda xflat: -self.gradient_log_joint(xflat.reshape((T, D))).ravel() / scale
hvp = lambda xflat, v: -self.hessian_vector_product_log_joint(
xflat.reshape((T, D)), v.reshape((T, D))).ravel() / scale
x0 = self.gaussian_states.reshape((T * D,))
# Make callback
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", obj(x).round(2),
"\tAvg Grad: ", jac(x).mean().round(2))
itr[0] += 1
# Second order method
if verbose:
print("Fitting Laplace approximation")
res = minimize(obj, x0,
tol=tol,
method="Newton-CG",
jac=jac,
hessp=hvp,
callback=cbk if verbose else None)
assert res.success
mu = res.x
assert np.all(np.isfinite(mu))
if verbose: print("Done")
# Unflatten and compute the expected sufficient statistics
return mu.reshape((T, D))
def _laplace_approximation_newton(self, tol=1e-6, stepsz=0.9, verbose=False):
"""
Solve a block tridiagonal system with message passing.
"""
from pylds.util import solve_symm_block_tridiag, scipy_solve_symm_block_tridiag
scale = self.T * self.D_emission
def newton_step(x, stepsz):
assert 0 <= stepsz <= 1
g = self.gradient_log_joint(x)
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,
-H_upper_diag / scale,
g / scale)
return x - stepsz * Hinv_g
if verbose:
print("Fitting Laplace approximation")
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", (self.log_joint(x) / scale).round(4),
"\tAvg Grad: ", (self.gradient_log_joint(x).mean() / scale).round(4))
itr[0] += 1
# Solve for optimal x with Newton's method
x = self.gaussian_states
dx = np.inf
while dx >= tol:
xnew = newton_step(x, stepsz)
dx = np.mean(abs(xnew - x))
x = xnew
if verbose:
cbk(x)
assert np.all(np.isfinite(x))
if verbose:
print("Done")
return x
def log_likelihood(self):
if self._normalizer is None:
self.E_step()
return self._normalizer
def E_step(self, verbose=False):
self.gaussian_states = self.laplace_approximation(verbose=verbose)
# Compute normalizer and covariances with E step
T, D = self.T, self.D_latent
H_diag, H_upper_diag = self.sparse_hessian_log_joint(self.gaussian_states)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
# Negate the Hessian since precision is -H
J_21 = np.swapaxes(-H_upper_diag, -1, -2)
J_node = -H_diag
h_node = np.zeros((T, D))
logZ, _, self.smoothed_sigmas, E_xtp1_xtT = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T - 1)),
J_node, h_node, np.zeros(T))
# Laplace approximation -- normalizer is the joint times
# the normalizer from the Gaussian approx.
self._normalizer = self.log_joint(self.gaussian_states) + logZ
self._set_expected_stats(self.gaussian_states, self.smoothed_sigmas, E_xtp1_xtT)
def _set_expected_stats(self, mu, sigmas, E_xtp1_xtT):
# Get the emission stats
p, n, d, T, inputs, y = \
self.D_emission, self.D_latent, self.D_input, self.T, \
self.inputs, self.data
E_x_xT = sigmas + mu[:, :, None] * mu[:, None, :]
E_x_uT = mu[:, :, None] * self.inputs[:, None, :]
E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1].sum(0)
E_xtp1_xtp1T = E_x_xT[1:].sum(0)
E_xtp1_xtT = E_xtp1_xtT.sum(0)
E_xtp1_utT = (mu[1:, :, None] * inputs[:-1, None, :]).sum(0)
E_xtp1_xutT = np.hstack((E_xtp1_xtT, E_xtp1_utT))
self.E_dynamics_stats = np.array(
[E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, self.T - 1])
# Compute the expectations for the observations
E_yxT = np.sum(y[:, :, None] * mu[:, None, :], axis=0)
E_yuT = y.T.dot(inputs)
E_yxuT = np.hstack((E_yxT, E_yuT))
self.E_emission_stats = np.array([E_yxuT, mu, sigmas, inputs, np.ones_like(y, dtype=bool)])
def smooth(self):
return self.emission_distn.predict(np.hstack((self.gaussian_states, self.inputs)))
|
mattjj/pylds | pylds/laplace.py | _LaplaceApproxLDSStatesBase.log_joint | python | def log_joint(self, x):
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# prior log p(x) -- quadratic terms
J_diag, J_upper_diag = self.sparse_J_prior
lp = -0.5 * np.sum(x * symm_block_tridiag_matmul(J_diag, J_upper_diag, x))
# prior log p(x) -- linear terms
_, h_init, log_Z_init = self.info_init_params
_, _, _, h1, h2, log_Z_dyn = self.info_dynamics_params
lp += x[0].dot(h_init)
lp += np.sum(x[:-1] * h1)
lp += np.sum(x[1:] * h2)
# prior log p(x) -- normalization constants
lp += log_Z_init
lp += np.sum(log_Z_dyn)
# likelihood log p(y | x)
lp += self.log_conditional_likelihood(x)
return lp | Compute the log joint probability p(x, y) | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L82-L107 | [
"def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):\n \"\"\"\n Compute matrix-vector product with a symmetric block\n tridiagonal matrix H and vector v.\n\n :param H_diag: block diagonal terms of H\n :param H_upper_diag: upper block diagonal terms of H\n :param v: vector to multiple\n :return: H * v\n \"\"\"\n T, D, _ = H_diag.shape\n assert H_diag.ndim == 3 and H_diag.shape[2] == D\n assert H_upper_diag.shape == (T-1, D, D)\n assert v.shape == (T, D)\n\n out = np.matmul(H_diag, v[:, :, None])[:, :, 0]\n out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]\n out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1), v[:-1][:, :, None])[:, :, 0]\n return out\n",
"def log_conditional_likelihood(self, x):\n \"\"\"\n likelihood \\sum_t log p(y_t | x_t)\n Optionally override this in base classes\n \"\"\"\n T, D = self.T, self.D_latent\n assert x.shape == (T, D)\n\n ll = 0\n for t in range(self.T):\n ll += self.local_log_likelihood(x[t], self.data[t], self.inputs[t])\n return ll\n"
] | class _LaplaceApproxLDSStatesBase(_LDSStates):
"""
Support variational inference via Laplace approximation.
The key is a definition of the log likelihood,
log p(y_t | x_t, \theta)
Combining this with a Gaussian LDS prior on the states,
we can compute the gradient and Hessian of the log likelihood.
"""
def local_log_likelihood(self, xt, yt, ut):
"""
Return log p(yt | xt). Implement this in base classes.
"""
raise NotImplementedError
def log_conditional_likelihood(self, x):
"""
likelihood \sum_t log p(y_t | x_t)
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
ll = 0
for t in range(self.T):
ll += self.local_log_likelihood(x[t], self.data[t], self.inputs[t])
return ll
def grad_local_log_likelihood(self, x):
"""
return d/dxt log p(yt | xt) evaluated at xt
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
gfun = grad(self.local_log_likelihood)
g = np.zeros((T, D))
for t in range(T):
g[t] += gfun(x[t], self.data[t], self.inputs[t])
return g
def hessian_local_log_likelihood(self, x):
"""
return d^2/dxt^2 log p(y | x) for each time bin
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
hfun = hessian(self.local_log_likelihood)
H_diag = np.zeros((T, D, D))
for t in range(T):
H_diag[t] = hfun(x[t], self.data[t], self.inputs[t])
return H_diag
@property
def sparse_J_prior(self):
T, D = self.T, self.D_latent
J_init, _, _ = self.info_init_params
J_11, J_21, J_22, _, _, _ = self.info_dynamics_params
# Collect the Gaussian LDS prior terms
J_diag = np.zeros((T, D, D))
J_diag[0] += J_init
J_diag[:-1] += J_11
J_diag[1:] += J_22
J_upper_diag = np.repeat(J_21.T[None, :, :], T - 1, axis=0)
return J_diag, J_upper_diag
def sparse_hessian_log_joint(self, x):
"""
The Hessian includes the quadratic terms of the Gaussian LDS prior
as well as the Hessian of the local log likelihood.
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
J_diag, J_upper_diag = self.sparse_J_prior
H_diag, H_upper_diag = -J_diag, -J_upper_diag
# Collect the likelihood terms
H_diag += self.hessian_local_log_likelihood(x)
# Subtract a little bit to ensure negative definiteness
H_diag -= 1e-8 * np.eye(D)
return H_diag, H_upper_diag
def hessian_vector_product_log_joint(self, x, v):
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
return symm_block_tridiag_matmul(H_diag, H_upper_diag, v)
def gradient_log_joint(self, x):
"""
The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
_, h_init, _ = self.info_init_params
_, _, _, h1, h2, _ = self.info_dynamics_params
H_diag, H_upper_diag = self.sparse_J_prior
# Compute the gradient from the prior
g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
g[0] += h_init
g[:-1] += h1
g[1:] += h2
# Compute gradient from the likelihood terms
g += self.grad_local_log_likelihood(x)
return g
def laplace_approximation(self, method="newton", verbose=False, tol=1e-7, **kwargs):
if method.lower() == "newton":
return self._laplace_approximation_newton(verbose=verbose, tol=tol, **kwargs)
elif method.lower() == "bfgs":
return self._laplace_approximation_bfgs(verbose=verbose, tol=tol, **kwargs)
else:
raise Exception("Invalid method: {}".format(method))
def _laplace_approximation_bfgs(self, tol=1e-7, verbose=False):
from scipy.optimize import minimize
# Gradient ascent on the log joint probability to get mu
T, D = self.T, self.D_latent
scale = self.T * self.D_emission
obj = lambda xflat: -self.log_joint(xflat.reshape((T, D))) / scale
jac = lambda xflat: -self.gradient_log_joint(xflat.reshape((T, D))).ravel() / scale
hvp = lambda xflat, v: -self.hessian_vector_product_log_joint(
xflat.reshape((T, D)), v.reshape((T, D))).ravel() / scale
x0 = self.gaussian_states.reshape((T * D,))
# Make callback
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", obj(x).round(2),
"\tAvg Grad: ", jac(x).mean().round(2))
itr[0] += 1
# Second order method
if verbose:
print("Fitting Laplace approximation")
res = minimize(obj, x0,
tol=tol,
method="Newton-CG",
jac=jac,
hessp=hvp,
callback=cbk if verbose else None)
assert res.success
mu = res.x
assert np.all(np.isfinite(mu))
if verbose: print("Done")
# Unflatten and compute the expected sufficient statistics
return mu.reshape((T, D))
def _laplace_approximation_newton(self, tol=1e-6, stepsz=0.9, verbose=False):
"""
Solve a block tridiagonal system with message passing.
"""
from pylds.util import solve_symm_block_tridiag, scipy_solve_symm_block_tridiag
scale = self.T * self.D_emission
def newton_step(x, stepsz):
assert 0 <= stepsz <= 1
g = self.gradient_log_joint(x)
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,
-H_upper_diag / scale,
g / scale)
return x - stepsz * Hinv_g
if verbose:
print("Fitting Laplace approximation")
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", (self.log_joint(x) / scale).round(4),
"\tAvg Grad: ", (self.gradient_log_joint(x).mean() / scale).round(4))
itr[0] += 1
# Solve for optimal x with Newton's method
x = self.gaussian_states
dx = np.inf
while dx >= tol:
xnew = newton_step(x, stepsz)
dx = np.mean(abs(xnew - x))
x = xnew
if verbose:
cbk(x)
assert np.all(np.isfinite(x))
if verbose:
print("Done")
return x
def log_likelihood(self):
if self._normalizer is None:
self.E_step()
return self._normalizer
def E_step(self, verbose=False):
self.gaussian_states = self.laplace_approximation(verbose=verbose)
# Compute normalizer and covariances with E step
T, D = self.T, self.D_latent
H_diag, H_upper_diag = self.sparse_hessian_log_joint(self.gaussian_states)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
# Negate the Hessian since precision is -H
J_21 = np.swapaxes(-H_upper_diag, -1, -2)
J_node = -H_diag
h_node = np.zeros((T, D))
logZ, _, self.smoothed_sigmas, E_xtp1_xtT = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T - 1)),
J_node, h_node, np.zeros(T))
# Laplace approximation -- normalizer is the joint times
# the normalizer from the Gaussian approx.
self._normalizer = self.log_joint(self.gaussian_states) + logZ
self._set_expected_stats(self.gaussian_states, self.smoothed_sigmas, E_xtp1_xtT)
def _set_expected_stats(self, mu, sigmas, E_xtp1_xtT):
# Get the emission stats
p, n, d, T, inputs, y = \
self.D_emission, self.D_latent, self.D_input, self.T, \
self.inputs, self.data
E_x_xT = sigmas + mu[:, :, None] * mu[:, None, :]
E_x_uT = mu[:, :, None] * self.inputs[:, None, :]
E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1].sum(0)
E_xtp1_xtp1T = E_x_xT[1:].sum(0)
E_xtp1_xtT = E_xtp1_xtT.sum(0)
E_xtp1_utT = (mu[1:, :, None] * inputs[:-1, None, :]).sum(0)
E_xtp1_xutT = np.hstack((E_xtp1_xtT, E_xtp1_utT))
self.E_dynamics_stats = np.array(
[E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, self.T - 1])
# Compute the expectations for the observations
E_yxT = np.sum(y[:, :, None] * mu[:, None, :], axis=0)
E_yuT = y.T.dot(inputs)
E_yxuT = np.hstack((E_yxT, E_yuT))
self.E_emission_stats = np.array([E_yxuT, mu, sigmas, inputs, np.ones_like(y, dtype=bool)])
def smooth(self):
return self.emission_distn.predict(np.hstack((self.gaussian_states, self.inputs)))
|
mattjj/pylds | pylds/laplace.py | _LaplaceApproxLDSStatesBase.sparse_hessian_log_joint | python | def sparse_hessian_log_joint(self, x):
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
J_diag, J_upper_diag = self.sparse_J_prior
H_diag, H_upper_diag = -J_diag, -J_upper_diag
# Collect the likelihood terms
H_diag += self.hessian_local_log_likelihood(x)
# Subtract a little bit to ensure negative definiteness
H_diag -= 1e-8 * np.eye(D)
return H_diag, H_upper_diag | The Hessian includes the quadratic terms of the Gaussian LDS prior
as well as the Hessian of the local log likelihood. | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L109-L127 | [
"def hessian_local_log_likelihood(self, x):\n \"\"\"\n return d^2/dxt^2 log p(y | x) for each time bin\n Optionally override this in base classes\n \"\"\"\n T, D = self.T, self.D_latent\n assert x.shape == (T, D)\n\n hfun = hessian(self.local_log_likelihood)\n H_diag = np.zeros((T, D, D))\n for t in range(T):\n H_diag[t] = hfun(x[t], self.data[t], self.inputs[t])\n return H_diag\n"
] | class _LaplaceApproxLDSStatesBase(_LDSStates):
"""
Support variational inference via Laplace approximation.
The key is a definition of the log likelihood,
log p(y_t | x_t, \theta)
Combining this with a Gaussian LDS prior on the states,
we can compute the gradient and Hessian of the log likelihood.
"""
def local_log_likelihood(self, xt, yt, ut):
"""
Return log p(yt | xt). Implement this in base classes.
"""
raise NotImplementedError
def log_conditional_likelihood(self, x):
"""
likelihood \sum_t log p(y_t | x_t)
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
ll = 0
for t in range(self.T):
ll += self.local_log_likelihood(x[t], self.data[t], self.inputs[t])
return ll
def grad_local_log_likelihood(self, x):
"""
return d/dxt log p(yt | xt) evaluated at xt
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
gfun = grad(self.local_log_likelihood)
g = np.zeros((T, D))
for t in range(T):
g[t] += gfun(x[t], self.data[t], self.inputs[t])
return g
def hessian_local_log_likelihood(self, x):
"""
return d^2/dxt^2 log p(y | x) for each time bin
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
hfun = hessian(self.local_log_likelihood)
H_diag = np.zeros((T, D, D))
for t in range(T):
H_diag[t] = hfun(x[t], self.data[t], self.inputs[t])
return H_diag
@property
def sparse_J_prior(self):
T, D = self.T, self.D_latent
J_init, _, _ = self.info_init_params
J_11, J_21, J_22, _, _, _ = self.info_dynamics_params
# Collect the Gaussian LDS prior terms
J_diag = np.zeros((T, D, D))
J_diag[0] += J_init
J_diag[:-1] += J_11
J_diag[1:] += J_22
J_upper_diag = np.repeat(J_21.T[None, :, :], T - 1, axis=0)
return J_diag, J_upper_diag
def log_joint(self, x):
"""
Compute the log joint probability p(x, y)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# prior log p(x) -- quadratic terms
J_diag, J_upper_diag = self.sparse_J_prior
lp = -0.5 * np.sum(x * symm_block_tridiag_matmul(J_diag, J_upper_diag, x))
# prior log p(x) -- linear terms
_, h_init, log_Z_init = self.info_init_params
_, _, _, h1, h2, log_Z_dyn = self.info_dynamics_params
lp += x[0].dot(h_init)
lp += np.sum(x[:-1] * h1)
lp += np.sum(x[1:] * h2)
# prior log p(x) -- normalization constants
lp += log_Z_init
lp += np.sum(log_Z_dyn)
# likelihood log p(y | x)
lp += self.log_conditional_likelihood(x)
return lp
def hessian_vector_product_log_joint(self, x, v):
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
return symm_block_tridiag_matmul(H_diag, H_upper_diag, v)
def gradient_log_joint(self, x):
"""
The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
_, h_init, _ = self.info_init_params
_, _, _, h1, h2, _ = self.info_dynamics_params
H_diag, H_upper_diag = self.sparse_J_prior
# Compute the gradient from the prior
g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
g[0] += h_init
g[:-1] += h1
g[1:] += h2
# Compute gradient from the likelihood terms
g += self.grad_local_log_likelihood(x)
return g
def laplace_approximation(self, method="newton", verbose=False, tol=1e-7, **kwargs):
if method.lower() == "newton":
return self._laplace_approximation_newton(verbose=verbose, tol=tol, **kwargs)
elif method.lower() == "bfgs":
return self._laplace_approximation_bfgs(verbose=verbose, tol=tol, **kwargs)
else:
raise Exception("Invalid method: {}".format(method))
def _laplace_approximation_bfgs(self, tol=1e-7, verbose=False):
from scipy.optimize import minimize
# Gradient ascent on the log joint probability to get mu
T, D = self.T, self.D_latent
scale = self.T * self.D_emission
obj = lambda xflat: -self.log_joint(xflat.reshape((T, D))) / scale
jac = lambda xflat: -self.gradient_log_joint(xflat.reshape((T, D))).ravel() / scale
hvp = lambda xflat, v: -self.hessian_vector_product_log_joint(
xflat.reshape((T, D)), v.reshape((T, D))).ravel() / scale
x0 = self.gaussian_states.reshape((T * D,))
# Make callback
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", obj(x).round(2),
"\tAvg Grad: ", jac(x).mean().round(2))
itr[0] += 1
# Second order method
if verbose:
print("Fitting Laplace approximation")
res = minimize(obj, x0,
tol=tol,
method="Newton-CG",
jac=jac,
hessp=hvp,
callback=cbk if verbose else None)
assert res.success
mu = res.x
assert np.all(np.isfinite(mu))
if verbose: print("Done")
# Unflatten and compute the expected sufficient statistics
return mu.reshape((T, D))
def _laplace_approximation_newton(self, tol=1e-6, stepsz=0.9, verbose=False):
"""
Solve a block tridiagonal system with message passing.
"""
from pylds.util import solve_symm_block_tridiag, scipy_solve_symm_block_tridiag
scale = self.T * self.D_emission
def newton_step(x, stepsz):
assert 0 <= stepsz <= 1
g = self.gradient_log_joint(x)
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,
-H_upper_diag / scale,
g / scale)
return x - stepsz * Hinv_g
if verbose:
print("Fitting Laplace approximation")
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", (self.log_joint(x) / scale).round(4),
"\tAvg Grad: ", (self.gradient_log_joint(x).mean() / scale).round(4))
itr[0] += 1
# Solve for optimal x with Newton's method
x = self.gaussian_states
dx = np.inf
while dx >= tol:
xnew = newton_step(x, stepsz)
dx = np.mean(abs(xnew - x))
x = xnew
if verbose:
cbk(x)
assert np.all(np.isfinite(x))
if verbose:
print("Done")
return x
def log_likelihood(self):
if self._normalizer is None:
self.E_step()
return self._normalizer
def E_step(self, verbose=False):
self.gaussian_states = self.laplace_approximation(verbose=verbose)
# Compute normalizer and covariances with E step
T, D = self.T, self.D_latent
H_diag, H_upper_diag = self.sparse_hessian_log_joint(self.gaussian_states)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
# Negate the Hessian since precision is -H
J_21 = np.swapaxes(-H_upper_diag, -1, -2)
J_node = -H_diag
h_node = np.zeros((T, D))
logZ, _, self.smoothed_sigmas, E_xtp1_xtT = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T - 1)),
J_node, h_node, np.zeros(T))
# Laplace approximation -- normalizer is the joint times
# the normalizer from the Gaussian approx.
self._normalizer = self.log_joint(self.gaussian_states) + logZ
self._set_expected_stats(self.gaussian_states, self.smoothed_sigmas, E_xtp1_xtT)
def _set_expected_stats(self, mu, sigmas, E_xtp1_xtT):
# Get the emission stats
p, n, d, T, inputs, y = \
self.D_emission, self.D_latent, self.D_input, self.T, \
self.inputs, self.data
E_x_xT = sigmas + mu[:, :, None] * mu[:, None, :]
E_x_uT = mu[:, :, None] * self.inputs[:, None, :]
E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1].sum(0)
E_xtp1_xtp1T = E_x_xT[1:].sum(0)
E_xtp1_xtT = E_xtp1_xtT.sum(0)
E_xtp1_utT = (mu[1:, :, None] * inputs[:-1, None, :]).sum(0)
E_xtp1_xutT = np.hstack((E_xtp1_xtT, E_xtp1_utT))
self.E_dynamics_stats = np.array(
[E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, self.T - 1])
# Compute the expectations for the observations
E_yxT = np.sum(y[:, :, None] * mu[:, None, :], axis=0)
E_yuT = y.T.dot(inputs)
E_yxuT = np.hstack((E_yxT, E_yuT))
self.E_emission_stats = np.array([E_yxuT, mu, sigmas, inputs, np.ones_like(y, dtype=bool)])
def smooth(self):
return self.emission_distn.predict(np.hstack((self.gaussian_states, self.inputs)))
|
mattjj/pylds | pylds/laplace.py | _LaplaceApproxLDSStatesBase.gradient_log_joint | python | def gradient_log_joint(self, x):
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
_, h_init, _ = self.info_init_params
_, _, _, h1, h2, _ = self.info_dynamics_params
H_diag, H_upper_diag = self.sparse_J_prior
# Compute the gradient from the prior
g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
g[0] += h_init
g[:-1] += h1
g[1:] += h2
# Compute gradient from the likelihood terms
g += self.grad_local_log_likelihood(x)
return g | The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt) | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L133-L162 | [
"def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):\n \"\"\"\n Compute matrix-vector product with a symmetric block\n tridiagonal matrix H and vector v.\n\n :param H_diag: block diagonal terms of H\n :param H_upper_diag: upper block diagonal terms of H\n :param v: vector to multiple\n :return: H * v\n \"\"\"\n T, D, _ = H_diag.shape\n assert H_diag.ndim == 3 and H_diag.shape[2] == D\n assert H_upper_diag.shape == (T-1, D, D)\n assert v.shape == (T, D)\n\n out = np.matmul(H_diag, v[:, :, None])[:, :, 0]\n out[:-1] += np.matmul(H_upper_diag, v[1:][:, :, None])[:, :, 0]\n out[1:] += np.matmul(np.swapaxes(H_upper_diag, -2, -1), v[:-1][:, :, None])[:, :, 0]\n return out\n",
"def grad_local_log_likelihood(self, x):\n \"\"\"\n return d/dxt log p(yt | xt) evaluated at xt\n Optionally override this in base classes\n \"\"\"\n T, D = self.T, self.D_latent\n assert x.shape == (T, D)\n gfun = grad(self.local_log_likelihood)\n\n g = np.zeros((T, D))\n for t in range(T):\n g[t] += gfun(x[t], self.data[t], self.inputs[t])\n return g\n",
"def grad_local_log_likelihood(self, x):\n \"\"\"\n d/dx y^T Cx + y^T d - exp(Cx+d)\n = y^T C - exp(Cx+d)^T C\n = (y - lmbda)^T C\n \"\"\"\n # Observation likelihoods\n lmbda = np.exp(np.dot(x, self.C.T) + np.dot(self.inputs, self.D.T))\n return (self.data - lmbda).dot(self.C)\n"
] | class _LaplaceApproxLDSStatesBase(_LDSStates):
"""
Support variational inference via Laplace approximation.
The key is a definition of the log likelihood,
log p(y_t | x_t, \theta)
Combining this with a Gaussian LDS prior on the states,
we can compute the gradient and Hessian of the log likelihood.
"""
def local_log_likelihood(self, xt, yt, ut):
"""
Return log p(yt | xt). Implement this in base classes.
"""
raise NotImplementedError
def log_conditional_likelihood(self, x):
"""
likelihood \sum_t log p(y_t | x_t)
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
ll = 0
for t in range(self.T):
ll += self.local_log_likelihood(x[t], self.data[t], self.inputs[t])
return ll
def grad_local_log_likelihood(self, x):
"""
return d/dxt log p(yt | xt) evaluated at xt
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
gfun = grad(self.local_log_likelihood)
g = np.zeros((T, D))
for t in range(T):
g[t] += gfun(x[t], self.data[t], self.inputs[t])
return g
def hessian_local_log_likelihood(self, x):
"""
return d^2/dxt^2 log p(y | x) for each time bin
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
hfun = hessian(self.local_log_likelihood)
H_diag = np.zeros((T, D, D))
for t in range(T):
H_diag[t] = hfun(x[t], self.data[t], self.inputs[t])
return H_diag
@property
def sparse_J_prior(self):
T, D = self.T, self.D_latent
J_init, _, _ = self.info_init_params
J_11, J_21, J_22, _, _, _ = self.info_dynamics_params
# Collect the Gaussian LDS prior terms
J_diag = np.zeros((T, D, D))
J_diag[0] += J_init
J_diag[:-1] += J_11
J_diag[1:] += J_22
J_upper_diag = np.repeat(J_21.T[None, :, :], T - 1, axis=0)
return J_diag, J_upper_diag
def log_joint(self, x):
"""
Compute the log joint probability p(x, y)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# prior log p(x) -- quadratic terms
J_diag, J_upper_diag = self.sparse_J_prior
lp = -0.5 * np.sum(x * symm_block_tridiag_matmul(J_diag, J_upper_diag, x))
# prior log p(x) -- linear terms
_, h_init, log_Z_init = self.info_init_params
_, _, _, h1, h2, log_Z_dyn = self.info_dynamics_params
lp += x[0].dot(h_init)
lp += np.sum(x[:-1] * h1)
lp += np.sum(x[1:] * h2)
# prior log p(x) -- normalization constants
lp += log_Z_init
lp += np.sum(log_Z_dyn)
# likelihood log p(y | x)
lp += self.log_conditional_likelihood(x)
return lp
def sparse_hessian_log_joint(self, x):
"""
The Hessian includes the quadratic terms of the Gaussian LDS prior
as well as the Hessian of the local log likelihood.
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
J_diag, J_upper_diag = self.sparse_J_prior
H_diag, H_upper_diag = -J_diag, -J_upper_diag
# Collect the likelihood terms
H_diag += self.hessian_local_log_likelihood(x)
# Subtract a little bit to ensure negative definiteness
H_diag -= 1e-8 * np.eye(D)
return H_diag, H_upper_diag
def hessian_vector_product_log_joint(self, x, v):
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
return symm_block_tridiag_matmul(H_diag, H_upper_diag, v)
def laplace_approximation(self, method="newton", verbose=False, tol=1e-7, **kwargs):
if method.lower() == "newton":
return self._laplace_approximation_newton(verbose=verbose, tol=tol, **kwargs)
elif method.lower() == "bfgs":
return self._laplace_approximation_bfgs(verbose=verbose, tol=tol, **kwargs)
else:
raise Exception("Invalid method: {}".format(method))
def _laplace_approximation_bfgs(self, tol=1e-7, verbose=False):
from scipy.optimize import minimize
# Gradient ascent on the log joint probability to get mu
T, D = self.T, self.D_latent
scale = self.T * self.D_emission
obj = lambda xflat: -self.log_joint(xflat.reshape((T, D))) / scale
jac = lambda xflat: -self.gradient_log_joint(xflat.reshape((T, D))).ravel() / scale
hvp = lambda xflat, v: -self.hessian_vector_product_log_joint(
xflat.reshape((T, D)), v.reshape((T, D))).ravel() / scale
x0 = self.gaussian_states.reshape((T * D,))
# Make callback
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", obj(x).round(2),
"\tAvg Grad: ", jac(x).mean().round(2))
itr[0] += 1
# Second order method
if verbose:
print("Fitting Laplace approximation")
res = minimize(obj, x0,
tol=tol,
method="Newton-CG",
jac=jac,
hessp=hvp,
callback=cbk if verbose else None)
assert res.success
mu = res.x
assert np.all(np.isfinite(mu))
if verbose: print("Done")
# Unflatten and compute the expected sufficient statistics
return mu.reshape((T, D))
def _laplace_approximation_newton(self, tol=1e-6, stepsz=0.9, verbose=False):
"""
Solve a block tridiagonal system with message passing.
"""
from pylds.util import solve_symm_block_tridiag, scipy_solve_symm_block_tridiag
scale = self.T * self.D_emission
def newton_step(x, stepsz):
assert 0 <= stepsz <= 1
g = self.gradient_log_joint(x)
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,
-H_upper_diag / scale,
g / scale)
return x - stepsz * Hinv_g
if verbose:
print("Fitting Laplace approximation")
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", (self.log_joint(x) / scale).round(4),
"\tAvg Grad: ", (self.gradient_log_joint(x).mean() / scale).round(4))
itr[0] += 1
# Solve for optimal x with Newton's method
x = self.gaussian_states
dx = np.inf
while dx >= tol:
xnew = newton_step(x, stepsz)
dx = np.mean(abs(xnew - x))
x = xnew
if verbose:
cbk(x)
assert np.all(np.isfinite(x))
if verbose:
print("Done")
return x
def log_likelihood(self):
if self._normalizer is None:
self.E_step()
return self._normalizer
def E_step(self, verbose=False):
self.gaussian_states = self.laplace_approximation(verbose=verbose)
# Compute normalizer and covariances with E step
T, D = self.T, self.D_latent
H_diag, H_upper_diag = self.sparse_hessian_log_joint(self.gaussian_states)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
# Negate the Hessian since precision is -H
J_21 = np.swapaxes(-H_upper_diag, -1, -2)
J_node = -H_diag
h_node = np.zeros((T, D))
logZ, _, self.smoothed_sigmas, E_xtp1_xtT = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T - 1)),
J_node, h_node, np.zeros(T))
# Laplace approximation -- normalizer is the joint times
# the normalizer from the Gaussian approx.
self._normalizer = self.log_joint(self.gaussian_states) + logZ
self._set_expected_stats(self.gaussian_states, self.smoothed_sigmas, E_xtp1_xtT)
def _set_expected_stats(self, mu, sigmas, E_xtp1_xtT):
# Get the emission stats
p, n, d, T, inputs, y = \
self.D_emission, self.D_latent, self.D_input, self.T, \
self.inputs, self.data
E_x_xT = sigmas + mu[:, :, None] * mu[:, None, :]
E_x_uT = mu[:, :, None] * self.inputs[:, None, :]
E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1].sum(0)
E_xtp1_xtp1T = E_x_xT[1:].sum(0)
E_xtp1_xtT = E_xtp1_xtT.sum(0)
E_xtp1_utT = (mu[1:, :, None] * inputs[:-1, None, :]).sum(0)
E_xtp1_xutT = np.hstack((E_xtp1_xtT, E_xtp1_utT))
self.E_dynamics_stats = np.array(
[E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, self.T - 1])
# Compute the expectations for the observations
E_yxT = np.sum(y[:, :, None] * mu[:, None, :], axis=0)
E_yuT = y.T.dot(inputs)
E_yxuT = np.hstack((E_yxT, E_yuT))
self.E_emission_stats = np.array([E_yxuT, mu, sigmas, inputs, np.ones_like(y, dtype=bool)])
def smooth(self):
return self.emission_distn.predict(np.hstack((self.gaussian_states, self.inputs)))
|
mattjj/pylds | pylds/laplace.py | _LaplaceApproxLDSStatesBase._laplace_approximation_newton | python | def _laplace_approximation_newton(self, tol=1e-6, stepsz=0.9, verbose=False):
from pylds.util import solve_symm_block_tridiag, scipy_solve_symm_block_tridiag
scale = self.T * self.D_emission
def newton_step(x, stepsz):
assert 0 <= stepsz <= 1
g = self.gradient_log_joint(x)
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,
-H_upper_diag / scale,
g / scale)
return x - stepsz * Hinv_g
if verbose:
print("Fitting Laplace approximation")
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", (self.log_joint(x) / scale).round(4),
"\tAvg Grad: ", (self.gradient_log_joint(x).mean() / scale).round(4))
itr[0] += 1
# Solve for optimal x with Newton's method
x = self.gaussian_states
dx = np.inf
while dx >= tol:
xnew = newton_step(x, stepsz)
dx = np.mean(abs(xnew - x))
x = xnew
if verbose:
cbk(x)
assert np.all(np.isfinite(x))
if verbose:
print("Done")
return x | Solve a block tridiagonal system with message passing. | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L212-L253 | [
"def newton_step(x, stepsz):\n assert 0 <= stepsz <= 1\n g = self.gradient_log_joint(x)\n H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)\n Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,\n -H_upper_diag / scale,\n g / scale)\n return x - stepsz * Hinv_g\n",
"def cbk(x):\n print(\"Iteration: \", itr[0],\n \"\\tObjective: \", (self.log_joint(x) / scale).round(4),\n \"\\tAvg Grad: \", (self.gradient_log_joint(x).mean() / scale).round(4))\n itr[0] += 1\n"
] | class _LaplaceApproxLDSStatesBase(_LDSStates):
"""
Support variational inference via Laplace approximation.
The key is a definition of the log likelihood,
log p(y_t | x_t, \theta)
Combining this with a Gaussian LDS prior on the states,
we can compute the gradient and Hessian of the log likelihood.
"""
def local_log_likelihood(self, xt, yt, ut):
"""
Return log p(yt | xt). Implement this in base classes.
"""
raise NotImplementedError
def log_conditional_likelihood(self, x):
"""
likelihood \sum_t log p(y_t | x_t)
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
ll = 0
for t in range(self.T):
ll += self.local_log_likelihood(x[t], self.data[t], self.inputs[t])
return ll
def grad_local_log_likelihood(self, x):
"""
return d/dxt log p(yt | xt) evaluated at xt
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
gfun = grad(self.local_log_likelihood)
g = np.zeros((T, D))
for t in range(T):
g[t] += gfun(x[t], self.data[t], self.inputs[t])
return g
def hessian_local_log_likelihood(self, x):
"""
return d^2/dxt^2 log p(y | x) for each time bin
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
hfun = hessian(self.local_log_likelihood)
H_diag = np.zeros((T, D, D))
for t in range(T):
H_diag[t] = hfun(x[t], self.data[t], self.inputs[t])
return H_diag
@property
def sparse_J_prior(self):
T, D = self.T, self.D_latent
J_init, _, _ = self.info_init_params
J_11, J_21, J_22, _, _, _ = self.info_dynamics_params
# Collect the Gaussian LDS prior terms
J_diag = np.zeros((T, D, D))
J_diag[0] += J_init
J_diag[:-1] += J_11
J_diag[1:] += J_22
J_upper_diag = np.repeat(J_21.T[None, :, :], T - 1, axis=0)
return J_diag, J_upper_diag
def log_joint(self, x):
"""
Compute the log joint probability p(x, y)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# prior log p(x) -- quadratic terms
J_diag, J_upper_diag = self.sparse_J_prior
lp = -0.5 * np.sum(x * symm_block_tridiag_matmul(J_diag, J_upper_diag, x))
# prior log p(x) -- linear terms
_, h_init, log_Z_init = self.info_init_params
_, _, _, h1, h2, log_Z_dyn = self.info_dynamics_params
lp += x[0].dot(h_init)
lp += np.sum(x[:-1] * h1)
lp += np.sum(x[1:] * h2)
# prior log p(x) -- normalization constants
lp += log_Z_init
lp += np.sum(log_Z_dyn)
# likelihood log p(y | x)
lp += self.log_conditional_likelihood(x)
return lp
def sparse_hessian_log_joint(self, x):
"""
The Hessian includes the quadratic terms of the Gaussian LDS prior
as well as the Hessian of the local log likelihood.
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
J_diag, J_upper_diag = self.sparse_J_prior
H_diag, H_upper_diag = -J_diag, -J_upper_diag
# Collect the likelihood terms
H_diag += self.hessian_local_log_likelihood(x)
# Subtract a little bit to ensure negative definiteness
H_diag -= 1e-8 * np.eye(D)
return H_diag, H_upper_diag
def hessian_vector_product_log_joint(self, x, v):
H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
return symm_block_tridiag_matmul(H_diag, H_upper_diag, v)
def gradient_log_joint(self, x):
"""
The gradient of the log joint probability.
For the Gaussian terms, this is
d/dx [-1/2 x^T J x + h^T x] = -Jx + h.
For the likelihood terms, we have for each time t
d/dx log p(yt | xt)
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
# Collect the Gaussian LDS prior terms
_, h_init, _ = self.info_init_params
_, _, _, h1, h2, _ = self.info_dynamics_params
H_diag, H_upper_diag = self.sparse_J_prior
# Compute the gradient from the prior
g = -1 * symm_block_tridiag_matmul(H_diag, H_upper_diag, x)
g[0] += h_init
g[:-1] += h1
g[1:] += h2
# Compute gradient from the likelihood terms
g += self.grad_local_log_likelihood(x)
return g
def laplace_approximation(self, method="newton", verbose=False, tol=1e-7, **kwargs):
if method.lower() == "newton":
return self._laplace_approximation_newton(verbose=verbose, tol=tol, **kwargs)
elif method.lower() == "bfgs":
return self._laplace_approximation_bfgs(verbose=verbose, tol=tol, **kwargs)
else:
raise Exception("Invalid method: {}".format(method))
def _laplace_approximation_bfgs(self, tol=1e-7, verbose=False):
from scipy.optimize import minimize
# Gradient ascent on the log joint probability to get mu
T, D = self.T, self.D_latent
scale = self.T * self.D_emission
obj = lambda xflat: -self.log_joint(xflat.reshape((T, D))) / scale
jac = lambda xflat: -self.gradient_log_joint(xflat.reshape((T, D))).ravel() / scale
hvp = lambda xflat, v: -self.hessian_vector_product_log_joint(
xflat.reshape((T, D)), v.reshape((T, D))).ravel() / scale
x0 = self.gaussian_states.reshape((T * D,))
# Make callback
itr = [0]
def cbk(x):
print("Iteration: ", itr[0],
"\tObjective: ", obj(x).round(2),
"\tAvg Grad: ", jac(x).mean().round(2))
itr[0] += 1
# Second order method
if verbose:
print("Fitting Laplace approximation")
res = minimize(obj, x0,
tol=tol,
method="Newton-CG",
jac=jac,
hessp=hvp,
callback=cbk if verbose else None)
assert res.success
mu = res.x
assert np.all(np.isfinite(mu))
if verbose: print("Done")
# Unflatten and compute the expected sufficient statistics
return mu.reshape((T, D))
def log_likelihood(self):
if self._normalizer is None:
self.E_step()
return self._normalizer
def E_step(self, verbose=False):
self.gaussian_states = self.laplace_approximation(verbose=verbose)
# Compute normalizer and covariances with E step
T, D = self.T, self.D_latent
H_diag, H_upper_diag = self.sparse_hessian_log_joint(self.gaussian_states)
J_init = J_11 = J_22 = np.zeros((D, D))
h_init = h_1 = h_2 = np.zeros((D,))
# Negate the Hessian since precision is -H
J_21 = np.swapaxes(-H_upper_diag, -1, -2)
J_node = -H_diag
h_node = np.zeros((T, D))
logZ, _, self.smoothed_sigmas, E_xtp1_xtT = \
info_E_step(J_init, h_init, 0,
J_11, J_21, J_22, h_1, h_2, np.zeros((T - 1)),
J_node, h_node, np.zeros(T))
# Laplace approximation -- normalizer is the joint times
# the normalizer from the Gaussian approx.
self._normalizer = self.log_joint(self.gaussian_states) + logZ
self._set_expected_stats(self.gaussian_states, self.smoothed_sigmas, E_xtp1_xtT)
def _set_expected_stats(self, mu, sigmas, E_xtp1_xtT):
# Get the emission stats
p, n, d, T, inputs, y = \
self.D_emission, self.D_latent, self.D_input, self.T, \
self.inputs, self.data
E_x_xT = sigmas + mu[:, :, None] * mu[:, None, :]
E_x_uT = mu[:, :, None] * self.inputs[:, None, :]
E_u_uT = self.inputs[:, :, None] * self.inputs[:, None, :]
E_xu_xuT = np.concatenate((
np.concatenate((E_x_xT, E_x_uT), axis=2),
np.concatenate((np.transpose(E_x_uT, (0, 2, 1)), E_u_uT), axis=2)),
axis=1)
E_xut_xutT = E_xu_xuT[:-1].sum(0)
E_xtp1_xtp1T = E_x_xT[1:].sum(0)
E_xtp1_xtT = E_xtp1_xtT.sum(0)
E_xtp1_utT = (mu[1:, :, None] * inputs[:-1, None, :]).sum(0)
E_xtp1_xutT = np.hstack((E_xtp1_xtT, E_xtp1_utT))
self.E_dynamics_stats = np.array(
[E_xtp1_xtp1T, E_xtp1_xutT, E_xut_xutT, self.T - 1])
# Compute the expectations for the observations
E_yxT = np.sum(y[:, :, None] * mu[:, None, :], axis=0)
E_yuT = y.T.dot(inputs)
E_yxuT = np.hstack((E_yxT, E_yuT))
self.E_emission_stats = np.array([E_yxuT, mu, sigmas, inputs, np.ones_like(y, dtype=bool)])
def smooth(self):
return self.emission_distn.predict(np.hstack((self.gaussian_states, self.inputs)))
|
mattjj/pylds | pylds/laplace.py | LaplaceApproxPoissonLDSStates.grad_local_log_likelihood | python | def grad_local_log_likelihood(self, x):
# Observation likelihoods
lmbda = np.exp(np.dot(x, self.C.T) + np.dot(self.inputs, self.D.T))
return (self.data - lmbda).dot(self.C) | d/dx y^T Cx + y^T d - exp(Cx+d)
= y^T C - exp(Cx+d)^T C
= (y - lmbda)^T C | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L349-L357 | null | class LaplaceApproxPoissonLDSStates(_LaplaceApproxLDSStatesBase):
"""
Poisson observations
"""
def local_log_likelihood(self, xt, yt, ut):
# Observation likelihoods
C, D = self.C, self.D
loglmbda = np.dot(C, xt) + np.dot(D, ut)
lmbda = np.exp(loglmbda)
ll = np.sum(yt * loglmbda)
ll -= np.sum(lmbda)
ll -= np.sum(gammaln(yt + 1))
return ll
# Override likelihood, gradient, and hessian with vectorized forms
def log_conditional_likelihood(self, x):
# Observation likelihoods
C, D = self.C, self.D
loglmbda = np.dot(x, C.T) + np.dot(self.inputs, D.T)
lmbda = np.exp(loglmbda)
ll = np.sum(self.data * loglmbda)
ll -= np.sum(lmbda)
ll -= np.sum(gammaln(self.data + 1))
return ll
def hessian_local_log_likelihood(self, x):
"""
d/dx (y - lmbda)^T C = d/dx -exp(Cx + d)^T C
= -C^T exp(Cx + d)^T C
"""
# Observation likelihoods
lmbda = np.exp(np.dot(x, self.C.T) + np.dot(self.inputs, self.D.T))
return np.einsum('tn, ni, nj ->tij', -lmbda, self.C, self.C)
# Test hooks
def test_joint_probability(self, x):
# A differentiable function to compute the joint probability for a given
# latent state sequence
import autograd.numpy as anp
T = self.T
ll = 0
# Initial likelihood
mu_init, sigma_init = self.mu_init, self.sigma_init
ll += -0.5 * anp.dot(x[0] - mu_init, anp.linalg.solve(sigma_init, x[0] - mu_init))
# Transition likelihoods
A, B, Q = self.A, self.B, self.sigma_states
xpred = anp.dot(x[:T-1], A.T) + anp.dot(self.inputs[:T-1], B.T)
dx = x[1:] - xpred
ll += -0.5 * (dx.T * anp.linalg.solve(Q, dx.T)).sum()
# Observation likelihoods
y = self.data
C, D = self.C, self.D
loglmbda = (anp.dot(x, C.T) + anp.dot(self.inputs, D.T))
lmbda = anp.exp(loglmbda)
ll += anp.sum(y * loglmbda)
ll -= anp.sum(lmbda)
if anp.isnan(ll):
ll = -anp.inf
return ll
def test_gradient_log_joint(self, x):
return grad(self.test_joint_probability)(x)
def test_hessian_log_joint(self, x):
return hessian(self.test_joint_probability)(x)
|
mattjj/pylds | pylds/laplace.py | LaplaceApproxPoissonLDSStates.hessian_local_log_likelihood | python | def hessian_local_log_likelihood(self, x):
# Observation likelihoods
lmbda = np.exp(np.dot(x, self.C.T) + np.dot(self.inputs, self.D.T))
return np.einsum('tn, ni, nj ->tij', -lmbda, self.C, self.C) | d/dx (y - lmbda)^T C = d/dx -exp(Cx + d)^T C
= -C^T exp(Cx + d)^T C | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L359-L366 | null | class LaplaceApproxPoissonLDSStates(_LaplaceApproxLDSStatesBase):
"""
Poisson observations
"""
def local_log_likelihood(self, xt, yt, ut):
# Observation likelihoods
C, D = self.C, self.D
loglmbda = np.dot(C, xt) + np.dot(D, ut)
lmbda = np.exp(loglmbda)
ll = np.sum(yt * loglmbda)
ll -= np.sum(lmbda)
ll -= np.sum(gammaln(yt + 1))
return ll
# Override likelihood, gradient, and hessian with vectorized forms
def log_conditional_likelihood(self, x):
# Observation likelihoods
C, D = self.C, self.D
loglmbda = np.dot(x, C.T) + np.dot(self.inputs, D.T)
lmbda = np.exp(loglmbda)
ll = np.sum(self.data * loglmbda)
ll -= np.sum(lmbda)
ll -= np.sum(gammaln(self.data + 1))
return ll
def grad_local_log_likelihood(self, x):
"""
d/dx y^T Cx + y^T d - exp(Cx+d)
= y^T C - exp(Cx+d)^T C
= (y - lmbda)^T C
"""
# Observation likelihoods
lmbda = np.exp(np.dot(x, self.C.T) + np.dot(self.inputs, self.D.T))
return (self.data - lmbda).dot(self.C)
# Test hooks
def test_joint_probability(self, x):
# A differentiable function to compute the joint probability for a given
# latent state sequence
import autograd.numpy as anp
T = self.T
ll = 0
# Initial likelihood
mu_init, sigma_init = self.mu_init, self.sigma_init
ll += -0.5 * anp.dot(x[0] - mu_init, anp.linalg.solve(sigma_init, x[0] - mu_init))
# Transition likelihoods
A, B, Q = self.A, self.B, self.sigma_states
xpred = anp.dot(x[:T-1], A.T) + anp.dot(self.inputs[:T-1], B.T)
dx = x[1:] - xpred
ll += -0.5 * (dx.T * anp.linalg.solve(Q, dx.T)).sum()
# Observation likelihoods
y = self.data
C, D = self.C, self.D
loglmbda = (anp.dot(x, C.T) + anp.dot(self.inputs, D.T))
lmbda = anp.exp(loglmbda)
ll += anp.sum(y * loglmbda)
ll -= anp.sum(lmbda)
if anp.isnan(ll):
ll = -anp.inf
return ll
def test_gradient_log_joint(self, x):
return grad(self.test_joint_probability)(x)
def test_hessian_log_joint(self, x):
return hessian(self.test_joint_probability)(x)
|
mattjj/pylds | pylds/laplace.py | LaplaceApproxBernoulliLDSStates.grad_local_log_likelihood | python | def grad_local_log_likelihood(self, x):
C, D, u, y = self.C, self.D, self.inputs, self.data
psi = x.dot(C.T) + u.dot(D.T)
p = 1. / (1 + np.exp(-psi))
return (y - p).dot(C) | d/d \psi y \psi - log (1 + exp(\psi))
= y - exp(\psi) / (1 + exp(\psi))
= y - sigma(psi)
= y - p
d \psi / dx = C
d / dx = (y - sigma(psi)) * C | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L458-L472 | null | class LaplaceApproxBernoulliLDSStates(_LaplaceApproxLDSStatesBase):
"""
Bernoulli observations with Laplace approximation
Let \psi_t = C x_t + D u_t
p(y_t = 1 | x_t) = \sigma(\psi_t)
log p(y_t | x_t) = y_t \log \sigma(\psi_t) +
(1-y_t) \log \sigma(-\psi_t) +
= y_t \psi_t - log (1 + exp(\psi_t))
use the log-sum-exp trick to compute this:
= y_t \psi_t - log {exp(0) + exp(\psi_t)}
= y_t \psi_t - log {m [exp(0 - log m) + exp(\psi_t - log m)]}
= y_t \psi_t - log m - log {exp(-log m) + exp(\psi_t - log m)}
set log m = max(0, psi)
"""
def local_log_likelihood(self, xt, yt, ut):
# Observation likelihoods
C, D = self.C, self.D
psi = C.dot(xt) + D.dot(ut)
ll = np.sum(yt * psi)
# Compute second term with log-sum-exp trick (see above)
logm = np.maximum(0, psi)
ll -= np.sum(logm)
ll -= np.sum(np.log(np.exp(-logm) + np.exp(psi - logm)))
return ll
# Override likelihood, gradient, and hessian with vectorized forms
def log_conditional_likelihood(self, x):
# Observation likelihoods
C, D, u, y = self.C, self.D, self.inputs, self.data
psi = x.dot(C.T) + u.dot(D.T)
# First term is linear in psi
ll = np.sum(y * psi)
# Compute second term with log-sum-exp trick (see above)
logm = np.maximum(0, psi)
ll -= np.sum(logm)
ll -= np.sum(np.log(np.exp(-logm) + np.exp(psi - logm)))
return ll
def hessian_local_log_likelihood(self, x):
"""
d/dx (y - p) * C
= -dpsi/dx (dp/d\psi) C
= -C p (1-p) C
"""
C, D, u, y = self.C, self.D, self.inputs, self.data
psi = x.dot(C.T) + u.dot(D.T)
p = 1. / (1 + np.exp(-psi))
dp_dpsi = p * (1 - p)
return np.einsum('tn, ni, nj ->tij', -dp_dpsi, self.C, self.C)
# Test hooks
def test_joint_probability(self, x):
# A differentiable function to compute the joint probability for a given
# latent state sequence
import autograd.numpy as anp
T = self.T
ll = 0
# Initial likelihood
mu_init, sigma_init = self.mu_init, self.sigma_init
ll += -0.5 * anp.dot(x[0] - mu_init, anp.linalg.solve(sigma_init, x[0] - mu_init))
# Transition likelihoods
A, B, Q = self.A, self.B, self.sigma_states
xpred = anp.dot(x[:T-1], A.T) + anp.dot(self.inputs[:T-1], B.T)
dx = x[1:] - xpred
ll += -0.5 * (dx.T * anp.linalg.solve(Q, dx.T)).sum()
# Observation likelihoods
y = self.data
C, D = self.C, self.D
psi = (anp.dot(x, C.T) + anp.dot(self.inputs, D.T))
ll += anp.sum(y * psi)
ll -= anp.sum(np.log(1 + np.exp(psi)))
if anp.isnan(ll):
ll = -anp.inf
return ll
def test_gradient_log_joint(self, x):
return grad(self.test_joint_probability)(x)
def test_hessian_log_joint(self, x):
return hessian(self.test_joint_probability)(x)
|
mattjj/pylds | pylds/laplace.py | LaplaceApproxBernoulliLDSStates.hessian_local_log_likelihood | python | def hessian_local_log_likelihood(self, x):
C, D, u, y = self.C, self.D, self.inputs, self.data
psi = x.dot(C.T) + u.dot(D.T)
p = 1. / (1 + np.exp(-psi))
dp_dpsi = p * (1 - p)
return np.einsum('tn, ni, nj ->tij', -dp_dpsi, self.C, self.C) | d/dx (y - p) * C
= -dpsi/dx (dp/d\psi) C
= -C p (1-p) C | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L474-L484 | null | class LaplaceApproxBernoulliLDSStates(_LaplaceApproxLDSStatesBase):
"""
Bernoulli observations with Laplace approximation
Let \psi_t = C x_t + D u_t
p(y_t = 1 | x_t) = \sigma(\psi_t)
log p(y_t | x_t) = y_t \log \sigma(\psi_t) +
(1-y_t) \log \sigma(-\psi_t) +
= y_t \psi_t - log (1 + exp(\psi_t))
use the log-sum-exp trick to compute this:
= y_t \psi_t - log {exp(0) + exp(\psi_t)}
= y_t \psi_t - log {m [exp(0 - log m) + exp(\psi_t - log m)]}
= y_t \psi_t - log m - log {exp(-log m) + exp(\psi_t - log m)}
set log m = max(0, psi)
"""
def local_log_likelihood(self, xt, yt, ut):
# Observation likelihoods
C, D = self.C, self.D
psi = C.dot(xt) + D.dot(ut)
ll = np.sum(yt * psi)
# Compute second term with log-sum-exp trick (see above)
logm = np.maximum(0, psi)
ll -= np.sum(logm)
ll -= np.sum(np.log(np.exp(-logm) + np.exp(psi - logm)))
return ll
# Override likelihood, gradient, and hessian with vectorized forms
def log_conditional_likelihood(self, x):
# Observation likelihoods
C, D, u, y = self.C, self.D, self.inputs, self.data
psi = x.dot(C.T) + u.dot(D.T)
# First term is linear in psi
ll = np.sum(y * psi)
# Compute second term with log-sum-exp trick (see above)
logm = np.maximum(0, psi)
ll -= np.sum(logm)
ll -= np.sum(np.log(np.exp(-logm) + np.exp(psi - logm)))
return ll
def grad_local_log_likelihood(self, x):
"""
d/d \psi y \psi - log (1 + exp(\psi))
= y - exp(\psi) / (1 + exp(\psi))
= y - sigma(psi)
= y - p
d \psi / dx = C
d / dx = (y - sigma(psi)) * C
"""
C, D, u, y = self.C, self.D, self.inputs, self.data
psi = x.dot(C.T) + u.dot(D.T)
p = 1. / (1 + np.exp(-psi))
return (y - p).dot(C)
# Test hooks
def test_joint_probability(self, x):
# A differentiable function to compute the joint probability for a given
# latent state sequence
import autograd.numpy as anp
T = self.T
ll = 0
# Initial likelihood
mu_init, sigma_init = self.mu_init, self.sigma_init
ll += -0.5 * anp.dot(x[0] - mu_init, anp.linalg.solve(sigma_init, x[0] - mu_init))
# Transition likelihoods
A, B, Q = self.A, self.B, self.sigma_states
xpred = anp.dot(x[:T-1], A.T) + anp.dot(self.inputs[:T-1], B.T)
dx = x[1:] - xpred
ll += -0.5 * (dx.T * anp.linalg.solve(Q, dx.T)).sum()
# Observation likelihoods
y = self.data
C, D = self.C, self.D
psi = (anp.dot(x, C.T) + anp.dot(self.inputs, D.T))
ll += anp.sum(y * psi)
ll -= anp.sum(np.log(1 + np.exp(psi)))
if anp.isnan(ll):
ll = -anp.inf
return ll
def test_gradient_log_joint(self, x):
return grad(self.test_joint_probability)(x)
def test_hessian_log_joint(self, x):
return hessian(self.test_joint_probability)(x)
|
mattjj/pylds | pylds/models.py | ZeroInflatedCountLDS.resample_emission_distn | python | def resample_emission_distn(self):
masked_datas = [s.masked_data.tocsc() for s in self.states_list]
xs = [np.hstack((s.gaussian_states, s.inputs))for s in self.states_list]
for n in range(self.D_obs):
# Get the nonzero values of the nth column
rowns = [md.indices[md.indptr[n]:md.indptr[n+1]] for md in masked_datas]
xns = [x[r] for x,r in zip(xs, rowns)]
yns = [s.masked_data.getcol(n).data for s in self.states_list]
maskns = [np.ones_like(y, dtype=bool) for y in yns]
omegans = [s.omega.getcol(n).data for s in self.states_list]
self.emission_distn._resample_row_of_emission_matrix(n, xns, yns, maskns, omegans) | Now for the expensive part... the data is stored in a sparse row
format, which is good for updating the latent states (since we
primarily rely on dot products with the data, which can be
efficiently performed for CSR matrices).
However, in order to update the n-th row of the emission matrix,
we need to know which counts are observed in the n-th column of data.
This involves converting the data to a sparse column format, which
can require (time) intensive re-indexing. | train | https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/models.py#L434-L456 | null | class ZeroInflatedCountLDS(_LDSGibbsSampling, _LDSBase):
_states_class = LDSStatesZeroInflatedCountData
def __init__(self, rho, *args, **kwargs):
"""
:param rho: Probability of count drawn from model
With pr 1-rho, the emission is deterministically zero
"""
super(ZeroInflatedCountLDS, self).__init__(*args, **kwargs)
self.rho = rho
def add_data(self,data, inputs=None, mask=None, **kwargs):
self.states_list.append(self._states_class(model=self, data=data, inputs=inputs, mask=mask, **kwargs))
return self
def _generate_obs(self,s, inputs):
if s.data is None:
# TODO: Do this sparsely
inputs = np.zeros((s.T, 0)) if inputs is None else inputs
data = self.emission_distn.rvs(
x=np.hstack((s.gaussian_states, inputs)), return_xy=False)
# Zero out data
zeros = np.random.rand(s.T, self.D_obs) > self.rho
data[zeros] = 0
from scipy.sparse import csr_matrix
s.data = csr_matrix(data)
else:
# filling in missing data
raise NotImplementedError
return s.data
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/fastadir/fastadir.py | FastaDir.fetch | python | def fetch(self, seq_id, start=None, end=None):
rec = self._db.execute("""select * from seqinfo where seq_id = ? order by added desc""", [seq_id]).fetchone()
if rec is None:
raise KeyError(seq_id)
if self._writing and self._writing["relpath"] == rec["relpath"]:
logger.warning("""Fetching from file opened for writing;
closing first ({})""".format(rec["relpath"]))
self.commit()
path = os.path.join(self._root_dir, rec["relpath"])
fabgz = self._open_for_reading(path)
return fabgz.fetch(seq_id, start, end) | fetch sequence by seq_id, optionally with start, end bounds | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/fastadir/fastadir.py#L102-L118 | [
"def commit(self):\n if self._writing is not None:\n self._writing[\"fabgz\"].close()\n self._db.commit()\n self._writing = None\n"
] | class FastaDir(BaseReader, BaseWriter):
"""This class provides simple a simple key-value interface to a
directory of compressed fasta files.
Sequences are stored in dated fasta files. Dating the files
enables compact storage with multiple releases (using hard links)
and efficient incremental updates and transfers (e.g., via rsync).
The fasta files are compressed with block gzip, enabling fast
random access to arbitrary regions of even large (chromosome-size)
sequences (thanks to pysam.FastaFile).
When the key is a hash based on sequence (e.g., SHA512), the
combination provides a convenient non-redundant storage of
sequences, with fast access to sequences and sequence slices,
compact storage and easy replication.
The two primary methods are:
* seq_id <- store(seq, seq_id): store a sequence
* seq <- fetch(seq_id, [s, e]): return sequence (slice)
"""
def __init__(self, root_dir, writeable=False, check_same_thread=True):
"""Creates a new sequence repository if necessary, and then opens it"""
self._root_dir = root_dir
self._db_path = os.path.join(self._root_dir, "db.sqlite3")
self._writing = None
self._db = None
self._writeable = writeable
if self._writeable:
makedirs(self._root_dir, exist_ok=True)
self._upgrade_db()
self._db = sqlite3.connect(self._db_path, check_same_thread=check_same_thread)
schema_version = self.schema_version()
self._db.row_factory = sqlite3.Row
# if we're not at the expected schema version for this code, bail
if schema_version != expected_schema_version:
raise RuntimeError("""Upgrade required: Database schema
version is {} and code expects {}""".format(schema_version, expected_schema_version))
# ############################################################################
# Special methods
def __contains__(self, seq_id):
c = self._db.execute("select exists(select 1 from seqinfo where seq_id = ? limit 1) as ex", [seq_id]).fetchone()
return True if c["ex"] else False
def __iter__(self):
sql = "select * from seqinfo order by seq_id"
for rec in self._db.execute(sql):
recd = dict(rec)
recd["seq"] = self.fetch(rec["seq_id"])
yield recd
def __len__(self):
return self.stats()["n_sequences"]
# ############################################################################
# Public methods
def commit(self):
if self._writing is not None:
self._writing["fabgz"].close()
self._db.commit()
self._writing = None
def schema_version(self):
"""return schema version as integer"""
try:
return int(
self._db.execute("""select value from meta
where key = 'schema version'""").fetchone()[0])
except sqlite3.OperationalError:
return None
def stats(self):
sql = """select count(distinct seq_id) n_sequences, sum(len) tot_length,
min(added) min_ts, max(added) as max_ts, count(distinct relpath) as
n_files from seqinfo"""
return dict(self._db.execute(sql).fetchone())
def store(self, seq_id, seq):
"""store a sequence with key seq_id. The sequence itself is stored in
a fasta file and a reference to it in the sqlite3 database.
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
# open a file for writing if necessary
# path: <root_dir>/<reldir>/<basename>
# <---- relpath ---->
# <------ dir_ ----->
# <----------- path ----------->
if self._writing is None:
reldir = datetime.datetime.utcnow().strftime("%Y/%m%d/%H%M")
basename = str(time.time()) + ".fa.bgz"
relpath = os.path.join(reldir, basename)
dir_ = os.path.join(self._root_dir, reldir)
path = os.path.join(self._root_dir, reldir, basename)
makedirs(dir_, exist_ok=True)
fabgz = FabgzWriter(path)
self._writing = {"relpath": relpath, "fabgz": fabgz}
logger.info("Opened for writing: " + path)
self._writing["fabgz"].store(seq_id, seq)
alpha = "".join(sorted(set(seq)))
self._db.execute("""insert into seqinfo (seq_id, len, alpha, relpath)
values (?, ?, ?,?)""", (seq_id, len(seq), alpha, self._writing["relpath"]))
return seq_id
# ############################################################################
# Internal methods
def _upgrade_db(self):
"""upgrade db using scripts for specified (current) schema version"""
migration_path = "_data/migrations"
sqlite3.connect(self._db_path).close() # ensure that it exists
db_url = "sqlite:///" + self._db_path
backend = yoyo.get_backend(db_url)
migration_dir = pkg_resources.resource_filename(__package__, migration_path)
migrations = yoyo.read_migrations(migration_dir)
migrations_to_apply = backend.to_apply(migrations)
backend.apply_migrations(migrations_to_apply)
@lru_cache()
def _open_for_reading(self, path):
logger.info("Opening for reading: " + path)
return FabgzReader(path)
def _dump_aliases(self):
import prettytable
fields = "seq_id len alpha added relpath".split()
pt = prettytable.PrettyTable(field_names=fields)
for r in self._db.execute("select * from seqinfo"):
pt.add_row([r[f] for f in fields])
print(pt)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/fastadir/fastadir.py | FastaDir.store | python | def store(self, seq_id, seq):
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
# open a file for writing if necessary
# path: <root_dir>/<reldir>/<basename>
# <---- relpath ---->
# <------ dir_ ----->
# <----------- path ----------->
if self._writing is None:
reldir = datetime.datetime.utcnow().strftime("%Y/%m%d/%H%M")
basename = str(time.time()) + ".fa.bgz"
relpath = os.path.join(reldir, basename)
dir_ = os.path.join(self._root_dir, reldir)
path = os.path.join(self._root_dir, reldir, basename)
makedirs(dir_, exist_ok=True)
fabgz = FabgzWriter(path)
self._writing = {"relpath": relpath, "fabgz": fabgz}
logger.info("Opened for writing: " + path)
self._writing["fabgz"].store(seq_id, seq)
alpha = "".join(sorted(set(seq)))
self._db.execute("""insert into seqinfo (seq_id, len, alpha, relpath)
values (?, ?, ?,?)""", (seq_id, len(seq), alpha, self._writing["relpath"]))
return seq_id | store a sequence with key seq_id. The sequence itself is stored in
a fasta file and a reference to it in the sqlite3 database. | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/fastadir/fastadir.py#L135-L165 | null | class FastaDir(BaseReader, BaseWriter):
"""This class provides simple a simple key-value interface to a
directory of compressed fasta files.
Sequences are stored in dated fasta files. Dating the files
enables compact storage with multiple releases (using hard links)
and efficient incremental updates and transfers (e.g., via rsync).
The fasta files are compressed with block gzip, enabling fast
random access to arbitrary regions of even large (chromosome-size)
sequences (thanks to pysam.FastaFile).
When the key is a hash based on sequence (e.g., SHA512), the
combination provides a convenient non-redundant storage of
sequences, with fast access to sequences and sequence slices,
compact storage and easy replication.
The two primary methods are:
* seq_id <- store(seq, seq_id): store a sequence
* seq <- fetch(seq_id, [s, e]): return sequence (slice)
"""
def __init__(self, root_dir, writeable=False, check_same_thread=True):
"""Creates a new sequence repository if necessary, and then opens it"""
self._root_dir = root_dir
self._db_path = os.path.join(self._root_dir, "db.sqlite3")
self._writing = None
self._db = None
self._writeable = writeable
if self._writeable:
makedirs(self._root_dir, exist_ok=True)
self._upgrade_db()
self._db = sqlite3.connect(self._db_path, check_same_thread=check_same_thread)
schema_version = self.schema_version()
self._db.row_factory = sqlite3.Row
# if we're not at the expected schema version for this code, bail
if schema_version != expected_schema_version:
raise RuntimeError("""Upgrade required: Database schema
version is {} and code expects {}""".format(schema_version, expected_schema_version))
# ############################################################################
# Special methods
def __contains__(self, seq_id):
c = self._db.execute("select exists(select 1 from seqinfo where seq_id = ? limit 1) as ex", [seq_id]).fetchone()
return True if c["ex"] else False
def __iter__(self):
sql = "select * from seqinfo order by seq_id"
for rec in self._db.execute(sql):
recd = dict(rec)
recd["seq"] = self.fetch(rec["seq_id"])
yield recd
def __len__(self):
return self.stats()["n_sequences"]
# ############################################################################
# Public methods
def commit(self):
if self._writing is not None:
self._writing["fabgz"].close()
self._db.commit()
self._writing = None
def fetch(self, seq_id, start=None, end=None):
"""fetch sequence by seq_id, optionally with start, end bounds
"""
rec = self._db.execute("""select * from seqinfo where seq_id = ? order by added desc""", [seq_id]).fetchone()
if rec is None:
raise KeyError(seq_id)
if self._writing and self._writing["relpath"] == rec["relpath"]:
logger.warning("""Fetching from file opened for writing;
closing first ({})""".format(rec["relpath"]))
self.commit()
path = os.path.join(self._root_dir, rec["relpath"])
fabgz = self._open_for_reading(path)
return fabgz.fetch(seq_id, start, end)
def schema_version(self):
"""return schema version as integer"""
try:
return int(
self._db.execute("""select value from meta
where key = 'schema version'""").fetchone()[0])
except sqlite3.OperationalError:
return None
def stats(self):
sql = """select count(distinct seq_id) n_sequences, sum(len) tot_length,
min(added) min_ts, max(added) as max_ts, count(distinct relpath) as
n_files from seqinfo"""
return dict(self._db.execute(sql).fetchone())
# ############################################################################
# Internal methods
def _upgrade_db(self):
"""upgrade db using scripts for specified (current) schema version"""
migration_path = "_data/migrations"
sqlite3.connect(self._db_path).close() # ensure that it exists
db_url = "sqlite:///" + self._db_path
backend = yoyo.get_backend(db_url)
migration_dir = pkg_resources.resource_filename(__package__, migration_path)
migrations = yoyo.read_migrations(migration_dir)
migrations_to_apply = backend.to_apply(migrations)
backend.apply_migrations(migrations_to_apply)
@lru_cache()
def _open_for_reading(self, path):
logger.info("Opening for reading: " + path)
return FabgzReader(path)
def _dump_aliases(self):
import prettytable
fields = "seq_id len alpha added relpath".split()
pt = prettytable.PrettyTable(field_names=fields)
for r in self._db.execute("select * from seqinfo"):
pt.add_row([r[f] for f in fields])
print(pt)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/py2compat/_makedirs.py | makedirs | python | def makedirs(name, mode=0o777, exist_ok=False):
if os.path.exists(name):
if not exist_ok:
raise FileExistsError("File exists: " + name)
else:
os.makedirs(name, mode) | cheapo replacement for py3 makedirs with support for exist_ok | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/py2compat/_makedirs.py#L10-L19 | null | import os
import six
class FileExistsError(OSError):
pass
|
biocommons/biocommons.seqrepo | misc/threading-verification.py | fetch_in_thread | python | def fetch_in_thread(sr, nsa):
def fetch_seq(q, nsa):
pid, ppid = os.getpid(), os.getppid()
q.put((pid, ppid, sr[nsa]))
q = Queue()
p = Process(target=fetch_seq, args=(q, nsa))
p.start()
pid, ppid, seq = q.get()
p.join()
assert pid != ppid, "sequence was not fetched from thread"
return pid, ppid, seq | fetch a sequence in a thread | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/misc/threading-verification.py#L32-L48 | null | # See https://github.com/biocommons/biocommons.seqrepo/issues/11
# Issue: seqrepo raises exceptions in (some) multithreaded environments
# Here's my understanding: sqlite support for multthreading (Python or
# otherwise) varies by version, platform, and compilation flags. It
# is always safe to create separate readers after spawning threads.
# Alternatively, depending on sqlite library support, it *may* be
# possible to share an instances across threads by allocating before
# spawning threads. Furthermore, some versions of sqlite issue
# warnings if the library is used in a threaded environment without
# check_same_thread=False, but that this check is advisory only (i.e.,
# it doesn't change library behavior).
# For Reece:
# sys.platform: linux2
# sys.version: 2.7.13 (default, Jan 19 2017, 14:48:08) [GCC 6.3.0 20170118]
# sqlite3.sqlite_version: 3.16.2
# pid 9659 created SeqRepo(root_dir=/tmp/sr, writeable=False)
# (9660, 9659, 'SMELLASSWEET')
# pid 9659 created SeqRepo(root_dir=/tmp/sr, writeable=True)
# (9662, 9659, 'SMELLASSWEET')
import os
from multiprocessing import Process, Queue
import sqlite3
import sys
from biocommons.seqrepo import SeqRepo
def fetch_in_thread(sr, nsa):
"""fetch a sequence in a thread
"""
def fetch_seq(q, nsa):
pid, ppid = os.getpid(), os.getppid()
q.put((pid, ppid, sr[nsa]))
q = Queue()
p = Process(target=fetch_seq, args=(q, nsa))
p.start()
pid, ppid, seq = q.get()
p.join()
assert pid != ppid, "sequence was not fetched from thread"
return pid, ppid, seq
def make_seqrepo(writeable):
sr = SeqRepo("/tmp/sr", writeable=True)
sr.store("SMELLASSWEET", [{"namespace": "en", "alias": "rose"}, {"namespace": "fr", "alias": "rose"}])
if writeable is False:
del sr
sr = SeqRepo("/tmp/sr", writeable=writeable)
print("pid {pid} created {sr}".format(pid=os.getpid(), sr=sr))
return sr
if __name__ == "__main__":
nsa = "en:rose"
def _test(sr):
r = fetch_in_thread(sr, nsa)
print(r)
print("sys.platform: " + sys.platform)
print("sys.version: " + sys.version.replace("\n", " "))
print("sqlite3.sqlite_version: " + sqlite3.sqlite_version)
_test(make_seqrepo(writeable=False))
_test(make_seqrepo(writeable=True))
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/fastaiter/fastaiter.py | FastaIter | python | def FastaIter(handle):
header = None
for line in handle:
if line.startswith(">"):
if header is not None: # not the first record
yield header, "".join(seq_lines)
seq_lines = list()
header = line[1:].rstrip()
else:
if header is not None: # not the first record
seq_lines.append(line.strip())
if header is not None:
yield header, "".join(seq_lines)
else: # no FASTA records in file
return | generator that returns (header, sequence) tuples from an open FASTA file handle
Lines before the start of the first record are ignored. | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/fastaiter/fastaiter.py#L1-L21 | null | |
biocommons/biocommons.seqrepo | biocommons/seqrepo/py2compat/_which.py | which | python | def which(file):
for path in os.environ["PATH"].split(os.pathsep):
if os.path.exists(os.path.join(path, file)):
return os.path.join(path, file)
return None | >>> which("sh") is not None
True
>>> which("bogus-executable-that-doesn't-exist") is None
True | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/py2compat/_which.py#L3-L15 | null | import os
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/fastadir/fabgz.py | _get_bgzip_version | python | def _get_bgzip_version(exe):
p = subprocess.Popen([exe, "-h"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output = p.communicate()
version_line = output[0].splitlines()[1]
version = re.match(r"(?:Version:|bgzip \(htslib\))\s+(\d+\.\d+(\.\d+)?)", version_line).group(1)
return version | return bgzip version as string | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/fastadir/fabgz.py#L32-L38 | null | """classes to read and write block gzip fasta files
A file may not currently be opened for reading and writing at the same time
Files must be named as .fa.bgz to be recognized as blocked gzip compressed
"""
from __future__ import unicode_literals
import io
import logging
import os
import re
import stat
import subprocess
import six
from pysam import FastaFile
from ..py2compat import which
logger = logging.getLogger(__name__)
line_width = 100
min_bgzip_version_info = (1, 2, 1)
def _find_bgzip():
"""return path to bgzip if found and meets version requirements, else exception"""
missing_file_exception = OSError if six.PY2 else FileNotFoundError
min_bgzip_version = ".".join(map(str, min_bgzip_version_info))
exe = os.environ.get("SEQREPO_BGZIP_PATH", which("bgzip") or "/usr/bin/bgzip")
try:
bgzip_version = _get_bgzip_version(exe)
except AttributeError:
raise RuntimeError("Didn't find version string in bgzip executable ({exe})".format(exe=exe))
except missing_file_exception:
raise RuntimeError("{exe} doesn't exist; you need to install htslib (See https://github.com/biocommons/biocommons.seqrepo#requirements)".format(exe=exe))
except Exception:
raise RuntimeError("Unknown error while executing {exe}".format(exe=exe))
bgzip_version_info = tuple(map(int, bgzip_version.split(".")))
if bgzip_version_info < min_bgzip_version_info:
raise RuntimeError("bgzip ({exe}) {ev} is too old; >= {rv} is required; please upgrade".format(
exe=exe, ev=bgzip_version, rv=min_bgzip_version))
logger.info("Using bgzip {ev} ({exe})".format(ev=bgzip_version, exe=exe))
return exe
class FabgzReader(object):
def __init__(self, filename):
self._fh = FastaFile(filename)
def fetch(self, seq_id, start=None, end=None):
return self._fh.fetch(seq_id.encode("ascii"), start, end)
def keys(self):
return self._fh.references
def __len__(self):
return self._fh.nreferences
def __getitem__(self, ac):
return self.fetch(ac)
@property
def filename(self):
return self._fh.filename
class FabgzWriter(object):
# TODO: Use temp filename until indexes are built and perms are set, then rename
def __init__(self, filename):
super(FabgzWriter, self).__init__()
self.filename = filename
self._fh = None
self._basepath, suffix = os.path.splitext(self.filename)
if suffix != ".bgz":
raise RuntimeError("Path must end with .bgz")
self._bgzip_exe = _find_bgzip()
files = [self.filename, self.filename + ".fai", self.filename + ".gzi", self._basepath]
if any(os.path.exists(fn) for fn in files):
raise RuntimeError("One or more target files already exists ({})".format(", ".join(files)))
self._fh = io.open(self._basepath, encoding="ascii", mode="w")
logger.debug("opened " + self.filename + " for writing")
self._added = set()
def store(self, seq_id, seq):
def wrap_lines(seq, line_width):
for i in range(0, len(seq), line_width):
yield seq[i:i + line_width]
if seq_id not in self._added:
self._fh.write(">" + seq_id + "\n")
for l in wrap_lines(seq, line_width):
self._fh.write(l + "\n")
self._added.add(seq_id)
logger.debug("added seq_id {i}; length {l}".format(i=seq_id, l=len(seq)))
return seq_id
def close(self):
if self._fh:
self._fh.close()
self._fh = None
subprocess.check_call([self._bgzip_exe, "--force", self._basepath])
os.rename(self._basepath + ".gz", self.filename)
# open file with FastaFile to create indexes, then make all read-only
_fh = FastaFile(self.filename)
_fh.close()
os.chmod(self.filename, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
os.chmod(self.filename + ".fai", stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
os.chmod(self.filename + ".gzi", stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
logger.info("{} written; added {} sequences".format(self.filename, len(self._added)))
def __del__(self):
if self._fh is not None:
logger.error("FabgzWriter({}) was not explicitly closed; data may be lost".format(self.filename))
self.close()
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/fastadir/fabgz.py | _find_bgzip | python | def _find_bgzip():
missing_file_exception = OSError if six.PY2 else FileNotFoundError
min_bgzip_version = ".".join(map(str, min_bgzip_version_info))
exe = os.environ.get("SEQREPO_BGZIP_PATH", which("bgzip") or "/usr/bin/bgzip")
try:
bgzip_version = _get_bgzip_version(exe)
except AttributeError:
raise RuntimeError("Didn't find version string in bgzip executable ({exe})".format(exe=exe))
except missing_file_exception:
raise RuntimeError("{exe} doesn't exist; you need to install htslib (See https://github.com/biocommons/biocommons.seqrepo#requirements)".format(exe=exe))
except Exception:
raise RuntimeError("Unknown error while executing {exe}".format(exe=exe))
bgzip_version_info = tuple(map(int, bgzip_version.split(".")))
if bgzip_version_info < min_bgzip_version_info:
raise RuntimeError("bgzip ({exe}) {ev} is too old; >= {rv} is required; please upgrade".format(
exe=exe, ev=bgzip_version, rv=min_bgzip_version))
logger.info("Using bgzip {ev} ({exe})".format(ev=bgzip_version, exe=exe))
return exe | return path to bgzip if found and meets version requirements, else exception | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/fastadir/fabgz.py#L41-L60 | [
"def which(file):\n \"\"\"\n >>> which(\"sh\") is not None\n True\n\n >>> which(\"bogus-executable-that-doesn't-exist\") is None\n True\n\n \"\"\"\n for path in os.environ[\"PATH\"].split(os.pathsep):\n if os.path.exists(os.path.join(path, file)):\n return os.path.join(path, file)\n return None\n",
"def _get_bgzip_version(exe):\n \"\"\"return bgzip version as string\"\"\"\n p = subprocess.Popen([exe, \"-h\"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)\n output = p.communicate()\n version_line = output[0].splitlines()[1]\n version = re.match(r\"(?:Version:|bgzip \\(htslib\\))\\s+(\\d+\\.\\d+(\\.\\d+)?)\", version_line).group(1)\n return version\n"
] | """classes to read and write block gzip fasta files
A file may not currently be opened for reading and writing at the same time
Files must be named as .fa.bgz to be recognized as blocked gzip compressed
"""
from __future__ import unicode_literals
import io
import logging
import os
import re
import stat
import subprocess
import six
from pysam import FastaFile
from ..py2compat import which
logger = logging.getLogger(__name__)

# number of residues written per line in the FASTA body (see FabgzWriter.store)
line_width = 100

# oldest bgzip/htslib release accepted; older executables are rejected by the
# version check when locating bgzip
min_bgzip_version_info = (1, 2, 1)
def _get_bgzip_version(exe):
    """Return the version of the bgzip executable at ``exe`` as a string.

    Runs ``exe -h`` and parses the version from the second line of output.
    If no version string can be parsed, an AttributeError escapes (callers
    catch it to report a missing or unusable executable).
    """
    proc = subprocess.Popen([exe, "-h"],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            universal_newlines=True)
    stdout_text, _ = proc.communicate()
    # bgzip prints usage/help; the version appears on the second line
    version_line = stdout_text.splitlines()[1]
    matched = re.match(r"(?:Version:|bgzip \(htslib\))\s+(\d+\.\d+(\.\d+)?)", version_line)
    return matched.group(1)
class FabgzReader(object):
    """Read-only access to a block-gzip compressed FASTA file."""

    def __init__(self, filename):
        # pysam's FastaFile provides .fai/.gzi-indexed random access
        self._fasta = FastaFile(filename)

    def fetch(self, seq_id, start=None, end=None):
        """Return the sequence for seq_id, optionally sliced [start, end)."""
        # NOTE(review): seq_id is ascii-encoded before the pysam call,
        # presumably a py2/py3 compatibility measure -- confirm vs pysam API
        key = seq_id.encode("ascii")
        return self._fasta.fetch(key, start, end)

    def keys(self):
        """Return the sequence identifiers available in this file."""
        return self._fasta.references

    def __len__(self):
        """Number of sequences in the file."""
        return self._fasta.nreferences

    def __getitem__(self, ac):
        """Dictionary-style access: reader[accession] -> full sequence."""
        return self.fetch(ac)

    @property
    def filename(self):
        """Path of the underlying FASTA file, as reported by pysam."""
        return self._fasta.filename
class FabgzWriter(object):
    """Write sequences to a new block-gzip compressed FASTA file.

    Sequences are first written to a plain FASTA file (the target path
    without the .bgz suffix); close() then compresses it with bgzip,
    builds the pysam indexes, and makes all resulting files read-only.
    """

    # TODO: Use temp filename until indexes are built and perms are set, then rename
    def __init__(self, filename):
        """Open `filename` (which must end in .bgz) for writing.

        Raises RuntimeError if the suffix is wrong or if any target file
        (the .bgz, its .fai/.gzi indexes, or the intermediate plain FASTA)
        already exists.
        """
        super(FabgzWriter, self).__init__()
        self.filename = filename
        self._fh = None
        self._basepath, suffix = os.path.splitext(self.filename)
        if suffix != ".bgz":
            raise RuntimeError("Path must end with .bgz")
        self._bgzip_exe = _find_bgzip()
        # refuse to clobber any file this writer will eventually produce
        files = [self.filename, self.filename + ".fai", self.filename + ".gzi", self._basepath]
        if any(os.path.exists(fn) for fn in files):
            raise RuntimeError("One or more target files already exists ({})".format(", ".join(files)))
        # the uncompressed intermediate; bgzip-compressed by close()
        self._fh = io.open(self._basepath, encoding="ascii", mode="w")
        logger.debug("opened " + self.filename + " for writing")
        # seq_ids already written; duplicates are silently skipped in store()
        self._added = set()

    def store(self, seq_id, seq):
        """Append `seq` under `seq_id` and return seq_id.

        A seq_id that was already stored is silently ignored (the first
        sequence written under that id wins).
        """
        def wrap_lines(seq, line_width):
            # yield fixed-width slices so the FASTA body is line-wrapped
            for i in range(0, len(seq), line_width):
                yield seq[i:i + line_width]
        if seq_id not in self._added:
            self._fh.write(">" + seq_id + "\n")
            for l in wrap_lines(seq, line_width):
                self._fh.write(l + "\n")
            self._added.add(seq_id)
            logger.debug("added seq_id {i}; length {l}".format(i=seq_id, l=len(seq)))
        return seq_id

    def close(self):
        """Finalize the file: bgzip-compress, index, and make read-only.

        Subsequent calls are no-ops because _fh is cleared on the first call.
        """
        if self._fh:
            self._fh.close()
            self._fh = None
            # bgzip writes <basepath>.gz, which is then renamed to the .bgz target
            subprocess.check_call([self._bgzip_exe, "--force", self._basepath])
            os.rename(self._basepath + ".gz", self.filename)
            # open file with FastaFile to create indexes, then make all read-only
            _fh = FastaFile(self.filename)
            _fh.close()
            os.chmod(self.filename, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
            os.chmod(self.filename + ".fai", stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
            os.chmod(self.filename + ".gzi", stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
            logger.info("{} written; added {} sequences".format(self.filename, len(self._added)))

    def __del__(self):
        # last-resort cleanup; relying on __del__ is unreliable, so warn loudly
        if self._fh is not None:
            logger.error("FabgzWriter({}) was not explicitly closed; data may be lost".format(self.filename))
            self.close()
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/seqrepo.py | SeqRepo.fetch_uri | python | def fetch_uri(self, uri, start=None, end=None):
namespace, alias = uri_re.match(uri).groups()
return self.fetch(alias=alias, namespace=namespace, start=start, end=end) | fetch sequence for URI/CURIE of the form namespace:alias, such as
NCBI:NM_000059.3. | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqrepo.py#L102-L109 | [
"def fetch(self, alias, start=None, end=None, namespace=None):\n seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)\n return self.sequences.fetch(seq_id, start, end)\n"
] | class SeqRepo(object):
"""Implements a filesystem-backed non-redundant repository of
sequences and sequence aliases.
The current implementation uses block-gzip fasta files for
sequence storage, essentially as a transaction-based journal, and
a very simple sqlite database for aliases.
Updates add new sequence files and new aliases. This approach
means that distribution of updates involve incremental transfers
of sequences and a wholesale replacement of the database.
The pysam.FastaFile module is key here as it provides fasa index
to bgz files and fast sequence slicing.
"""
def __init__(self, root_dir, writeable=False, upcase=True, translate_ncbi_namespace=False, check_same_thread=False):
self._root_dir = root_dir
self._upcase = upcase
self._db_path = os.path.join(self._root_dir, "aliases.sqlite3")
self._seq_path = os.path.join(self._root_dir, "sequences")
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
self._writeable = writeable
self.translate_ncbi_namespace = translate_ncbi_namespace
self._check_same_thread = True if writeable else check_same_thread
if self._writeable:
makedirs(self._root_dir, exist_ok=True)
if not os.path.exists(self._root_dir):
raise OSError("Unable to open SeqRepo directory {}".format(self._root_dir))
self.sequences = FastaDir(self._seq_path, writeable=self._writeable, check_same_thread=self._check_same_thread)
self.aliases = SeqAliasDB(self._db_path,
writeable=self._writeable,
translate_ncbi_namespace=self.translate_ncbi_namespace,
check_same_thread=self._check_same_thread)
def __contains__(self, nsa):
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.aliases.find_aliases(alias=a, namespace=ns).fetchone() is not None
def __getitem__(self, nsa):
# lookup aliases, optionally namespaced, like NM_01234.5 or NCBI:NM_01234.5
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.fetch(alias=a, namespace=ns)
def __iter__(self):
"""iterate over all sequences, yielding tuples of (sequence_record, [alias_records])
Both records are dicts.
"""
for srec in self.sequences:
arecs = self.aliases.fetch_aliases(srec["seq_id"])
yield (srec, arecs)
def __str__(self):
return "SeqRepo(root_dir={self._root_dir}, writeable={self._writeable})".format(self=self)
def commit(self):
self.sequences.commit()
self.aliases.commit()
if self._pending_sequences + self._pending_aliases > 0:
_logger.info("Committed {} sequences ({} residues) and {} aliases".format(
self._pending_sequences, self._pending_sequences_len, self._pending_aliases))
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
def fetch(self, alias, start=None, end=None, namespace=None):
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
return self.sequences.fetch(seq_id, start, end)
def store(self, seq, nsaliases):
"""nsaliases is a list of dicts, like:
[{"namespace": "en", "alias": "rose"},
{"namespace": "fr", "alias": "rose"},
{"namespace": "es", "alias": "rosa"}]
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
if self._upcase:
seq = seq.upper()
try:
seqhash = bioutils.digests.seq_seqhash(seq)
except Exception as e:
import pprint
_logger.critical("Exception raised for " + pprint.pformat(nsaliases))
raise
seq_id = seqhash
# add sequence if not present
n_seqs_added = n_aliases_added = 0
msg = "sh{nsa_sep}{seq_id:.10s}... ({l} residues; {na} aliases {aliases})".format(
seq_id=seq_id,
l=len(seq),
na=len(nsaliases),
nsa_sep=nsa_sep,
aliases=", ".join("{nsa[namespace]}:{nsa[alias]}".format(nsa=nsa) for nsa in nsaliases))
if seq_id not in self.sequences:
_logger.info("Storing " + msg)
if len(seq) > ct_n_residues: # pragma: no cover
_logger.debug("Precommit for large sequence")
self.commit()
self.sequences.store(seq_id, seq)
n_seqs_added += 1
self._pending_sequences += 1
self._pending_sequences_len += len(seq)
self._pending_aliases += self._update_digest_aliases(seq_id, seq)
else:
_logger.debug("Sequence exists: " + msg)
# add/update external aliases for new and existing sequences
# updating is optimized to load only new <seq_id,ns,alias> tuples
existing_aliases = self.aliases.fetch_aliases(seq_id)
ea_tuples = [(r["seq_id"], r["namespace"], r["alias"]) for r in existing_aliases]
new_tuples = [(seq_id, r["namespace"], r["alias"]) for r in nsaliases]
upd_tuples = set(new_tuples) - set(ea_tuples)
if upd_tuples:
_logger.info("{} new aliases for {}".format(len(upd_tuples), msg))
for _, namespace, alias in upd_tuples:
self.aliases.store_alias(seq_id=seq_id, namespace=namespace, alias=alias)
self._pending_aliases += len(upd_tuples)
n_aliases_added += len(upd_tuples)
if (self._pending_sequences > ct_n_seqs or self._pending_aliases > ct_n_aliases
or self._pending_sequences_len > ct_n_residues): # pragma: no cover
_logger.info("Hit commit thresholds ({self._pending_sequences} sequences, "
"{self._pending_aliases} aliases, {self._pending_sequences_len} residues)".format(self=self))
self.commit()
return n_seqs_added, n_aliases_added
def translate_alias(self, alias, namespace=None, target_namespaces=None, translate_ncbi_namespace=None):
"""given an alias and optional namespace, return a list of all other
aliases for same sequence
"""
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
aliases = self.aliases.fetch_aliases(seq_id=seq_id,
translate_ncbi_namespace=translate_ncbi_namespace)
if target_namespaces:
aliases = [a for a in aliases if a["namespace"] in target_namespaces]
return aliases
def translate_identifier(self, identifier, target_namespaces=None, translate_ncbi_namespace=None):
"""Given a string identifier, return a list of aliases (as
identifiers) that refer to the same sequence.
"""
namespace, alias = identifier.split(nsa_sep) if nsa_sep in identifier else (None, identifier)
aliases = self.translate_alias(alias=alias,
namespace=namespace,
target_namespaces=target_namespaces,
translate_ncbi_namespace=translate_ncbi_namespace)
return [nsa_sep.join((a["namespace"], a["alias"])) for a in aliases]
############################################################################
# Internal Methods
def _get_unique_seqid(self, alias, namespace):
"""given alias and namespace, return seq_id if exactly one distinct
sequence id is found, raise KeyError if there's no match, or
raise ValueError if there's more than one match.
"""
recs = self.aliases.find_aliases(alias=alias, namespace=namespace)
seq_ids = set(r["seq_id"] for r in recs)
if len(seq_ids) == 0:
raise KeyError("Alias {} (namespace: {})".format(alias, namespace))
if len(seq_ids) > 1:
# This should only happen when namespace is None
raise KeyError("Alias {} (namespace: {}): not unique".format(alias, namespace))
return seq_ids.pop()
def _update_digest_aliases(self, seq_id, seq):
"""compute digest aliases for seq and update; returns number of digest
aliases (some of which may have already existed)
For the moment, sha512 is computed for seq_id separately from
the sha512 here. We should fix that.
"""
ir = bioutils.digests.seq_vmc_identifier(seq)
seq_aliases = [
{
"namespace": ir["namespace"],
"alias": ir["accession"],
},
{
"namespace": "SHA1",
"alias": bioutils.digests.seq_sha1(seq)
},
{
"namespace": "MD5",
"alias": bioutils.digests.seq_md5(seq)
},
{
"namespace": "SEGUID",
"alias": bioutils.digests.seq_seguid(seq)
},
]
for sa in seq_aliases:
self.aliases.store_alias(seq_id=seq_id, **sa)
return len(seq_aliases)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/seqrepo.py | SeqRepo.store | python | def store(self, seq, nsaliases):
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
if self._upcase:
seq = seq.upper()
try:
seqhash = bioutils.digests.seq_seqhash(seq)
except Exception as e:
import pprint
_logger.critical("Exception raised for " + pprint.pformat(nsaliases))
raise
seq_id = seqhash
# add sequence if not present
n_seqs_added = n_aliases_added = 0
msg = "sh{nsa_sep}{seq_id:.10s}... ({l} residues; {na} aliases {aliases})".format(
seq_id=seq_id,
l=len(seq),
na=len(nsaliases),
nsa_sep=nsa_sep,
aliases=", ".join("{nsa[namespace]}:{nsa[alias]}".format(nsa=nsa) for nsa in nsaliases))
if seq_id not in self.sequences:
_logger.info("Storing " + msg)
if len(seq) > ct_n_residues: # pragma: no cover
_logger.debug("Precommit for large sequence")
self.commit()
self.sequences.store(seq_id, seq)
n_seqs_added += 1
self._pending_sequences += 1
self._pending_sequences_len += len(seq)
self._pending_aliases += self._update_digest_aliases(seq_id, seq)
else:
_logger.debug("Sequence exists: " + msg)
# add/update external aliases for new and existing sequences
# updating is optimized to load only new <seq_id,ns,alias> tuples
existing_aliases = self.aliases.fetch_aliases(seq_id)
ea_tuples = [(r["seq_id"], r["namespace"], r["alias"]) for r in existing_aliases]
new_tuples = [(seq_id, r["namespace"], r["alias"]) for r in nsaliases]
upd_tuples = set(new_tuples) - set(ea_tuples)
if upd_tuples:
_logger.info("{} new aliases for {}".format(len(upd_tuples), msg))
for _, namespace, alias in upd_tuples:
self.aliases.store_alias(seq_id=seq_id, namespace=namespace, alias=alias)
self._pending_aliases += len(upd_tuples)
n_aliases_added += len(upd_tuples)
if (self._pending_sequences > ct_n_seqs or self._pending_aliases > ct_n_aliases
or self._pending_sequences_len > ct_n_residues): # pragma: no cover
_logger.info("Hit commit thresholds ({self._pending_sequences} sequences, "
"{self._pending_aliases} aliases, {self._pending_sequences_len} residues)".format(self=self))
self.commit()
return n_seqs_added, n_aliases_added | nsaliases is a list of dicts, like:
[{"namespace": "en", "alias": "rose"},
{"namespace": "fr", "alias": "rose"},
{"namespace": "es", "alias": "rosa"}] | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqrepo.py#L112-L172 | [
"def store(self, seq_id, seq):\n \"\"\"store a sequence with key seq_id. The sequence itself is stored in\n a fasta file and a reference to it in the sqlite3 database.\n\n \"\"\"\n\n if not self._writeable:\n raise RuntimeError(\"Cannot write -- opened read-only\")\n\n # open a file for writing if necessary\n # path: <root_dir>/<reldir>/<basename>\n # <---- relpath ---->\n # <------ dir_ ----->\n # <----------- path ----------->\n if self._writing is None:\n reldir = datetime.datetime.utcnow().strftime(\"%Y/%m%d/%H%M\")\n basename = str(time.time()) + \".fa.bgz\"\n relpath = os.path.join(reldir, basename)\n\n dir_ = os.path.join(self._root_dir, reldir)\n path = os.path.join(self._root_dir, reldir, basename)\n makedirs(dir_, exist_ok=True)\n fabgz = FabgzWriter(path)\n self._writing = {\"relpath\": relpath, \"fabgz\": fabgz}\n logger.info(\"Opened for writing: \" + path)\n\n self._writing[\"fabgz\"].store(seq_id, seq)\n alpha = \"\".join(sorted(set(seq)))\n self._db.execute(\"\"\"insert into seqinfo (seq_id, len, alpha, relpath)\n values (?, ?, ?,?)\"\"\", (seq_id, len(seq), alpha, self._writing[\"relpath\"]))\n return seq_id\n",
"def commit(self):\n self.sequences.commit()\n self.aliases.commit()\n if self._pending_sequences + self._pending_aliases > 0:\n _logger.info(\"Committed {} sequences ({} residues) and {} aliases\".format(\n self._pending_sequences, self._pending_sequences_len, self._pending_aliases))\n self._pending_sequences = 0\n self._pending_sequences_len = 0\n self._pending_aliases = 0\n",
"def _update_digest_aliases(self, seq_id, seq):\n\n \"\"\"compute digest aliases for seq and update; returns number of digest\n aliases (some of which may have already existed)\n\n For the moment, sha512 is computed for seq_id separately from\n the sha512 here. We should fix that.\n\n \"\"\"\n\n ir = bioutils.digests.seq_vmc_identifier(seq)\n seq_aliases = [\n {\n \"namespace\": ir[\"namespace\"],\n \"alias\": ir[\"accession\"],\n },\n {\n \"namespace\": \"SHA1\",\n \"alias\": bioutils.digests.seq_sha1(seq)\n },\n {\n \"namespace\": \"MD5\",\n \"alias\": bioutils.digests.seq_md5(seq)\n },\n {\n \"namespace\": \"SEGUID\",\n \"alias\": bioutils.digests.seq_seguid(seq)\n },\n ]\n for sa in seq_aliases:\n self.aliases.store_alias(seq_id=seq_id, **sa)\n return len(seq_aliases)\n"
] | class SeqRepo(object):
"""Implements a filesystem-backed non-redundant repository of
sequences and sequence aliases.
The current implementation uses block-gzip fasta files for
sequence storage, essentially as a transaction-based journal, and
a very simple sqlite database for aliases.
Updates add new sequence files and new aliases. This approach
means that distribution of updates involve incremental transfers
of sequences and a wholesale replacement of the database.
The pysam.FastaFile module is key here as it provides fasa index
to bgz files and fast sequence slicing.
"""
def __init__(self, root_dir, writeable=False, upcase=True, translate_ncbi_namespace=False, check_same_thread=False):
self._root_dir = root_dir
self._upcase = upcase
self._db_path = os.path.join(self._root_dir, "aliases.sqlite3")
self._seq_path = os.path.join(self._root_dir, "sequences")
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
self._writeable = writeable
self.translate_ncbi_namespace = translate_ncbi_namespace
self._check_same_thread = True if writeable else check_same_thread
if self._writeable:
makedirs(self._root_dir, exist_ok=True)
if not os.path.exists(self._root_dir):
raise OSError("Unable to open SeqRepo directory {}".format(self._root_dir))
self.sequences = FastaDir(self._seq_path, writeable=self._writeable, check_same_thread=self._check_same_thread)
self.aliases = SeqAliasDB(self._db_path,
writeable=self._writeable,
translate_ncbi_namespace=self.translate_ncbi_namespace,
check_same_thread=self._check_same_thread)
def __contains__(self, nsa):
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.aliases.find_aliases(alias=a, namespace=ns).fetchone() is not None
def __getitem__(self, nsa):
# lookup aliases, optionally namespaced, like NM_01234.5 or NCBI:NM_01234.5
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.fetch(alias=a, namespace=ns)
def __iter__(self):
"""iterate over all sequences, yielding tuples of (sequence_record, [alias_records])
Both records are dicts.
"""
for srec in self.sequences:
arecs = self.aliases.fetch_aliases(srec["seq_id"])
yield (srec, arecs)
def __str__(self):
return "SeqRepo(root_dir={self._root_dir}, writeable={self._writeable})".format(self=self)
def commit(self):
self.sequences.commit()
self.aliases.commit()
if self._pending_sequences + self._pending_aliases > 0:
_logger.info("Committed {} sequences ({} residues) and {} aliases".format(
self._pending_sequences, self._pending_sequences_len, self._pending_aliases))
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
def fetch(self, alias, start=None, end=None, namespace=None):
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
return self.sequences.fetch(seq_id, start, end)
def fetch_uri(self, uri, start=None, end=None):
"""fetch sequence for URI/CURIE of the form namespace:alias, such as
NCBI:NM_000059.3.
"""
namespace, alias = uri_re.match(uri).groups()
return self.fetch(alias=alias, namespace=namespace, start=start, end=end)
def translate_alias(self, alias, namespace=None, target_namespaces=None, translate_ncbi_namespace=None):
"""given an alias and optional namespace, return a list of all other
aliases for same sequence
"""
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
aliases = self.aliases.fetch_aliases(seq_id=seq_id,
translate_ncbi_namespace=translate_ncbi_namespace)
if target_namespaces:
aliases = [a for a in aliases if a["namespace"] in target_namespaces]
return aliases
def translate_identifier(self, identifier, target_namespaces=None, translate_ncbi_namespace=None):
"""Given a string identifier, return a list of aliases (as
identifiers) that refer to the same sequence.
"""
namespace, alias = identifier.split(nsa_sep) if nsa_sep in identifier else (None, identifier)
aliases = self.translate_alias(alias=alias,
namespace=namespace,
target_namespaces=target_namespaces,
translate_ncbi_namespace=translate_ncbi_namespace)
return [nsa_sep.join((a["namespace"], a["alias"])) for a in aliases]
############################################################################
# Internal Methods
def _get_unique_seqid(self, alias, namespace):
"""given alias and namespace, return seq_id if exactly one distinct
sequence id is found, raise KeyError if there's no match, or
raise ValueError if there's more than one match.
"""
recs = self.aliases.find_aliases(alias=alias, namespace=namespace)
seq_ids = set(r["seq_id"] for r in recs)
if len(seq_ids) == 0:
raise KeyError("Alias {} (namespace: {})".format(alias, namespace))
if len(seq_ids) > 1:
# This should only happen when namespace is None
raise KeyError("Alias {} (namespace: {}): not unique".format(alias, namespace))
return seq_ids.pop()
def _update_digest_aliases(self, seq_id, seq):
"""compute digest aliases for seq and update; returns number of digest
aliases (some of which may have already existed)
For the moment, sha512 is computed for seq_id separately from
the sha512 here. We should fix that.
"""
ir = bioutils.digests.seq_vmc_identifier(seq)
seq_aliases = [
{
"namespace": ir["namespace"],
"alias": ir["accession"],
},
{
"namespace": "SHA1",
"alias": bioutils.digests.seq_sha1(seq)
},
{
"namespace": "MD5",
"alias": bioutils.digests.seq_md5(seq)
},
{
"namespace": "SEGUID",
"alias": bioutils.digests.seq_seguid(seq)
},
]
for sa in seq_aliases:
self.aliases.store_alias(seq_id=seq_id, **sa)
return len(seq_aliases)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/seqrepo.py | SeqRepo.translate_alias | python | def translate_alias(self, alias, namespace=None, target_namespaces=None, translate_ncbi_namespace=None):
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
aliases = self.aliases.fetch_aliases(seq_id=seq_id,
translate_ncbi_namespace=translate_ncbi_namespace)
if target_namespaces:
aliases = [a for a in aliases if a["namespace"] in target_namespaces]
return aliases | given an alias and optional namespace, return a list of all other
aliases for same sequence | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqrepo.py#L175-L188 | [
"def _get_unique_seqid(self, alias, namespace):\n \"\"\"given alias and namespace, return seq_id if exactly one distinct\n sequence id is found, raise KeyError if there's no match, or\n raise ValueError if there's more than one match.\n\n \"\"\"\n\n recs = self.aliases.find_aliases(alias=alias, namespace=namespace)\n seq_ids = set(r[\"seq_id\"] for r in recs)\n if len(seq_ids) == 0:\n raise KeyError(\"Alias {} (namespace: {})\".format(alias, namespace))\n if len(seq_ids) > 1:\n # This should only happen when namespace is None\n raise KeyError(\"Alias {} (namespace: {}): not unique\".format(alias, namespace))\n return seq_ids.pop()\n"
] | class SeqRepo(object):
"""Implements a filesystem-backed non-redundant repository of
sequences and sequence aliases.
The current implementation uses block-gzip fasta files for
sequence storage, essentially as a transaction-based journal, and
a very simple sqlite database for aliases.
Updates add new sequence files and new aliases. This approach
means that distribution of updates involve incremental transfers
of sequences and a wholesale replacement of the database.
The pysam.FastaFile module is key here as it provides fasa index
to bgz files and fast sequence slicing.
"""
def __init__(self, root_dir, writeable=False, upcase=True, translate_ncbi_namespace=False, check_same_thread=False):
self._root_dir = root_dir
self._upcase = upcase
self._db_path = os.path.join(self._root_dir, "aliases.sqlite3")
self._seq_path = os.path.join(self._root_dir, "sequences")
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
self._writeable = writeable
self.translate_ncbi_namespace = translate_ncbi_namespace
self._check_same_thread = True if writeable else check_same_thread
if self._writeable:
makedirs(self._root_dir, exist_ok=True)
if not os.path.exists(self._root_dir):
raise OSError("Unable to open SeqRepo directory {}".format(self._root_dir))
self.sequences = FastaDir(self._seq_path, writeable=self._writeable, check_same_thread=self._check_same_thread)
self.aliases = SeqAliasDB(self._db_path,
writeable=self._writeable,
translate_ncbi_namespace=self.translate_ncbi_namespace,
check_same_thread=self._check_same_thread)
def __contains__(self, nsa):
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.aliases.find_aliases(alias=a, namespace=ns).fetchone() is not None
def __getitem__(self, nsa):
# lookup aliases, optionally namespaced, like NM_01234.5 or NCBI:NM_01234.5
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.fetch(alias=a, namespace=ns)
def __iter__(self):
"""iterate over all sequences, yielding tuples of (sequence_record, [alias_records])
Both records are dicts.
"""
for srec in self.sequences:
arecs = self.aliases.fetch_aliases(srec["seq_id"])
yield (srec, arecs)
def __str__(self):
return "SeqRepo(root_dir={self._root_dir}, writeable={self._writeable})".format(self=self)
def commit(self):
self.sequences.commit()
self.aliases.commit()
if self._pending_sequences + self._pending_aliases > 0:
_logger.info("Committed {} sequences ({} residues) and {} aliases".format(
self._pending_sequences, self._pending_sequences_len, self._pending_aliases))
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
def fetch(self, alias, start=None, end=None, namespace=None):
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
return self.sequences.fetch(seq_id, start, end)
def fetch_uri(self, uri, start=None, end=None):
"""fetch sequence for URI/CURIE of the form namespace:alias, such as
NCBI:NM_000059.3.
"""
namespace, alias = uri_re.match(uri).groups()
return self.fetch(alias=alias, namespace=namespace, start=start, end=end)
def store(self, seq, nsaliases):
"""nsaliases is a list of dicts, like:
[{"namespace": "en", "alias": "rose"},
{"namespace": "fr", "alias": "rose"},
{"namespace": "es", "alias": "rosa"}]
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
if self._upcase:
seq = seq.upper()
try:
seqhash = bioutils.digests.seq_seqhash(seq)
except Exception as e:
import pprint
_logger.critical("Exception raised for " + pprint.pformat(nsaliases))
raise
seq_id = seqhash
# add sequence if not present
n_seqs_added = n_aliases_added = 0
msg = "sh{nsa_sep}{seq_id:.10s}... ({l} residues; {na} aliases {aliases})".format(
seq_id=seq_id,
l=len(seq),
na=len(nsaliases),
nsa_sep=nsa_sep,
aliases=", ".join("{nsa[namespace]}:{nsa[alias]}".format(nsa=nsa) for nsa in nsaliases))
if seq_id not in self.sequences:
_logger.info("Storing " + msg)
if len(seq) > ct_n_residues: # pragma: no cover
_logger.debug("Precommit for large sequence")
self.commit()
self.sequences.store(seq_id, seq)
n_seqs_added += 1
self._pending_sequences += 1
self._pending_sequences_len += len(seq)
self._pending_aliases += self._update_digest_aliases(seq_id, seq)
else:
_logger.debug("Sequence exists: " + msg)
# add/update external aliases for new and existing sequences
# updating is optimized to load only new <seq_id,ns,alias> tuples
existing_aliases = self.aliases.fetch_aliases(seq_id)
ea_tuples = [(r["seq_id"], r["namespace"], r["alias"]) for r in existing_aliases]
new_tuples = [(seq_id, r["namespace"], r["alias"]) for r in nsaliases]
upd_tuples = set(new_tuples) - set(ea_tuples)
if upd_tuples:
_logger.info("{} new aliases for {}".format(len(upd_tuples), msg))
for _, namespace, alias in upd_tuples:
self.aliases.store_alias(seq_id=seq_id, namespace=namespace, alias=alias)
self._pending_aliases += len(upd_tuples)
n_aliases_added += len(upd_tuples)
if (self._pending_sequences > ct_n_seqs or self._pending_aliases > ct_n_aliases
or self._pending_sequences_len > ct_n_residues): # pragma: no cover
_logger.info("Hit commit thresholds ({self._pending_sequences} sequences, "
"{self._pending_aliases} aliases, {self._pending_sequences_len} residues)".format(self=self))
self.commit()
return n_seqs_added, n_aliases_added
def translate_identifier(self, identifier, target_namespaces=None, translate_ncbi_namespace=None):
"""Given a string identifier, return a list of aliases (as
identifiers) that refer to the same sequence.
"""
namespace, alias = identifier.split(nsa_sep) if nsa_sep in identifier else (None, identifier)
aliases = self.translate_alias(alias=alias,
namespace=namespace,
target_namespaces=target_namespaces,
translate_ncbi_namespace=translate_ncbi_namespace)
return [nsa_sep.join((a["namespace"], a["alias"])) for a in aliases]
############################################################################
# Internal Methods
def _get_unique_seqid(self, alias, namespace):
"""given alias and namespace, return seq_id if exactly one distinct
sequence id is found, raise KeyError if there's no match, or
raise ValueError if there's more than one match.
"""
recs = self.aliases.find_aliases(alias=alias, namespace=namespace)
seq_ids = set(r["seq_id"] for r in recs)
if len(seq_ids) == 0:
raise KeyError("Alias {} (namespace: {})".format(alias, namespace))
if len(seq_ids) > 1:
# This should only happen when namespace is None
raise KeyError("Alias {} (namespace: {}): not unique".format(alias, namespace))
return seq_ids.pop()
def _update_digest_aliases(self, seq_id, seq):
"""compute digest aliases for seq and update; returns number of digest
aliases (some of which may have already existed)
For the moment, sha512 is computed for seq_id separately from
the sha512 here. We should fix that.
"""
ir = bioutils.digests.seq_vmc_identifier(seq)
seq_aliases = [
{
"namespace": ir["namespace"],
"alias": ir["accession"],
},
{
"namespace": "SHA1",
"alias": bioutils.digests.seq_sha1(seq)
},
{
"namespace": "MD5",
"alias": bioutils.digests.seq_md5(seq)
},
{
"namespace": "SEGUID",
"alias": bioutils.digests.seq_seguid(seq)
},
]
for sa in seq_aliases:
self.aliases.store_alias(seq_id=seq_id, **sa)
return len(seq_aliases)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/seqrepo.py | SeqRepo.translate_identifier | python | def translate_identifier(self, identifier, target_namespaces=None, translate_ncbi_namespace=None):
namespace, alias = identifier.split(nsa_sep) if nsa_sep in identifier else (None, identifier)
aliases = self.translate_alias(alias=alias,
namespace=namespace,
target_namespaces=target_namespaces,
translate_ncbi_namespace=translate_ncbi_namespace)
return [nsa_sep.join((a["namespace"], a["alias"])) for a in aliases] | Given a string identifier, return a list of aliases (as
identifiers) that refer to the same sequence. | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqrepo.py#L191-L201 | [
"def translate_alias(self, alias, namespace=None, target_namespaces=None, translate_ncbi_namespace=None):\n \"\"\"given an alias and optional namespace, return a list of all other\n aliases for same sequence\n\n \"\"\"\n\n if translate_ncbi_namespace is None:\n translate_ncbi_namespace = self.translate_ncbi_namespace\n seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)\n aliases = self.aliases.fetch_aliases(seq_id=seq_id,\n translate_ncbi_namespace=translate_ncbi_namespace)\n if target_namespaces:\n aliases = [a for a in aliases if a[\"namespace\"] in target_namespaces]\n return aliases\n"
] | class SeqRepo(object):
"""Implements a filesystem-backed non-redundant repository of
sequences and sequence aliases.
The current implementation uses block-gzip fasta files for
sequence storage, essentially as a transaction-based journal, and
a very simple sqlite database for aliases.
Updates add new sequence files and new aliases. This approach
means that distribution of updates involve incremental transfers
of sequences and a wholesale replacement of the database.
The pysam.FastaFile module is key here as it provides fasa index
to bgz files and fast sequence slicing.
"""
def __init__(self, root_dir, writeable=False, upcase=True, translate_ncbi_namespace=False, check_same_thread=False):
self._root_dir = root_dir
self._upcase = upcase
self._db_path = os.path.join(self._root_dir, "aliases.sqlite3")
self._seq_path = os.path.join(self._root_dir, "sequences")
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
self._writeable = writeable
self.translate_ncbi_namespace = translate_ncbi_namespace
self._check_same_thread = True if writeable else check_same_thread
if self._writeable:
makedirs(self._root_dir, exist_ok=True)
if not os.path.exists(self._root_dir):
raise OSError("Unable to open SeqRepo directory {}".format(self._root_dir))
self.sequences = FastaDir(self._seq_path, writeable=self._writeable, check_same_thread=self._check_same_thread)
self.aliases = SeqAliasDB(self._db_path,
writeable=self._writeable,
translate_ncbi_namespace=self.translate_ncbi_namespace,
check_same_thread=self._check_same_thread)
def __contains__(self, nsa):
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.aliases.find_aliases(alias=a, namespace=ns).fetchone() is not None
def __getitem__(self, nsa):
# lookup aliases, optionally namespaced, like NM_01234.5 or NCBI:NM_01234.5
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.fetch(alias=a, namespace=ns)
def __iter__(self):
"""iterate over all sequences, yielding tuples of (sequence_record, [alias_records])
Both records are dicts.
"""
for srec in self.sequences:
arecs = self.aliases.fetch_aliases(srec["seq_id"])
yield (srec, arecs)
def __str__(self):
return "SeqRepo(root_dir={self._root_dir}, writeable={self._writeable})".format(self=self)
def commit(self):
self.sequences.commit()
self.aliases.commit()
if self._pending_sequences + self._pending_aliases > 0:
_logger.info("Committed {} sequences ({} residues) and {} aliases".format(
self._pending_sequences, self._pending_sequences_len, self._pending_aliases))
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
def fetch(self, alias, start=None, end=None, namespace=None):
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
return self.sequences.fetch(seq_id, start, end)
def fetch_uri(self, uri, start=None, end=None):
"""fetch sequence for URI/CURIE of the form namespace:alias, such as
NCBI:NM_000059.3.
"""
namespace, alias = uri_re.match(uri).groups()
return self.fetch(alias=alias, namespace=namespace, start=start, end=end)
def store(self, seq, nsaliases):
"""nsaliases is a list of dicts, like:
[{"namespace": "en", "alias": "rose"},
{"namespace": "fr", "alias": "rose"},
{"namespace": "es", "alias": "rosa"}]
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
if self._upcase:
seq = seq.upper()
try:
seqhash = bioutils.digests.seq_seqhash(seq)
except Exception as e:
import pprint
_logger.critical("Exception raised for " + pprint.pformat(nsaliases))
raise
seq_id = seqhash
# add sequence if not present
n_seqs_added = n_aliases_added = 0
msg = "sh{nsa_sep}{seq_id:.10s}... ({l} residues; {na} aliases {aliases})".format(
seq_id=seq_id,
l=len(seq),
na=len(nsaliases),
nsa_sep=nsa_sep,
aliases=", ".join("{nsa[namespace]}:{nsa[alias]}".format(nsa=nsa) for nsa in nsaliases))
if seq_id not in self.sequences:
_logger.info("Storing " + msg)
if len(seq) > ct_n_residues: # pragma: no cover
_logger.debug("Precommit for large sequence")
self.commit()
self.sequences.store(seq_id, seq)
n_seqs_added += 1
self._pending_sequences += 1
self._pending_sequences_len += len(seq)
self._pending_aliases += self._update_digest_aliases(seq_id, seq)
else:
_logger.debug("Sequence exists: " + msg)
# add/update external aliases for new and existing sequences
# updating is optimized to load only new <seq_id,ns,alias> tuples
existing_aliases = self.aliases.fetch_aliases(seq_id)
ea_tuples = [(r["seq_id"], r["namespace"], r["alias"]) for r in existing_aliases]
new_tuples = [(seq_id, r["namespace"], r["alias"]) for r in nsaliases]
upd_tuples = set(new_tuples) - set(ea_tuples)
if upd_tuples:
_logger.info("{} new aliases for {}".format(len(upd_tuples), msg))
for _, namespace, alias in upd_tuples:
self.aliases.store_alias(seq_id=seq_id, namespace=namespace, alias=alias)
self._pending_aliases += len(upd_tuples)
n_aliases_added += len(upd_tuples)
if (self._pending_sequences > ct_n_seqs or self._pending_aliases > ct_n_aliases
or self._pending_sequences_len > ct_n_residues): # pragma: no cover
_logger.info("Hit commit thresholds ({self._pending_sequences} sequences, "
"{self._pending_aliases} aliases, {self._pending_sequences_len} residues)".format(self=self))
self.commit()
return n_seqs_added, n_aliases_added
def translate_alias(self, alias, namespace=None, target_namespaces=None, translate_ncbi_namespace=None):
"""given an alias and optional namespace, return a list of all other
aliases for same sequence
"""
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
aliases = self.aliases.fetch_aliases(seq_id=seq_id,
translate_ncbi_namespace=translate_ncbi_namespace)
if target_namespaces:
aliases = [a for a in aliases if a["namespace"] in target_namespaces]
return aliases
############################################################################
# Internal Methods
def _get_unique_seqid(self, alias, namespace):
"""given alias and namespace, return seq_id if exactly one distinct
sequence id is found, raise KeyError if there's no match, or
raise ValueError if there's more than one match.
"""
recs = self.aliases.find_aliases(alias=alias, namespace=namespace)
seq_ids = set(r["seq_id"] for r in recs)
if len(seq_ids) == 0:
raise KeyError("Alias {} (namespace: {})".format(alias, namespace))
if len(seq_ids) > 1:
# This should only happen when namespace is None
raise KeyError("Alias {} (namespace: {}): not unique".format(alias, namespace))
return seq_ids.pop()
def _update_digest_aliases(self, seq_id, seq):
"""compute digest aliases for seq and update; returns number of digest
aliases (some of which may have already existed)
For the moment, sha512 is computed for seq_id separately from
the sha512 here. We should fix that.
"""
ir = bioutils.digests.seq_vmc_identifier(seq)
seq_aliases = [
{
"namespace": ir["namespace"],
"alias": ir["accession"],
},
{
"namespace": "SHA1",
"alias": bioutils.digests.seq_sha1(seq)
},
{
"namespace": "MD5",
"alias": bioutils.digests.seq_md5(seq)
},
{
"namespace": "SEGUID",
"alias": bioutils.digests.seq_seguid(seq)
},
]
for sa in seq_aliases:
self.aliases.store_alias(seq_id=seq_id, **sa)
return len(seq_aliases)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/seqrepo.py | SeqRepo._get_unique_seqid | python | def _get_unique_seqid(self, alias, namespace):
recs = self.aliases.find_aliases(alias=alias, namespace=namespace)
seq_ids = set(r["seq_id"] for r in recs)
if len(seq_ids) == 0:
raise KeyError("Alias {} (namespace: {})".format(alias, namespace))
if len(seq_ids) > 1:
# This should only happen when namespace is None
raise KeyError("Alias {} (namespace: {}): not unique".format(alias, namespace))
return seq_ids.pop() | given alias and namespace, return seq_id if exactly one distinct
sequence id is found, raise KeyError if there's no match, or
raise ValueError if there's more than one match. | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqrepo.py#L207-L221 | null | class SeqRepo(object):
"""Implements a filesystem-backed non-redundant repository of
sequences and sequence aliases.
The current implementation uses block-gzip fasta files for
sequence storage, essentially as a transaction-based journal, and
a very simple sqlite database for aliases.
Updates add new sequence files and new aliases. This approach
means that distribution of updates involve incremental transfers
of sequences and a wholesale replacement of the database.
The pysam.FastaFile module is key here as it provides fasa index
to bgz files and fast sequence slicing.
"""
def __init__(self, root_dir, writeable=False, upcase=True, translate_ncbi_namespace=False, check_same_thread=False):
self._root_dir = root_dir
self._upcase = upcase
self._db_path = os.path.join(self._root_dir, "aliases.sqlite3")
self._seq_path = os.path.join(self._root_dir, "sequences")
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
self._writeable = writeable
self.translate_ncbi_namespace = translate_ncbi_namespace
self._check_same_thread = True if writeable else check_same_thread
if self._writeable:
makedirs(self._root_dir, exist_ok=True)
if not os.path.exists(self._root_dir):
raise OSError("Unable to open SeqRepo directory {}".format(self._root_dir))
self.sequences = FastaDir(self._seq_path, writeable=self._writeable, check_same_thread=self._check_same_thread)
self.aliases = SeqAliasDB(self._db_path,
writeable=self._writeable,
translate_ncbi_namespace=self.translate_ncbi_namespace,
check_same_thread=self._check_same_thread)
def __contains__(self, nsa):
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.aliases.find_aliases(alias=a, namespace=ns).fetchone() is not None
def __getitem__(self, nsa):
# lookup aliases, optionally namespaced, like NM_01234.5 or NCBI:NM_01234.5
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.fetch(alias=a, namespace=ns)
def __iter__(self):
"""iterate over all sequences, yielding tuples of (sequence_record, [alias_records])
Both records are dicts.
"""
for srec in self.sequences:
arecs = self.aliases.fetch_aliases(srec["seq_id"])
yield (srec, arecs)
def __str__(self):
return "SeqRepo(root_dir={self._root_dir}, writeable={self._writeable})".format(self=self)
def commit(self):
self.sequences.commit()
self.aliases.commit()
if self._pending_sequences + self._pending_aliases > 0:
_logger.info("Committed {} sequences ({} residues) and {} aliases".format(
self._pending_sequences, self._pending_sequences_len, self._pending_aliases))
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
def fetch(self, alias, start=None, end=None, namespace=None):
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
return self.sequences.fetch(seq_id, start, end)
def fetch_uri(self, uri, start=None, end=None):
"""fetch sequence for URI/CURIE of the form namespace:alias, such as
NCBI:NM_000059.3.
"""
namespace, alias = uri_re.match(uri).groups()
return self.fetch(alias=alias, namespace=namespace, start=start, end=end)
def store(self, seq, nsaliases):
"""nsaliases is a list of dicts, like:
[{"namespace": "en", "alias": "rose"},
{"namespace": "fr", "alias": "rose"},
{"namespace": "es", "alias": "rosa"}]
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
if self._upcase:
seq = seq.upper()
try:
seqhash = bioutils.digests.seq_seqhash(seq)
except Exception as e:
import pprint
_logger.critical("Exception raised for " + pprint.pformat(nsaliases))
raise
seq_id = seqhash
# add sequence if not present
n_seqs_added = n_aliases_added = 0
msg = "sh{nsa_sep}{seq_id:.10s}... ({l} residues; {na} aliases {aliases})".format(
seq_id=seq_id,
l=len(seq),
na=len(nsaliases),
nsa_sep=nsa_sep,
aliases=", ".join("{nsa[namespace]}:{nsa[alias]}".format(nsa=nsa) for nsa in nsaliases))
if seq_id not in self.sequences:
_logger.info("Storing " + msg)
if len(seq) > ct_n_residues: # pragma: no cover
_logger.debug("Precommit for large sequence")
self.commit()
self.sequences.store(seq_id, seq)
n_seqs_added += 1
self._pending_sequences += 1
self._pending_sequences_len += len(seq)
self._pending_aliases += self._update_digest_aliases(seq_id, seq)
else:
_logger.debug("Sequence exists: " + msg)
# add/update external aliases for new and existing sequences
# updating is optimized to load only new <seq_id,ns,alias> tuples
existing_aliases = self.aliases.fetch_aliases(seq_id)
ea_tuples = [(r["seq_id"], r["namespace"], r["alias"]) for r in existing_aliases]
new_tuples = [(seq_id, r["namespace"], r["alias"]) for r in nsaliases]
upd_tuples = set(new_tuples) - set(ea_tuples)
if upd_tuples:
_logger.info("{} new aliases for {}".format(len(upd_tuples), msg))
for _, namespace, alias in upd_tuples:
self.aliases.store_alias(seq_id=seq_id, namespace=namespace, alias=alias)
self._pending_aliases += len(upd_tuples)
n_aliases_added += len(upd_tuples)
if (self._pending_sequences > ct_n_seqs or self._pending_aliases > ct_n_aliases
or self._pending_sequences_len > ct_n_residues): # pragma: no cover
_logger.info("Hit commit thresholds ({self._pending_sequences} sequences, "
"{self._pending_aliases} aliases, {self._pending_sequences_len} residues)".format(self=self))
self.commit()
return n_seqs_added, n_aliases_added
def translate_alias(self, alias, namespace=None, target_namespaces=None, translate_ncbi_namespace=None):
"""given an alias and optional namespace, return a list of all other
aliases for same sequence
"""
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
aliases = self.aliases.fetch_aliases(seq_id=seq_id,
translate_ncbi_namespace=translate_ncbi_namespace)
if target_namespaces:
aliases = [a for a in aliases if a["namespace"] in target_namespaces]
return aliases
def translate_identifier(self, identifier, target_namespaces=None, translate_ncbi_namespace=None):
"""Given a string identifier, return a list of aliases (as
identifiers) that refer to the same sequence.
"""
namespace, alias = identifier.split(nsa_sep) if nsa_sep in identifier else (None, identifier)
aliases = self.translate_alias(alias=alias,
namespace=namespace,
target_namespaces=target_namespaces,
translate_ncbi_namespace=translate_ncbi_namespace)
return [nsa_sep.join((a["namespace"], a["alias"])) for a in aliases]
############################################################################
# Internal Methods
def _update_digest_aliases(self, seq_id, seq):
"""compute digest aliases for seq and update; returns number of digest
aliases (some of which may have already existed)
For the moment, sha512 is computed for seq_id separately from
the sha512 here. We should fix that.
"""
ir = bioutils.digests.seq_vmc_identifier(seq)
seq_aliases = [
{
"namespace": ir["namespace"],
"alias": ir["accession"],
},
{
"namespace": "SHA1",
"alias": bioutils.digests.seq_sha1(seq)
},
{
"namespace": "MD5",
"alias": bioutils.digests.seq_md5(seq)
},
{
"namespace": "SEGUID",
"alias": bioutils.digests.seq_seguid(seq)
},
]
for sa in seq_aliases:
self.aliases.store_alias(seq_id=seq_id, **sa)
return len(seq_aliases)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/seqrepo.py | SeqRepo._update_digest_aliases | python | def _update_digest_aliases(self, seq_id, seq):
ir = bioutils.digests.seq_vmc_identifier(seq)
seq_aliases = [
{
"namespace": ir["namespace"],
"alias": ir["accession"],
},
{
"namespace": "SHA1",
"alias": bioutils.digests.seq_sha1(seq)
},
{
"namespace": "MD5",
"alias": bioutils.digests.seq_md5(seq)
},
{
"namespace": "SEGUID",
"alias": bioutils.digests.seq_seguid(seq)
},
]
for sa in seq_aliases:
self.aliases.store_alias(seq_id=seq_id, **sa)
return len(seq_aliases) | compute digest aliases for seq and update; returns number of digest
aliases (some of which may have already existed)
For the moment, sha512 is computed for seq_id separately from
the sha512 here. We should fix that. | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqrepo.py#L224-L255 | null | class SeqRepo(object):
"""Implements a filesystem-backed non-redundant repository of
sequences and sequence aliases.
The current implementation uses block-gzip fasta files for
sequence storage, essentially as a transaction-based journal, and
a very simple sqlite database for aliases.
Updates add new sequence files and new aliases. This approach
means that distribution of updates involve incremental transfers
of sequences and a wholesale replacement of the database.
The pysam.FastaFile module is key here as it provides fasa index
to bgz files and fast sequence slicing.
"""
def __init__(self, root_dir, writeable=False, upcase=True, translate_ncbi_namespace=False, check_same_thread=False):
self._root_dir = root_dir
self._upcase = upcase
self._db_path = os.path.join(self._root_dir, "aliases.sqlite3")
self._seq_path = os.path.join(self._root_dir, "sequences")
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
self._writeable = writeable
self.translate_ncbi_namespace = translate_ncbi_namespace
self._check_same_thread = True if writeable else check_same_thread
if self._writeable:
makedirs(self._root_dir, exist_ok=True)
if not os.path.exists(self._root_dir):
raise OSError("Unable to open SeqRepo directory {}".format(self._root_dir))
self.sequences = FastaDir(self._seq_path, writeable=self._writeable, check_same_thread=self._check_same_thread)
self.aliases = SeqAliasDB(self._db_path,
writeable=self._writeable,
translate_ncbi_namespace=self.translate_ncbi_namespace,
check_same_thread=self._check_same_thread)
def __contains__(self, nsa):
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.aliases.find_aliases(alias=a, namespace=ns).fetchone() is not None
def __getitem__(self, nsa):
# lookup aliases, optionally namespaced, like NM_01234.5 or NCBI:NM_01234.5
ns, a = nsa.split(nsa_sep) if nsa_sep in nsa else (None, nsa)
return self.fetch(alias=a, namespace=ns)
def __iter__(self):
"""iterate over all sequences, yielding tuples of (sequence_record, [alias_records])
Both records are dicts.
"""
for srec in self.sequences:
arecs = self.aliases.fetch_aliases(srec["seq_id"])
yield (srec, arecs)
def __str__(self):
return "SeqRepo(root_dir={self._root_dir}, writeable={self._writeable})".format(self=self)
def commit(self):
self.sequences.commit()
self.aliases.commit()
if self._pending_sequences + self._pending_aliases > 0:
_logger.info("Committed {} sequences ({} residues) and {} aliases".format(
self._pending_sequences, self._pending_sequences_len, self._pending_aliases))
self._pending_sequences = 0
self._pending_sequences_len = 0
self._pending_aliases = 0
def fetch(self, alias, start=None, end=None, namespace=None):
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
return self.sequences.fetch(seq_id, start, end)
def fetch_uri(self, uri, start=None, end=None):
"""fetch sequence for URI/CURIE of the form namespace:alias, such as
NCBI:NM_000059.3.
"""
namespace, alias = uri_re.match(uri).groups()
return self.fetch(alias=alias, namespace=namespace, start=start, end=end)
def store(self, seq, nsaliases):
"""nsaliases is a list of dicts, like:
[{"namespace": "en", "alias": "rose"},
{"namespace": "fr", "alias": "rose"},
{"namespace": "es", "alias": "rosa"}]
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
if self._upcase:
seq = seq.upper()
try:
seqhash = bioutils.digests.seq_seqhash(seq)
except Exception as e:
import pprint
_logger.critical("Exception raised for " + pprint.pformat(nsaliases))
raise
seq_id = seqhash
# add sequence if not present
n_seqs_added = n_aliases_added = 0
msg = "sh{nsa_sep}{seq_id:.10s}... ({l} residues; {na} aliases {aliases})".format(
seq_id=seq_id,
l=len(seq),
na=len(nsaliases),
nsa_sep=nsa_sep,
aliases=", ".join("{nsa[namespace]}:{nsa[alias]}".format(nsa=nsa) for nsa in nsaliases))
if seq_id not in self.sequences:
_logger.info("Storing " + msg)
if len(seq) > ct_n_residues: # pragma: no cover
_logger.debug("Precommit for large sequence")
self.commit()
self.sequences.store(seq_id, seq)
n_seqs_added += 1
self._pending_sequences += 1
self._pending_sequences_len += len(seq)
self._pending_aliases += self._update_digest_aliases(seq_id, seq)
else:
_logger.debug("Sequence exists: " + msg)
# add/update external aliases for new and existing sequences
# updating is optimized to load only new <seq_id,ns,alias> tuples
existing_aliases = self.aliases.fetch_aliases(seq_id)
ea_tuples = [(r["seq_id"], r["namespace"], r["alias"]) for r in existing_aliases]
new_tuples = [(seq_id, r["namespace"], r["alias"]) for r in nsaliases]
upd_tuples = set(new_tuples) - set(ea_tuples)
if upd_tuples:
_logger.info("{} new aliases for {}".format(len(upd_tuples), msg))
for _, namespace, alias in upd_tuples:
self.aliases.store_alias(seq_id=seq_id, namespace=namespace, alias=alias)
self._pending_aliases += len(upd_tuples)
n_aliases_added += len(upd_tuples)
if (self._pending_sequences > ct_n_seqs or self._pending_aliases > ct_n_aliases
or self._pending_sequences_len > ct_n_residues): # pragma: no cover
_logger.info("Hit commit thresholds ({self._pending_sequences} sequences, "
"{self._pending_aliases} aliases, {self._pending_sequences_len} residues)".format(self=self))
self.commit()
return n_seqs_added, n_aliases_added
def translate_alias(self, alias, namespace=None, target_namespaces=None, translate_ncbi_namespace=None):
"""given an alias and optional namespace, return a list of all other
aliases for same sequence
"""
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
seq_id = self._get_unique_seqid(alias=alias, namespace=namespace)
aliases = self.aliases.fetch_aliases(seq_id=seq_id,
translate_ncbi_namespace=translate_ncbi_namespace)
if target_namespaces:
aliases = [a for a in aliases if a["namespace"] in target_namespaces]
return aliases
def translate_identifier(self, identifier, target_namespaces=None, translate_ncbi_namespace=None):
"""Given a string identifier, return a list of aliases (as
identifiers) that refer to the same sequence.
"""
namespace, alias = identifier.split(nsa_sep) if nsa_sep in identifier else (None, identifier)
aliases = self.translate_alias(alias=alias,
namespace=namespace,
target_namespaces=target_namespaces,
translate_ncbi_namespace=translate_ncbi_namespace)
return [nsa_sep.join((a["namespace"], a["alias"])) for a in aliases]
############################################################################
# Internal Methods
def _get_unique_seqid(self, alias, namespace):
"""given alias and namespace, return seq_id if exactly one distinct
sequence id is found, raise KeyError if there's no match, or
raise ValueError if there's more than one match.
"""
recs = self.aliases.find_aliases(alias=alias, namespace=namespace)
seq_ids = set(r["seq_id"] for r in recs)
if len(seq_ids) == 0:
raise KeyError("Alias {} (namespace: {})".format(alias, namespace))
if len(seq_ids) > 1:
# This should only happen when namespace is None
raise KeyError("Alias {} (namespace: {}): not unique".format(alias, namespace))
return seq_ids.pop()
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/seqaliasdb/seqaliasdb.py | SeqAliasDB.fetch_aliases | python | def fetch_aliases(self, seq_id, current_only=True, translate_ncbi_namespace=None):
return [dict(r) for r in self.find_aliases(seq_id=seq_id,
current_only=current_only,
translate_ncbi_namespace=translate_ncbi_namespace)] | return list of alias annotation records (dicts) for a given seq_id | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqaliasdb/seqaliasdb.py#L58-L62 | null | class SeqAliasDB(object):
"""Implements a sqlite database of sequence aliases
"""
def __init__(self, db_path, writeable=False, translate_ncbi_namespace=False, check_same_thread=True):
self._db_path = db_path
self._db = None
self._writeable = writeable
self.translate_ncbi_namespace = translate_ncbi_namespace
if self._writeable:
self._upgrade_db()
self._db = sqlite3.connect(self._db_path, check_same_thread=check_same_thread)
schema_version = self.schema_version()
self._db.row_factory = sqlite3.Row
# if we're not at the expected schema version for this code, bail
if schema_version != expected_schema_version: # pragma: no cover
raise RuntimeError("Upgrade required: Database schema"
"version is {} and code expects {}".format(schema_version, expected_schema_version))
# ############################################################################
# Special methods
def __contains__(self, seq_id):
c = self._db.execute("select exists(select 1 from seqalias where seq_id = ? limit 1) as ex",
(seq_id, )).fetchone()
return True if c["ex"] else False
# ############################################################################
# Public methods
def commit(self):
if self._writeable:
self._db.commit()
def find_aliases(self, seq_id=None, namespace=None, alias=None, current_only=True, translate_ncbi_namespace=None):
"""returns iterator over alias annotation records that match criteria
The arguments, all optional, restrict the records that are
returned. Without arguments, all aliases are returned.
If arguments contain %, the `like` comparison operator is
used. Otherwise arguments must match exactly.
"""
clauses = []
params = []
def eq_or_like(s):
return "like" if "%" in s else "="
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
if alias is not None:
clauses += ["alias {} ?".format(eq_or_like(alias))]
params += [alias]
if namespace is not None:
# Switch to using RefSeq for RefSeq accessions
# issue #38: translate "RefSeq" to "NCBI" to enable RefSeq lookups
# issue #31: later breaking change, translate database
if namespace == "RefSeq":
namespace = "NCBI"
clauses += ["namespace {} ?".format(eq_or_like(namespace))]
params += [namespace]
if seq_id is not None:
clauses += ["seq_id {} ?".format(eq_or_like(seq_id))]
params += [seq_id]
if current_only:
clauses += ["is_current = 1"]
cols = ["seqalias_id", "seq_id", "alias", "added", "is_current"]
if translate_ncbi_namespace:
cols += ["case namespace when 'NCBI' then 'RefSeq' else namespace end as namespace"]
else:
cols += ["namespace"]
sql = "select {cols} from seqalias".format(cols=", ".join(cols))
if clauses:
sql += " where " + " and ".join("(" + c + ")" for c in clauses)
sql += " order by seq_id, namespace, alias"
_logger.debug("Executing: " + sql)
return self._db.execute(sql, params)
def schema_version(self):
"""return schema version as integer"""
return int(self._db.execute("select value from meta where key = 'schema version'").fetchone()[0])
def stats(self):
sql = """select count(*) as n_aliases, sum(is_current) as n_current,
count(distinct seq_id) as n_sequences, count(distinct namespace) as
n_namespaces, min(added) as min_ts, max(added) as max_ts from
seqalias;"""
return dict(self._db.execute(sql).fetchone())
def store_alias(self, seq_id, namespace, alias):
"""associate a namespaced alias with a sequence
Alias association with sequences is idempotent: duplicate
associations are discarded silently.
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
log_pfx = "store({q},{n},{a})".format(n=namespace, a=alias, q=seq_id)
try:
c = self._db.execute("insert into seqalias (seq_id, namespace, alias) values (?, ?, ?)", (seq_id, namespace,
alias))
# success => new record
return c.lastrowid
except sqlite3.IntegrityError:
pass
# IntegrityError fall-through
# existing record is guaranteed to exist uniquely; fetchone() should always succeed
current_rec = self.find_aliases(namespace=namespace, alias=alias).fetchone()
# if seq_id matches current record, it's a duplicate (seq_id, namespace, alias) tuple
# and we return current record
if current_rec["seq_id"] == seq_id:
_logger.debug(log_pfx + ": duplicate record")
return current_rec["seqalias_id"]
# otherwise, we're reassigning; deprecate old record, then retry
_logger.debug(log_pfx + ": collision; deprecating {s1}".format(s1=current_rec["seq_id"]))
self._db.execute("update seqalias set is_current = 0 where seqalias_id = ?", [current_rec["seqalias_id"]])
return self.store_alias(seq_id, namespace, alias)
# ############################################################################
# Internal methods
def _dump_aliases(self): # pragma: no cover
import prettytable
fields = "seqalias_id seq_id namespace alias added is_current".split()
pt = prettytable.PrettyTable(field_names=fields)
for r in self._db.execute("select * from seqalias"):
pt.add_row([r[f] for f in fields])
print(pt)
def _upgrade_db(self):
"""upgrade db using scripts for specified (current) schema version"""
migration_path = "_data/migrations"
sqlite3.connect(self._db_path).close() # ensure that it exists
db_url = "sqlite:///" + self._db_path
backend = yoyo.get_backend(db_url)
migration_dir = pkg_resources.resource_filename(__package__, migration_path)
migrations = yoyo.read_migrations(migration_dir)
assert len(migrations) > 0, "no migration scripts found -- wrong migraion path for " + __package__
migrations_to_apply = backend.to_apply(migrations)
backend.apply_migrations(migrations_to_apply)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/seqaliasdb/seqaliasdb.py | SeqAliasDB.find_aliases | python | def find_aliases(self, seq_id=None, namespace=None, alias=None, current_only=True, translate_ncbi_namespace=None):
clauses = []
params = []
def eq_or_like(s):
return "like" if "%" in s else "="
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
if alias is not None:
clauses += ["alias {} ?".format(eq_or_like(alias))]
params += [alias]
if namespace is not None:
# Switch to using RefSeq for RefSeq accessions
# issue #38: translate "RefSeq" to "NCBI" to enable RefSeq lookups
# issue #31: later breaking change, translate database
if namespace == "RefSeq":
namespace = "NCBI"
clauses += ["namespace {} ?".format(eq_or_like(namespace))]
params += [namespace]
if seq_id is not None:
clauses += ["seq_id {} ?".format(eq_or_like(seq_id))]
params += [seq_id]
if current_only:
clauses += ["is_current = 1"]
cols = ["seqalias_id", "seq_id", "alias", "added", "is_current"]
if translate_ncbi_namespace:
cols += ["case namespace when 'NCBI' then 'RefSeq' else namespace end as namespace"]
else:
cols += ["namespace"]
sql = "select {cols} from seqalias".format(cols=", ".join(cols))
if clauses:
sql += " where " + " and ".join("(" + c + ")" for c in clauses)
sql += " order by seq_id, namespace, alias"
_logger.debug("Executing: " + sql)
return self._db.execute(sql, params) | returns iterator over alias annotation records that match criteria
The arguments, all optional, restrict the records that are
returned. Without arguments, all aliases are returned.
If arguments contain %, the `like` comparison operator is
used. Otherwise arguments must match exactly. | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqaliasdb/seqaliasdb.py#L64-L110 | [
"def eq_or_like(s):\n return \"like\" if \"%\" in s else \"=\"\n"
] | class SeqAliasDB(object):
"""Implements a sqlite database of sequence aliases
"""
def __init__(self, db_path, writeable=False, translate_ncbi_namespace=False, check_same_thread=True):
self._db_path = db_path
self._db = None
self._writeable = writeable
self.translate_ncbi_namespace = translate_ncbi_namespace
if self._writeable:
self._upgrade_db()
self._db = sqlite3.connect(self._db_path, check_same_thread=check_same_thread)
schema_version = self.schema_version()
self._db.row_factory = sqlite3.Row
# if we're not at the expected schema version for this code, bail
if schema_version != expected_schema_version: # pragma: no cover
raise RuntimeError("Upgrade required: Database schema"
"version is {} and code expects {}".format(schema_version, expected_schema_version))
# ############################################################################
# Special methods
def __contains__(self, seq_id):
c = self._db.execute("select exists(select 1 from seqalias where seq_id = ? limit 1) as ex",
(seq_id, )).fetchone()
return True if c["ex"] else False
# ############################################################################
# Public methods
def commit(self):
if self._writeable:
self._db.commit()
def fetch_aliases(self, seq_id, current_only=True, translate_ncbi_namespace=None):
"""return list of alias annotation records (dicts) for a given seq_id"""
return [dict(r) for r in self.find_aliases(seq_id=seq_id,
current_only=current_only,
translate_ncbi_namespace=translate_ncbi_namespace)]
def find_aliases(self, seq_id=None, namespace=None, alias=None, current_only=True, translate_ncbi_namespace=None):
"""returns iterator over alias annotation records that match criteria
The arguments, all optional, restrict the records that are
returned. Without arguments, all aliases are returned.
If arguments contain %, the `like` comparison operator is
used. Otherwise arguments must match exactly.
"""
clauses = []
params = []
def eq_or_like(s):
return "like" if "%" in s else "="
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
if alias is not None:
clauses += ["alias {} ?".format(eq_or_like(alias))]
params += [alias]
if namespace is not None:
# Switch to using RefSeq for RefSeq accessions
# issue #38: translate "RefSeq" to "NCBI" to enable RefSeq lookups
# issue #31: later breaking change, translate database
if namespace == "RefSeq":
namespace = "NCBI"
clauses += ["namespace {} ?".format(eq_or_like(namespace))]
params += [namespace]
if seq_id is not None:
clauses += ["seq_id {} ?".format(eq_or_like(seq_id))]
params += [seq_id]
if current_only:
clauses += ["is_current = 1"]
cols = ["seqalias_id", "seq_id", "alias", "added", "is_current"]
if translate_ncbi_namespace:
cols += ["case namespace when 'NCBI' then 'RefSeq' else namespace end as namespace"]
else:
cols += ["namespace"]
sql = "select {cols} from seqalias".format(cols=", ".join(cols))
if clauses:
sql += " where " + " and ".join("(" + c + ")" for c in clauses)
sql += " order by seq_id, namespace, alias"
_logger.debug("Executing: " + sql)
return self._db.execute(sql, params)
def schema_version(self):
"""return schema version as integer"""
return int(self._db.execute("select value from meta where key = 'schema version'").fetchone()[0])
def stats(self):
sql = """select count(*) as n_aliases, sum(is_current) as n_current,
count(distinct seq_id) as n_sequences, count(distinct namespace) as
n_namespaces, min(added) as min_ts, max(added) as max_ts from
seqalias;"""
return dict(self._db.execute(sql).fetchone())
def store_alias(self, seq_id, namespace, alias):
"""associate a namespaced alias with a sequence
Alias association with sequences is idempotent: duplicate
associations are discarded silently.
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
log_pfx = "store({q},{n},{a})".format(n=namespace, a=alias, q=seq_id)
try:
c = self._db.execute("insert into seqalias (seq_id, namespace, alias) values (?, ?, ?)", (seq_id, namespace,
alias))
# success => new record
return c.lastrowid
except sqlite3.IntegrityError:
pass
# IntegrityError fall-through
# existing record is guaranteed to exist uniquely; fetchone() should always succeed
current_rec = self.find_aliases(namespace=namespace, alias=alias).fetchone()
# if seq_id matches current record, it's a duplicate (seq_id, namespace, alias) tuple
# and we return current record
if current_rec["seq_id"] == seq_id:
_logger.debug(log_pfx + ": duplicate record")
return current_rec["seqalias_id"]
# otherwise, we're reassigning; deprecate old record, then retry
_logger.debug(log_pfx + ": collision; deprecating {s1}".format(s1=current_rec["seq_id"]))
self._db.execute("update seqalias set is_current = 0 where seqalias_id = ?", [current_rec["seqalias_id"]])
return self.store_alias(seq_id, namespace, alias)
# ############################################################################
# Internal methods
def _dump_aliases(self): # pragma: no cover
import prettytable
fields = "seqalias_id seq_id namespace alias added is_current".split()
pt = prettytable.PrettyTable(field_names=fields)
for r in self._db.execute("select * from seqalias"):
pt.add_row([r[f] for f in fields])
print(pt)
def _upgrade_db(self):
"""upgrade db using scripts for specified (current) schema version"""
migration_path = "_data/migrations"
sqlite3.connect(self._db_path).close() # ensure that it exists
db_url = "sqlite:///" + self._db_path
backend = yoyo.get_backend(db_url)
migration_dir = pkg_resources.resource_filename(__package__, migration_path)
migrations = yoyo.read_migrations(migration_dir)
assert len(migrations) > 0, "no migration scripts found -- wrong migraion path for " + __package__
migrations_to_apply = backend.to_apply(migrations)
backend.apply_migrations(migrations_to_apply)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/seqaliasdb/seqaliasdb.py | SeqAliasDB.store_alias | python | def store_alias(self, seq_id, namespace, alias):
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
log_pfx = "store({q},{n},{a})".format(n=namespace, a=alias, q=seq_id)
try:
c = self._db.execute("insert into seqalias (seq_id, namespace, alias) values (?, ?, ?)", (seq_id, namespace,
alias))
# success => new record
return c.lastrowid
except sqlite3.IntegrityError:
pass
# IntegrityError fall-through
# existing record is guaranteed to exist uniquely; fetchone() should always succeed
current_rec = self.find_aliases(namespace=namespace, alias=alias).fetchone()
# if seq_id matches current record, it's a duplicate (seq_id, namespace, alias) tuple
# and we return current record
if current_rec["seq_id"] == seq_id:
_logger.debug(log_pfx + ": duplicate record")
return current_rec["seqalias_id"]
# otherwise, we're reassigning; deprecate old record, then retry
_logger.debug(log_pfx + ": collision; deprecating {s1}".format(s1=current_rec["seq_id"]))
self._db.execute("update seqalias set is_current = 0 where seqalias_id = ?", [current_rec["seqalias_id"]])
return self.store_alias(seq_id, namespace, alias) | associate a namespaced alias with a sequence
Alias association with sequences is idempotent: duplicate
associations are discarded silently. | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqaliasdb/seqaliasdb.py#L123-L157 | null | class SeqAliasDB(object):
"""Implements a sqlite database of sequence aliases
"""
def __init__(self, db_path, writeable=False, translate_ncbi_namespace=False, check_same_thread=True):
self._db_path = db_path
self._db = None
self._writeable = writeable
self.translate_ncbi_namespace = translate_ncbi_namespace
if self._writeable:
self._upgrade_db()
self._db = sqlite3.connect(self._db_path, check_same_thread=check_same_thread)
schema_version = self.schema_version()
self._db.row_factory = sqlite3.Row
# if we're not at the expected schema version for this code, bail
if schema_version != expected_schema_version: # pragma: no cover
raise RuntimeError("Upgrade required: Database schema"
"version is {} and code expects {}".format(schema_version, expected_schema_version))
# ############################################################################
# Special methods
def __contains__(self, seq_id):
c = self._db.execute("select exists(select 1 from seqalias where seq_id = ? limit 1) as ex",
(seq_id, )).fetchone()
return True if c["ex"] else False
# ############################################################################
# Public methods
def commit(self):
if self._writeable:
self._db.commit()
def fetch_aliases(self, seq_id, current_only=True, translate_ncbi_namespace=None):
"""return list of alias annotation records (dicts) for a given seq_id"""
return [dict(r) for r in self.find_aliases(seq_id=seq_id,
current_only=current_only,
translate_ncbi_namespace=translate_ncbi_namespace)]
def find_aliases(self, seq_id=None, namespace=None, alias=None, current_only=True, translate_ncbi_namespace=None):
"""returns iterator over alias annotation records that match criteria
The arguments, all optional, restrict the records that are
returned. Without arguments, all aliases are returned.
If arguments contain %, the `like` comparison operator is
used. Otherwise arguments must match exactly.
"""
clauses = []
params = []
def eq_or_like(s):
return "like" if "%" in s else "="
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
if alias is not None:
clauses += ["alias {} ?".format(eq_or_like(alias))]
params += [alias]
if namespace is not None:
# Switch to using RefSeq for RefSeq accessions
# issue #38: translate "RefSeq" to "NCBI" to enable RefSeq lookups
# issue #31: later breaking change, translate database
if namespace == "RefSeq":
namespace = "NCBI"
clauses += ["namespace {} ?".format(eq_or_like(namespace))]
params += [namespace]
if seq_id is not None:
clauses += ["seq_id {} ?".format(eq_or_like(seq_id))]
params += [seq_id]
if current_only:
clauses += ["is_current = 1"]
cols = ["seqalias_id", "seq_id", "alias", "added", "is_current"]
if translate_ncbi_namespace:
cols += ["case namespace when 'NCBI' then 'RefSeq' else namespace end as namespace"]
else:
cols += ["namespace"]
sql = "select {cols} from seqalias".format(cols=", ".join(cols))
if clauses:
sql += " where " + " and ".join("(" + c + ")" for c in clauses)
sql += " order by seq_id, namespace, alias"
_logger.debug("Executing: " + sql)
return self._db.execute(sql, params)
def schema_version(self):
"""return schema version as integer"""
return int(self._db.execute("select value from meta where key = 'schema version'").fetchone()[0])
def stats(self):
sql = """select count(*) as n_aliases, sum(is_current) as n_current,
count(distinct seq_id) as n_sequences, count(distinct namespace) as
n_namespaces, min(added) as min_ts, max(added) as max_ts from
seqalias;"""
return dict(self._db.execute(sql).fetchone())
# ############################################################################
# Internal methods
def _dump_aliases(self): # pragma: no cover
import prettytable
fields = "seqalias_id seq_id namespace alias added is_current".split()
pt = prettytable.PrettyTable(field_names=fields)
for r in self._db.execute("select * from seqalias"):
pt.add_row([r[f] for f in fields])
print(pt)
def _upgrade_db(self):
"""upgrade db using scripts for specified (current) schema version"""
migration_path = "_data/migrations"
sqlite3.connect(self._db_path).close() # ensure that it exists
db_url = "sqlite:///" + self._db_path
backend = yoyo.get_backend(db_url)
migration_dir = pkg_resources.resource_filename(__package__, migration_path)
migrations = yoyo.read_migrations(migration_dir)
assert len(migrations) > 0, "no migration scripts found -- wrong migraion path for " + __package__
migrations_to_apply = backend.to_apply(migrations)
backend.apply_migrations(migrations_to_apply)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/seqaliasdb/seqaliasdb.py | SeqAliasDB._upgrade_db | python | def _upgrade_db(self):
migration_path = "_data/migrations"
sqlite3.connect(self._db_path).close() # ensure that it exists
db_url = "sqlite:///" + self._db_path
backend = yoyo.get_backend(db_url)
migration_dir = pkg_resources.resource_filename(__package__, migration_path)
migrations = yoyo.read_migrations(migration_dir)
assert len(migrations) > 0, "no migration scripts found -- wrong migraion path for " + __package__
migrations_to_apply = backend.to_apply(migrations)
backend.apply_migrations(migrations_to_apply) | upgrade db using scripts for specified (current) schema version | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/seqaliasdb/seqaliasdb.py#L173-L183 | null | class SeqAliasDB(object):
"""Implements a sqlite database of sequence aliases
"""
def __init__(self, db_path, writeable=False, translate_ncbi_namespace=False, check_same_thread=True):
self._db_path = db_path
self._db = None
self._writeable = writeable
self.translate_ncbi_namespace = translate_ncbi_namespace
if self._writeable:
self._upgrade_db()
self._db = sqlite3.connect(self._db_path, check_same_thread=check_same_thread)
schema_version = self.schema_version()
self._db.row_factory = sqlite3.Row
# if we're not at the expected schema version for this code, bail
if schema_version != expected_schema_version: # pragma: no cover
raise RuntimeError("Upgrade required: Database schema"
"version is {} and code expects {}".format(schema_version, expected_schema_version))
# ############################################################################
# Special methods
def __contains__(self, seq_id):
c = self._db.execute("select exists(select 1 from seqalias where seq_id = ? limit 1) as ex",
(seq_id, )).fetchone()
return True if c["ex"] else False
# ############################################################################
# Public methods
def commit(self):
if self._writeable:
self._db.commit()
def fetch_aliases(self, seq_id, current_only=True, translate_ncbi_namespace=None):
"""return list of alias annotation records (dicts) for a given seq_id"""
return [dict(r) for r in self.find_aliases(seq_id=seq_id,
current_only=current_only,
translate_ncbi_namespace=translate_ncbi_namespace)]
def find_aliases(self, seq_id=None, namespace=None, alias=None, current_only=True, translate_ncbi_namespace=None):
"""returns iterator over alias annotation records that match criteria
The arguments, all optional, restrict the records that are
returned. Without arguments, all aliases are returned.
If arguments contain %, the `like` comparison operator is
used. Otherwise arguments must match exactly.
"""
clauses = []
params = []
def eq_or_like(s):
return "like" if "%" in s else "="
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
if alias is not None:
clauses += ["alias {} ?".format(eq_or_like(alias))]
params += [alias]
if namespace is not None:
# Switch to using RefSeq for RefSeq accessions
# issue #38: translate "RefSeq" to "NCBI" to enable RefSeq lookups
# issue #31: later breaking change, translate database
if namespace == "RefSeq":
namespace = "NCBI"
clauses += ["namespace {} ?".format(eq_or_like(namespace))]
params += [namespace]
if seq_id is not None:
clauses += ["seq_id {} ?".format(eq_or_like(seq_id))]
params += [seq_id]
if current_only:
clauses += ["is_current = 1"]
cols = ["seqalias_id", "seq_id", "alias", "added", "is_current"]
if translate_ncbi_namespace:
cols += ["case namespace when 'NCBI' then 'RefSeq' else namespace end as namespace"]
else:
cols += ["namespace"]
sql = "select {cols} from seqalias".format(cols=", ".join(cols))
if clauses:
sql += " where " + " and ".join("(" + c + ")" for c in clauses)
sql += " order by seq_id, namespace, alias"
_logger.debug("Executing: " + sql)
return self._db.execute(sql, params)
def schema_version(self):
"""return schema version as integer"""
return int(self._db.execute("select value from meta where key = 'schema version'").fetchone()[0])
def stats(self):
sql = """select count(*) as n_aliases, sum(is_current) as n_current,
count(distinct seq_id) as n_sequences, count(distinct namespace) as
n_namespaces, min(added) as min_ts, max(added) as max_ts from
seqalias;"""
return dict(self._db.execute(sql).fetchone())
def store_alias(self, seq_id, namespace, alias):
"""associate a namespaced alias with a sequence
Alias association with sequences is idempotent: duplicate
associations are discarded silently.
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
log_pfx = "store({q},{n},{a})".format(n=namespace, a=alias, q=seq_id)
try:
c = self._db.execute("insert into seqalias (seq_id, namespace, alias) values (?, ?, ?)", (seq_id, namespace,
alias))
# success => new record
return c.lastrowid
except sqlite3.IntegrityError:
pass
# IntegrityError fall-through
# existing record is guaranteed to exist uniquely; fetchone() should always succeed
current_rec = self.find_aliases(namespace=namespace, alias=alias).fetchone()
# if seq_id matches current record, it's a duplicate (seq_id, namespace, alias) tuple
# and we return current record
if current_rec["seq_id"] == seq_id:
_logger.debug(log_pfx + ": duplicate record")
return current_rec["seqalias_id"]
# otherwise, we're reassigning; deprecate old record, then retry
_logger.debug(log_pfx + ": collision; deprecating {s1}".format(s1=current_rec["seq_id"]))
self._db.execute("update seqalias set is_current = 0 where seqalias_id = ?", [current_rec["seqalias_id"]])
return self.store_alias(seq_id, namespace, alias)
# ############################################################################
# Internal methods
def _dump_aliases(self): # pragma: no cover
import prettytable
fields = "seqalias_id seq_id namespace alias added is_current".split()
pt = prettytable.PrettyTable(field_names=fields)
for r in self._db.execute("select * from seqalias"):
pt.add_row([r[f] for f in fields])
print(pt)
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/py2compat/_commonpath.py | commonpath | python | def commonpath(paths):
assert os.sep == "/", "tested only on slash-delimited paths"
split_re = re.compile(os.sep + "+")
if len(paths) == 0:
raise ValueError("commonpath() arg is an empty sequence")
spaths = [p.rstrip(os.sep) for p in paths]
splitpaths = [split_re.split(p) for p in spaths]
if all(p.startswith(os.sep) for p in paths):
abs_paths = True
splitpaths = [p[1:] for p in splitpaths]
elif all(not p.startswith(os.sep) for p in paths):
abs_paths = False
else:
raise ValueError("Can't mix absolute and relative paths")
splitpaths0 = splitpaths[0]
splitpaths1n = splitpaths[1:]
min_length = min(len(p) for p in splitpaths)
equal = [i for i in range(min_length) if all(splitpaths0[i] == sp[i] for sp in splitpaths1n)]
max_equal = max(equal or [-1])
commonelems = splitpaths0[:max_equal + 1]
commonpath = os.sep.join(commonelems)
return (os.sep if abs_paths else '') + commonpath | py2 compatible version of py3's os.path.commonpath
>>> commonpath([""])
''
>>> commonpath(["/"])
'/'
>>> commonpath(["/a"])
'/a'
>>> commonpath(["/a//"])
'/a'
>>> commonpath(["/a", "/a"])
'/a'
>>> commonpath(["/a/b", "/a"])
'/a'
>>> commonpath(["/a/b", "/a/b"])
'/a/b'
>>> commonpath(["/a/b/c", "/a/b/d"])
'/a/b'
>>> commonpath(["/a/b/c", "/a/b/d", "//a//b//e//"])
'/a/b'
>>> commonpath([])
Traceback (most recent call last):
...
ValueError: commonpath() arg is an empty sequence
>>> commonpath(["/absolute/path", "relative/path"])
Traceback (most recent call last):
...
ValueError: (Can't mix absolute and relative paths") | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/py2compat/_commonpath.py#L5-L58 | null | import os
import re
if __name__ == "__main__": # pragma: no cover
def cmp1(pathlist):
bi = os.path.commonpath(pathlist)
c = commonpath(pathlist)
print("{eq:5s} {bi:20s} {c:20s} {paths}".format(eq=str(bi == c), bi=bi, c=c, paths=", ".join(pathlist)))
paths = ["/a/b/c", "/a/b/c//", "///a///b///c", "/a/b/d", "/a/b", "/a", "/"]
paths = ["/a/b/c", "/a/b/c//", "///a///b///c", "/a/b/d", "/a/b", "/a", "/"]
for i in range(0, len(paths)):
cmp1(paths[:i + 1])
paths2 = [p.lstrip("/") for p in paths]
for i in range(1, len(paths2)):
cmp1(paths2[:i])
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/cli.py | add_assembly_names | python | def add_assembly_names(opts):
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
assemblies = bioutils.assemblies.get_assemblies()
if opts.reload_all:
assemblies_to_load = sorted(assemblies)
else:
namespaces = [r["namespace"] for r in sr.aliases._db.execute("select distinct namespace from seqalias")]
assemblies_to_load = sorted(k for k in assemblies if k not in namespaces)
_logger.info("{} assemblies to load".format(len(assemblies_to_load)))
ncbi_alias_map = {r["alias"]: r["seq_id"] for r in sr.aliases.find_aliases(namespace="NCBI", current_only=False)}
for assy_name in tqdm.tqdm(assemblies_to_load, unit="assembly"):
_logger.debug("loading " + assy_name)
sequences = assemblies[assy_name]["sequences"]
eq_sequences = [s for s in sequences if s["relationship"] in ("=", "<>")]
if not eq_sequences:
_logger.info("No '=' sequences to load for {an}; skipping".format(an=assy_name))
continue
# all assembled-molecules (1..22, X, Y, MT) have ncbi aliases in seqrepo
not_in_seqrepo = [s["refseq_ac"] for s in eq_sequences if s["refseq_ac"] not in ncbi_alias_map]
if not_in_seqrepo:
_logger.warning("Assembly {an} references {n} accessions not in SeqRepo instance {opts.instance_name} (e.g., {acs})".format(
an=assy_name, n=len(not_in_seqrepo), opts=opts, acs=", ".join(not_in_seqrepo[:5]+["..."]), seqrepo_dir=seqrepo_dir))
if not opts.partial_load:
_logger.warning("Skipping {an} (-p to enable partial loading)".format(an=assy_name))
continue
eq_sequences = [es for es in eq_sequences if es["refseq_ac"] in ncbi_alias_map]
_logger.info("Loading {n} new accessions for assembly {an}".format(an=assy_name, n=len(eq_sequences)))
for s in eq_sequences:
seq_id = ncbi_alias_map[s["refseq_ac"]]
aliases = [{"namespace": assy_name, "alias": a} for a in [s["name"]] + s["aliases"]]
if "genbank_ac" in s and s["genbank_ac"]:
aliases += [{"namespace": "genbank", "alias": s["genbank_ac"]}]
for alias in aliases:
sr.aliases.store_alias(seq_id=seq_id, **alias)
_logger.debug("Added assembly alias {a[namespace]}:{a[alias]} for {seq_id}".format(a=alias, seq_id=seq_id))
sr.commit() | add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10. | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/cli.py#L200-L265 | [
"def commit(self):\n self.sequences.commit()\n self.aliases.commit()\n if self._pending_sequences + self._pending_aliases > 0:\n _logger.info(\"Committed {} sequences ({} residues) and {} aliases\".format(\n self._pending_sequences, self._pending_sequences_len, self._pending_aliases))\n self._pending_sequences = 0\n self._pending_sequences_len = 0\n self._pending_aliases = 0\n"
] | """command line interface to a local SeqRepo repository
SeqRepo is a non-redundant, compressed, journalled, file-based storage
for biological sequences
https://github.com/biocommons/biocommons.seqrepo
Try::
$ seqrepo --help
"""
from __future__ import division, print_function
import argparse
import datetime
import io
import itertools
import logging
import os
import pprint
import re
import shutil
import stat
import sys
import subprocess
import tempfile
import bioutils.assemblies
import bioutils.seqfetcher
import six
import tqdm
from . import __version__, SeqRepo
from .py2compat import commonpath, gzip_open_encoded, makedirs
from .fastaiter import FastaIter
SEQREPO_ROOT_DIR = os.environ.get("SEQREPO_ROOT_DIR", "/usr/local/share/seqrepo")
DEFAULT_INSTANCE_NAME_RW = "master"
DEFAULT_INSTANCE_NAME_RO = "latest"
instance_name_new_re = re.compile(r"^201\d-\d\d-\d\d$") # smells like a new datestamp, 2017-01-17
instance_name_old_re = re.compile(r"^201\d\d\d\d\d$") # smells like an old datestamp, 20170117
instance_name_re = re.compile(r"^201\d-?\d\d-?\d\d$") # smells like a datestamp, 20170117 or 2017-01-17
_logger = logging.getLogger(__name__)
def _get_remote_instances(opts):
line_re = re.compile(r"d[-rwx]{9}\s+[\d,]+ \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} (.+)")
rsync_cmd = [opts.rsync_exe, "--no-motd", "--copy-dirlinks",
opts.remote_host + "::seqrepo"]
_logger.debug("Executing `" + " ".join(rsync_cmd) + "`")
lines = subprocess.check_output(rsync_cmd).decode().splitlines()[1:]
dirs = (m.group(1) for m in (line_re.match(l) for l in lines) if m)
return sorted(list(filter(instance_name_new_re.match, dirs)))
def _get_local_instances(opts):
return sorted(list(filter(instance_name_re.match, os.listdir(opts.root_directory))))
def _latest_instance(opts):
instances = _get_local_instances(opts)
return instances[-1] if instances else None
def _latest_instance_path(opts):
li = _latest_instance(opts)
return os.path.join(opts.root_directory, li) if li else None
def parse_arguments():
top_p = argparse.ArgumentParser(
description=__doc__.split("\n\n")[0],
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog="seqrepo " + __version__ + ". See https://github.com/biocommons/biocommons.seqrepo for more information")
top_p.add_argument("--dry-run", "-n", default=False, action="store_true")
top_p.add_argument("--remote-host", default="dl.biocommons.org", help="rsync server host")
top_p.add_argument("--root-directory", "-r", default=SEQREPO_ROOT_DIR, help="seqrepo root directory (SEQREPO_ROOT_DIR)")
top_p.add_argument("--rsync-exe", default="/usr/bin/rsync", help="path to rsync executable")
top_p.add_argument("--verbose", "-v", action="count", default=0, help="be verbose; multiple accepted")
top_p.add_argument("--version", action="version", version=__version__)
# dest and required bits are to work around a bug in the Python 3 version of argparse
# when no subcommands are provided
# https://stackoverflow.com/questions/22990977/why-does-this-argparse-code-behave-differently-between-python-2-and-3
# http://bugs.python.org/issue9253#msg186387
subparsers = top_p.add_subparsers(title="subcommands", dest="_subcommands")
subparsers.required = True
# add-assembly-names
ap = subparsers.add_parser(
"add-assembly-names", help="add assembly aliases (from bioutils.assemblies) to existing sequences")
ap.set_defaults(func=add_assembly_names)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable (i.e., not a snapshot)")
ap.add_argument(
"--partial-load", "-p", default=False, action="store_true", help="assign assembly aliases even if some sequences are missing")
ap.add_argument(
"--reload-all", "-r", default=False, action="store_true", help="reload all assemblies, not just missing ones")
# export
ap = subparsers.add_parser("export", help="export sequences")
ap.set_defaults(func=export)
ap.add_argument("--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RO, help="instance name")
# fetch-load
ap = subparsers.add_parser("fetch-load", help="fetch remote sequences by accession and load them (low-throughput!)")
ap.set_defaults(func=fetch_load)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable (i.e., not a snapshot)")
ap.add_argument(
"accessions",
nargs="+",
help="accessions (NCBI or Ensembl)", )
ap.add_argument(
"--namespace",
"-n",
required=True,
help="namespace name (e.g., NCBI, Ensembl, LRG)", )
# init
ap = subparsers.add_parser("init", help="initialize seqrepo directory")
ap.set_defaults(func=init)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable (i.e., not a snapshot)")
# list-local-instances
ap = subparsers.add_parser("list-local-instances", help="list local seqrepo instances")
ap.set_defaults(func=list_local_instances)
# list-remote-instances
ap = subparsers.add_parser("list-remote-instances", help="list remote seqrepo instances")
ap.set_defaults(func=list_remote_instances)
# load
ap = subparsers.add_parser("load", help="load a single fasta file")
ap.set_defaults(func=load)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable (i.e., not a snapshot)")
ap.add_argument(
"fasta_files",
nargs="+",
help="fasta files to load (compressed okay)", )
ap.add_argument(
"--namespace",
"-n",
required=True,
help="namespace name (e.g., NCBI, Ensembl, LRG)", )
# pull
ap = subparsers.add_parser("pull", help="pull incremental update from seqrepo mirror")
ap.set_defaults(func=pull)
ap.add_argument("--instance-name", "-i", default=None, help="instance name")
ap.add_argument("--update-latest", "-l", default=False, action="store_true", help="set latest symlink to point to this instance")
# show-status
ap = subparsers.add_parser("show-status", help="show seqrepo status")
ap.set_defaults(func=show_status)
ap.add_argument("--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RO, help="instance name")
# snapshot
ap = subparsers.add_parser("snapshot", help="create a new read-only seqrepo snapshot")
ap.set_defaults(func=snapshot)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable")
ap.add_argument("--destination-name", "-d",
default=datetime.datetime.utcnow().strftime("%F"),
help="destination directory name (must not already exist)")
# start-shell
ap = subparsers.add_parser("start-shell", help="start interactive shell with initialized seqrepo")
ap.set_defaults(func=start_shell)
ap.add_argument("--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RO, help="instance name")
# upgrade
ap = subparsers.add_parser("upgrade", help="upgrade seqrepo database and directory")
ap.set_defaults(func=upgrade)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable")
# update digests
ap = subparsers.add_parser("update-digests", help="update computed digests in place")
ap.set_defaults(func=update_digests)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable")
# update latest (symlink)
ap = subparsers.add_parser("update-latest", help="create symlink `latest` to newest seqrepo instance")
ap.set_defaults(func=update_latest)
opts = top_p.parse_args()
return opts
############################################################################
def add_assembly_names(opts):
"""add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10.
"""
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
assemblies = bioutils.assemblies.get_assemblies()
if opts.reload_all:
assemblies_to_load = sorted(assemblies)
else:
namespaces = [r["namespace"] for r in sr.aliases._db.execute("select distinct namespace from seqalias")]
assemblies_to_load = sorted(k for k in assemblies if k not in namespaces)
_logger.info("{} assemblies to load".format(len(assemblies_to_load)))
ncbi_alias_map = {r["alias"]: r["seq_id"] for r in sr.aliases.find_aliases(namespace="NCBI", current_only=False)}
for assy_name in tqdm.tqdm(assemblies_to_load, unit="assembly"):
_logger.debug("loading " + assy_name)
sequences = assemblies[assy_name]["sequences"]
eq_sequences = [s for s in sequences if s["relationship"] in ("=", "<>")]
if not eq_sequences:
_logger.info("No '=' sequences to load for {an}; skipping".format(an=assy_name))
continue
# all assembled-molecules (1..22, X, Y, MT) have ncbi aliases in seqrepo
not_in_seqrepo = [s["refseq_ac"] for s in eq_sequences if s["refseq_ac"] not in ncbi_alias_map]
if not_in_seqrepo:
_logger.warning("Assembly {an} references {n} accessions not in SeqRepo instance {opts.instance_name} (e.g., {acs})".format(
an=assy_name, n=len(not_in_seqrepo), opts=opts, acs=", ".join(not_in_seqrepo[:5]+["..."]), seqrepo_dir=seqrepo_dir))
if not opts.partial_load:
_logger.warning("Skipping {an} (-p to enable partial loading)".format(an=assy_name))
continue
eq_sequences = [es for es in eq_sequences if es["refseq_ac"] in ncbi_alias_map]
_logger.info("Loading {n} new accessions for assembly {an}".format(an=assy_name, n=len(eq_sequences)))
for s in eq_sequences:
seq_id = ncbi_alias_map[s["refseq_ac"]]
aliases = [{"namespace": assy_name, "alias": a} for a in [s["name"]] + s["aliases"]]
if "genbank_ac" in s and s["genbank_ac"]:
aliases += [{"namespace": "genbank", "alias": s["genbank_ac"]}]
for alias in aliases:
sr.aliases.store_alias(seq_id=seq_id, **alias)
_logger.debug("Added assembly alias {a[namespace]}:{a[alias]} for {seq_id}".format(a=alias, seq_id=seq_id))
sr.commit()
def export(opts):
    """Write every sequence in the instance to stdout in FASTA format.

    The defline lists all aliases as ``namespace:alias`` tokens, namespaces
    sorted and aliases lexicographically sorted within each namespace.
    """

    def _group_aliases_by_namespace(records):
        """Map namespace -> lexicographically sorted list of aliases."""
        ordered = sorted(records, key=lambda r: (r["namespace"], r["alias"]))
        grouped = {}
        for rec in ordered:
            grouped.setdefault(rec["namespace"], []).append(rec["alias"])
        return grouped

    def _chunked(seq, width):
        """Yield consecutive slices of *seq*, each at most *width* long."""
        for start in range(0, len(seq), width):
            yield seq[start:start + width]

    instance_dir = os.path.join(opts.root_directory, opts.instance_name)
    repo = SeqRepo(instance_dir)
    for seq_rec, alias_recs in repo:
        by_ns = _group_aliases_by_namespace(alias_recs)
        defline_ids = []
        for ns, ns_aliases in sorted(by_ns.items()):
            for a in ns_aliases:
                defline_ids.append("{ns}:{a}".format(ns=ns, a=a))
        print(">" + " ".join(defline_ids))
        for line in _chunked(seq_rec["seq"], 100):
            print(line)
def fetch_load(opts):
    """Fetch each requested accession from a remote source and store it.

    Accessions already recorded under ``opts.namespace`` are skipped; each
    newly fetched sequence is committed immediately.
    """
    hide_progress = _logger.getEffectiveLevel() < logging.WARNING
    instance_dir = os.path.join(opts.root_directory, opts.instance_name)
    repo = SeqRepo(instance_dir, writeable=True)
    progress = tqdm.tqdm(opts.accessions, unit="acs", disable=hide_progress)
    for accession in progress:
        progress.set_description(accession)
        cursor = repo.aliases.find_aliases(namespace=opts.namespace, alias=accession)
        if cursor.fetchone() is not None:
            # already present under this namespace; nothing to do
            _logger.info("{ac} already in {sr}".format(ac=accession, sr=repo))
            continue
        sequence = bioutils.seqfetcher.fetch_seq(accession)
        n_sa, n_aa = repo.store(sequence, [{"namespace": opts.namespace, "alias": accession}])
        repo.commit()
def init(opts):
    """Create a fresh, empty seqrepo instance directory.

    Raises IOError if the target directory already exists and is non-empty.
    """
    instance_dir = os.path.join(opts.root_directory, opts.instance_name)
    if os.path.exists(instance_dir) and os.listdir(instance_dir):
        raise IOError("{seqrepo_dir} exists and is not empty".format(seqrepo_dir=instance_dir))
    # constructing a writeable SeqRepo creates the on-disk layout
    sr = SeqRepo(instance_dir, writeable=True)  # flake8: noqa
def list_local_instances(opts):
    """Print the names of seqrepo instances under the local root directory."""
    names = _get_local_instances(opts)
    print("Local instances ({})".format(opts.root_directory))
    for name in names:
        print(" " + name)
def list_remote_instances(opts):
    """Print the instance names available on the remote rsync host."""
    names = _get_remote_instances(opts)
    print("Remote instances ({})".format(opts.remote_host))
    for name in names:
        print(" " + name)
def load(opts):
    """Load sequences from one or more fasta files into the repository.

    Files may be plain, gzip-, or bgzip-compressed; "-" reads from stdin.
    Sequences are stored under ``opts.namespace``; for namespace NCBI,
    pipe-delimited deflines are split into multiple aliases (``ref|`` is
    normalized to ``NCBI``).

    Raises RuntimeError for the retired namespace "-".
    """
    if opts.namespace == "-":
        raise RuntimeError("namespace == '-' is no longer supported")
    disable_bar = _logger.getEffectiveLevel() < logging.WARNING
    defline_re = re.compile(r"(?P<namespace>gi|ref)\|(?P<alias>[^|]+)")
    seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
    sr = SeqRepo(seqrepo_dir, writeable=True)
    n_seqs_seen = n_seqs_added = n_aliases_added = 0
    fn_bar = tqdm.tqdm(opts.fasta_files, unit="file", disable=disable_bar)
    for fn in fn_bar:
        fn_bar.set_description(os.path.basename(fn))
        if fn == "-":
            fh = sys.stdin
        elif fn.endswith(".gz") or fn.endswith(".bgz"):
            fh = gzip_open_encoded(fn, encoding="ascii")  # PY2BAGGAGE
        else:
            fh = io.open(fn, mode="rt", encoding="ascii")
        _logger.info("Opened " + fn)
        try:
            seq_bar = tqdm.tqdm(FastaIter(fh), unit=" seqs", disable=disable_bar, leave=False)
            for rec_id, seq in seq_bar:
                n_seqs_seen += 1
                seq_bar.set_description("sequences: {nsa}/{nss} added/seen; aliases: {naa} added".format(
                    nss=n_seqs_seen, nsa=n_seqs_added, naa=n_aliases_added))
                if opts.namespace == "NCBI" and "|" in rec_id:
                    # NCBI deflines may have multiple accessions, pipe-separated
                    aliases = [m.groupdict() for m in defline_re.finditer(rec_id)]
                    for a in aliases:
                        if a["namespace"] == "ref":
                            a["namespace"] = "NCBI"
                else:
                    aliases = [{"namespace": opts.namespace, "alias": rec_id}]
                n_sa, n_aa = sr.store(seq, aliases)
                n_seqs_added += n_sa
                n_aliases_added += n_aa
        finally:
            # bug fix: file handles were previously never closed, leaking a
            # descriptor per input file; close everything except stdin
            if fh is not sys.stdin:
                fh.close()
    # NOTE(review): unlike fetch_load, there is no sr.commit() here —
    # presumably SeqRepo commits on close/del; verify against SeqRepo
def pull(opts):
    """Mirror a seqrepo instance from the remote rsync server.

    Pulls ``opts.instance_name`` if given (it must exist remotely),
    otherwise the newest remote instance. Instances that already exist
    locally are skipped. The transfer lands in a temporary directory and is
    renamed into place afterwards, so a partial transfer never appears as a
    complete instance.

    Raises KeyError if the requested instance is not on the remote host.
    """
    remote_instances = _get_remote_instances(opts)
    if opts.instance_name:
        instance_name = opts.instance_name
        if instance_name not in remote_instances:
            raise KeyError("{}: not in list of remote instance names".format(instance_name))
    else:
        instance_name = remote_instances[-1]
        _logger.info("most recent seqrepo instance is " + instance_name)
    local_instances = _get_local_instances(opts)
    if instance_name in local_instances:
        _logger.warning("{}: instance already exists; skipping".format(instance_name))
        return
    # mkdtemp guarantees a unique name on the same filesystem as the final
    # destination (required for the os.rename below)
    tmp_dir = tempfile.mkdtemp(dir=opts.root_directory, prefix=instance_name + ".")
    os.rmdir(tmp_dir)  # let rsync create the directory
    cmd = [opts.rsync_exe, "-aHP", "--no-motd"]
    if local_instances:
        # hard-link files unchanged since the newest local instance
        # (rsync --link-dest) to save bandwidth and disk
        latest_local_instance = local_instances[-1]
        cmd += ["--link-dest=" + os.path.join(opts.root_directory, latest_local_instance) + "/"]
    cmd += ["{h}::seqrepo/{i}/".format(h=opts.remote_host, i=instance_name), tmp_dir]
    _logger.debug("Executing: " + " ".join(cmd))
    if not opts.dry_run:
        subprocess.check_call(cmd)
        dst_dir = os.path.join(opts.root_directory, instance_name)
        # publish only after a fully successful transfer
        os.rename(tmp_dir, dst_dir)
        _logger.info("{}: successfully updated ({})".format(instance_name, dst_dir))
        if opts.update_latest:
            update_latest(opts, instance_name)
def show_status(opts):
    """Print a summary of the instance: size, schema versions, and counts.

    Returns the opened (read-only) SeqRepo.
    """
    instance_dir = os.path.join(opts.root_directory, opts.instance_name)
    total_bytes = 0
    for dirpath, dirnames, filenames in os.walk(instance_dir):
        for filename in filenames:
            total_bytes += os.path.getsize(os.path.join(dirpath, filename))
    sr = SeqRepo(instance_dir)
    print("seqrepo {version}".format(version=__version__))
    print("instance directory: {sr._root_dir}, {ts:.1f} GB".format(sr=sr, ts=total_bytes / 1e9))
    print("backends: fastadir (schema {fd_v}), seqaliasdb (schema {sa_v}) ".format(
        fd_v=sr.sequences.schema_version(), sa_v=sr.aliases.schema_version()))
    print("sequences: {ss[n_sequences]} sequences, {ss[tot_length]} residues, {ss[n_files]} files".format(
        ss=sr.sequences.stats()))
    print(
        "aliases: {sa[n_aliases]} aliases, {sa[n_current]} current, {sa[n_namespaces]} namespaces, {sa[n_sequences]} sequences".
        format(sa=sr.aliases.stats()))
    return sr
def snapshot(opts):
    """snapshot a seqrepo data directory by hardlinking sequence files,
    copying sqlite databases, and remove write permissions from directories

    The snapshot is assembled in a temporary directory and renamed into
    place only when complete.

    Raises RuntimeError if the destination would nest inside the source,
    and IOError if the destination already exists.
    """
    seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
    dst_dir = opts.destination_name
    if not dst_dir.startswith("/"):
        # interpret dst_dir as relative to parent dir of seqrepo_dir
        dst_dir = os.path.join(opts.root_directory, dst_dir)
    src_dir = os.path.realpath(seqrepo_dir)
    dst_dir = os.path.realpath(dst_dir)
    if commonpath([src_dir, dst_dir]).startswith(src_dir):
        raise RuntimeError("Cannot nest seqrepo directories " "({} is within {})".format(dst_dir, src_dir))
    if os.path.exists(dst_dir):
        raise IOError(dst_dir + ": File exists")
    tmp_dir = tempfile.mkdtemp(prefix=dst_dir + ".")
    _logger.debug("src_dir = " + src_dir)
    _logger.debug("dst_dir = " + dst_dir)
    _logger.debug("tmp_dir = " + tmp_dir)
    # TODO: cleanup of tmpdir on failure
    makedirs(tmp_dir, exist_ok=True)
    wd = os.getcwd()
    os.chdir(src_dir)
    try:
        # make destination directories (walk is top-down)
        for rp in (os.path.join(dirpath, dirname) for dirpath, dirnames, _ in os.walk(".") for dirname in dirnames):
            dp = os.path.join(tmp_dir, rp)
            os.mkdir(dp)
        # hard link sequence files (the .bgz data is immutable)
        for rp in (os.path.join(dirpath, filename) for dirpath, _, filenames in os.walk(".") for filename in filenames
                   if ".bgz" in filename):
            dp = os.path.join(tmp_dir, rp)
            os.link(rp, dp)
        # copy sqlite databases (mutable, so they cannot be hardlinked)
        for rp in ["aliases.sqlite3", "sequences/db.sqlite3"]:
            dp = os.path.join(tmp_dir, rp)
            shutil.copyfile(rp, dp)
        # recursively drop write perms on snapshot
        mode_aw = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH

        def _drop_write(p):
            # clear all write bits, preserving the other mode bits
            mode = os.lstat(p).st_mode
            new_mode = mode & ~mode_aw
            os.chmod(p, new_mode)

        for dp in (os.path.join(dirpath, dirent)
                   for dirpath, dirnames, filenames in os.walk(tmp_dir) for dirent in dirnames + filenames):
            _drop_write(dp)
        _drop_write(tmp_dir)
        os.rename(tmp_dir, dst_dir)
        _logger.info("snapshot created in " + dst_dir)
    finally:
        # bug fix: the original restored the working directory only on the
        # success path, leaving the process chdir'ed into src_dir if any
        # step above raised; always restore the caller's cwd
        os.chdir(wd)
def start_shell(opts):
    """Open an interactive IPython shell with a read-only SeqRepo as ``sr``."""
    seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
    sr = SeqRepo(seqrepo_dir)
    import IPython
    banner = "\n".join([
        "seqrepo (https://github.com/biocommons/biocommons.seqrepo/)",
        "version: " + __version__,
        "instance path: " + seqrepo_dir,
    ])
    IPython.embed(header=banner)
def upgrade(opts):
    """Upgrade the instance: opening it writeably migrates it to the
    current schema, then report the resulting schema version."""
    seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
    sr = SeqRepo(seqrepo_dir, writeable=True)
    # bug fix: `sr.seqinfo` is not used anywhere else in this module —
    # show_status reads schema versions via sr.sequences/sr.aliases — so
    # `sr.seqinfo.schema_version()` raised AttributeError
    print("upgraded to schema version {}".format(sr.sequences.schema_version()))
def update_digests(opts):
    """Recompute digest-based aliases for every stored sequence in place."""
    instance_dir = os.path.join(opts.root_directory, opts.instance_name)
    repo = SeqRepo(instance_dir, writeable=True)
    for record in tqdm.tqdm(repo.sequences):
        repo._update_digest_aliases(record["seq_id"], record["seq"])
def update_latest(opts, mri=None):
    """Repoint the ``latest`` symlink at instance *mri*.

    When *mri* is not given, the newest local instance is used; if there
    are no local instances, an error is logged and nothing is changed.
    """
    if not mri:
        instances = _get_local_instances(opts)
        if not instances:
            _logger.error("No seqrepo instances in {opts.root_directory}".format(opts=opts))
            return
        mri = instances[-1]
    link_path = os.path.join(opts.root_directory, "latest")
    try:
        os.unlink(link_path)
    except OSError:
        # the link did not exist yet; that's fine
        pass
    os.symlink(mri, link_path)
    _logger.info("Linked `latest` -> `{}`".format(mri))
def main():
    """CLI entry point: parse arguments, configure logging, dispatch."""
    opts = parse_arguments()
    # -v raises verbosity: default WARN, -v -> INFO, -vv or more -> DEBUG
    if opts.verbose == 0:
        level = logging.WARN
    elif opts.verbose == 1:
        level = logging.INFO
    else:
        level = logging.DEBUG
    logging.basicConfig(level=level)
    opts.func(opts)


if __name__ == "__main__":
    main()
|
biocommons/biocommons.seqrepo | biocommons/seqrepo/cli.py | snapshot | python | def snapshot(opts):
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
dst_dir = opts.destination_name
if not dst_dir.startswith("/"):
# interpret dst_dir as relative to parent dir of seqrepo_dir
dst_dir = os.path.join(opts.root_directory, dst_dir)
src_dir = os.path.realpath(seqrepo_dir)
dst_dir = os.path.realpath(dst_dir)
if commonpath([src_dir, dst_dir]).startswith(src_dir):
raise RuntimeError("Cannot nest seqrepo directories " "({} is within {})".format(dst_dir, src_dir))
if os.path.exists(dst_dir):
raise IOError(dst_dir + ": File exists")
tmp_dir = tempfile.mkdtemp(prefix=dst_dir + ".")
_logger.debug("src_dir = " + src_dir)
_logger.debug("dst_dir = " + dst_dir)
_logger.debug("tmp_dir = " + tmp_dir)
# TODO: cleanup of tmpdir on failure
makedirs(tmp_dir, exist_ok=True)
wd = os.getcwd()
os.chdir(src_dir)
# make destination directories (walk is top-down)
for rp in (os.path.join(dirpath, dirname) for dirpath, dirnames, _ in os.walk(".") for dirname in dirnames):
dp = os.path.join(tmp_dir, rp)
os.mkdir(dp)
# hard link sequence files
for rp in (os.path.join(dirpath, filename) for dirpath, _, filenames in os.walk(".") for filename in filenames
if ".bgz" in filename):
dp = os.path.join(tmp_dir, rp)
os.link(rp, dp)
# copy sqlite databases
for rp in ["aliases.sqlite3", "sequences/db.sqlite3"]:
dp = os.path.join(tmp_dir, rp)
shutil.copyfile(rp, dp)
# recursively drop write perms on snapshot
mode_aw = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
def _drop_write(p):
mode = os.lstat(p).st_mode
new_mode = mode & ~mode_aw
os.chmod(p, new_mode)
for dp in (os.path.join(dirpath, dirent)
for dirpath, dirnames, filenames in os.walk(tmp_dir) for dirent in dirnames + filenames):
_drop_write(dp)
_drop_write(tmp_dir)
os.rename(tmp_dir, dst_dir)
_logger.info("snapshot created in " + dst_dir)
os.chdir(wd) | snapshot a seqrepo data directory by hardlinking sequence files,
copying sqlite databases, and remove write permissions from directories | train | https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/cli.py#L423-L486 | [
"def commonpath(paths):\n \"\"\"py2 compatible version of py3's os.path.commonpath\n\n >>> commonpath([\"\"])\n ''\n >>> commonpath([\"/\"])\n '/'\n >>> commonpath([\"/a\"])\n '/a'\n >>> commonpath([\"/a//\"])\n '/a'\n >>> commonpath([\"/a\", \"/a\"])\n '/a'\n >>> commonpath([\"/a/b\", \"/a\"])\n '/a'\n >>> commonpath([\"/a/b\", \"/a/b\"])\n '/a/b'\n >>> commonpath([\"/a/b/c\", \"/a/b/d\"])\n '/a/b'\n >>> commonpath([\"/a/b/c\", \"/a/b/d\", \"//a//b//e//\"])\n '/a/b'\n >>> commonpath([])\n Traceback (most recent call last):\n ...\n ValueError: commonpath() arg is an empty sequence\n >>> commonpath([\"/absolute/path\", \"relative/path\"])\n Traceback (most recent call last):\n ... \n ValueError: (Can't mix absolute and relative paths\")\n \"\"\"\n assert os.sep == \"/\", \"tested only on slash-delimited paths\"\n split_re = re.compile(os.sep + \"+\")\n\n if len(paths) == 0:\n raise ValueError(\"commonpath() arg is an empty sequence\")\n\n spaths = [p.rstrip(os.sep) for p in paths]\n splitpaths = [split_re.split(p) for p in spaths]\n if all(p.startswith(os.sep) for p in paths):\n abs_paths = True\n splitpaths = [p[1:] for p in splitpaths]\n elif all(not p.startswith(os.sep) for p in paths):\n abs_paths = False\n else:\n raise ValueError(\"Can't mix absolute and relative paths\")\n\n splitpaths0 = splitpaths[0]\n splitpaths1n = splitpaths[1:]\n min_length = min(len(p) for p in splitpaths)\n equal = [i for i in range(min_length) if all(splitpaths0[i] == sp[i] for sp in splitpaths1n)]\n max_equal = max(equal or [-1])\n commonelems = splitpaths0[:max_equal + 1]\n commonpath = os.sep.join(commonelems)\n return (os.sep if abs_paths else '') + commonpath\n",
"def _drop_write(p):\n mode = os.lstat(p).st_mode\n new_mode = mode & ~mode_aw\n os.chmod(p, new_mode)\n"
] | """command line interface to a local SeqRepo repository
SeqRepo is a non-redundant, compressed, journalled, file-based storage
for biological sequences
https://github.com/biocommons/biocommons.seqrepo
Try::
$ seqrepo --help
"""
from __future__ import division, print_function
import argparse
import datetime
import io
import itertools
import logging
import os
import pprint
import re
import shutil
import stat
import sys
import subprocess
import tempfile
import bioutils.assemblies
import bioutils.seqfetcher
import six
import tqdm
from . import __version__, SeqRepo
from .py2compat import commonpath, gzip_open_encoded, makedirs
from .fastaiter import FastaIter
SEQREPO_ROOT_DIR = os.environ.get("SEQREPO_ROOT_DIR", "/usr/local/share/seqrepo")
DEFAULT_INSTANCE_NAME_RW = "master"
DEFAULT_INSTANCE_NAME_RO = "latest"
instance_name_new_re = re.compile(r"^201\d-\d\d-\d\d$") # smells like a new datestamp, 2017-01-17
instance_name_old_re = re.compile(r"^201\d\d\d\d\d$") # smells like an old datestamp, 20170117
instance_name_re = re.compile(r"^201\d-?\d\d-?\d\d$") # smells like a datestamp, 20170117 or 2017-01-17
_logger = logging.getLogger(__name__)
def _get_remote_instances(opts):
line_re = re.compile(r"d[-rwx]{9}\s+[\d,]+ \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} (.+)")
rsync_cmd = [opts.rsync_exe, "--no-motd", "--copy-dirlinks",
opts.remote_host + "::seqrepo"]
_logger.debug("Executing `" + " ".join(rsync_cmd) + "`")
lines = subprocess.check_output(rsync_cmd).decode().splitlines()[1:]
dirs = (m.group(1) for m in (line_re.match(l) for l in lines) if m)
return sorted(list(filter(instance_name_new_re.match, dirs)))
def _get_local_instances(opts):
return sorted(list(filter(instance_name_re.match, os.listdir(opts.root_directory))))
def _latest_instance(opts):
instances = _get_local_instances(opts)
return instances[-1] if instances else None
def _latest_instance_path(opts):
li = _latest_instance(opts)
return os.path.join(opts.root_directory, li) if li else None
def parse_arguments():
top_p = argparse.ArgumentParser(
description=__doc__.split("\n\n")[0],
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog="seqrepo " + __version__ + ". See https://github.com/biocommons/biocommons.seqrepo for more information")
top_p.add_argument("--dry-run", "-n", default=False, action="store_true")
top_p.add_argument("--remote-host", default="dl.biocommons.org", help="rsync server host")
top_p.add_argument("--root-directory", "-r", default=SEQREPO_ROOT_DIR, help="seqrepo root directory (SEQREPO_ROOT_DIR)")
top_p.add_argument("--rsync-exe", default="/usr/bin/rsync", help="path to rsync executable")
top_p.add_argument("--verbose", "-v", action="count", default=0, help="be verbose; multiple accepted")
top_p.add_argument("--version", action="version", version=__version__)
# dest and required bits are to work around a bug in the Python 3 version of argparse
# when no subcommands are provided
# https://stackoverflow.com/questions/22990977/why-does-this-argparse-code-behave-differently-between-python-2-and-3
# http://bugs.python.org/issue9253#msg186387
subparsers = top_p.add_subparsers(title="subcommands", dest="_subcommands")
subparsers.required = True
# add-assembly-names
ap = subparsers.add_parser(
"add-assembly-names", help="add assembly aliases (from bioutils.assemblies) to existing sequences")
ap.set_defaults(func=add_assembly_names)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable (i.e., not a snapshot)")
ap.add_argument(
"--partial-load", "-p", default=False, action="store_true", help="assign assembly aliases even if some sequences are missing")
ap.add_argument(
"--reload-all", "-r", default=False, action="store_true", help="reload all assemblies, not just missing ones")
# export
ap = subparsers.add_parser("export", help="export sequences")
ap.set_defaults(func=export)
ap.add_argument("--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RO, help="instance name")
# fetch-load
ap = subparsers.add_parser("fetch-load", help="fetch remote sequences by accession and load them (low-throughput!)")
ap.set_defaults(func=fetch_load)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable (i.e., not a snapshot)")
ap.add_argument(
"accessions",
nargs="+",
help="accessions (NCBI or Ensembl)", )
ap.add_argument(
"--namespace",
"-n",
required=True,
help="namespace name (e.g., NCBI, Ensembl, LRG)", )
# init
ap = subparsers.add_parser("init", help="initialize seqrepo directory")
ap.set_defaults(func=init)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable (i.e., not a snapshot)")
# list-local-instances
ap = subparsers.add_parser("list-local-instances", help="list local seqrepo instances")
ap.set_defaults(func=list_local_instances)
# list-remote-instances
ap = subparsers.add_parser("list-remote-instances", help="list remote seqrepo instances")
ap.set_defaults(func=list_remote_instances)
# load
ap = subparsers.add_parser("load", help="load a single fasta file")
ap.set_defaults(func=load)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable (i.e., not a snapshot)")
ap.add_argument(
"fasta_files",
nargs="+",
help="fasta files to load (compressed okay)", )
ap.add_argument(
"--namespace",
"-n",
required=True,
help="namespace name (e.g., NCBI, Ensembl, LRG)", )
# pull
ap = subparsers.add_parser("pull", help="pull incremental update from seqrepo mirror")
ap.set_defaults(func=pull)
ap.add_argument("--instance-name", "-i", default=None, help="instance name")
ap.add_argument("--update-latest", "-l", default=False, action="store_true", help="set latest symlink to point to this instance")
# show-status
ap = subparsers.add_parser("show-status", help="show seqrepo status")
ap.set_defaults(func=show_status)
ap.add_argument("--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RO, help="instance name")
# snapshot
ap = subparsers.add_parser("snapshot", help="create a new read-only seqrepo snapshot")
ap.set_defaults(func=snapshot)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable")
ap.add_argument("--destination-name", "-d",
default=datetime.datetime.utcnow().strftime("%F"),
help="destination directory name (must not already exist)")
# start-shell
ap = subparsers.add_parser("start-shell", help="start interactive shell with initialized seqrepo")
ap.set_defaults(func=start_shell)
ap.add_argument("--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RO, help="instance name")
# upgrade
ap = subparsers.add_parser("upgrade", help="upgrade seqrepo database and directory")
ap.set_defaults(func=upgrade)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable")
# update digests
ap = subparsers.add_parser("update-digests", help="update computed digests in place")
ap.set_defaults(func=update_digests)
ap.add_argument(
"--instance-name", "-i", default=DEFAULT_INSTANCE_NAME_RW, help="instance name; must be writeable")
# update latest (symlink)
ap = subparsers.add_parser("update-latest", help="create symlink `latest` to newest seqrepo instance")
ap.set_defaults(func=update_latest)
opts = top_p.parse_args()
return opts
############################################################################
def add_assembly_names(opts):
"""add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10.
"""
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
assemblies = bioutils.assemblies.get_assemblies()
if opts.reload_all:
assemblies_to_load = sorted(assemblies)
else:
namespaces = [r["namespace"] for r in sr.aliases._db.execute("select distinct namespace from seqalias")]
assemblies_to_load = sorted(k for k in assemblies if k not in namespaces)
_logger.info("{} assemblies to load".format(len(assemblies_to_load)))
ncbi_alias_map = {r["alias"]: r["seq_id"] for r in sr.aliases.find_aliases(namespace="NCBI", current_only=False)}
for assy_name in tqdm.tqdm(assemblies_to_load, unit="assembly"):
_logger.debug("loading " + assy_name)
sequences = assemblies[assy_name]["sequences"]
eq_sequences = [s for s in sequences if s["relationship"] in ("=", "<>")]
if not eq_sequences:
_logger.info("No '=' sequences to load for {an}; skipping".format(an=assy_name))
continue
# all assembled-molecules (1..22, X, Y, MT) have ncbi aliases in seqrepo
not_in_seqrepo = [s["refseq_ac"] for s in eq_sequences if s["refseq_ac"] not in ncbi_alias_map]
if not_in_seqrepo:
_logger.warning("Assembly {an} references {n} accessions not in SeqRepo instance {opts.instance_name} (e.g., {acs})".format(
an=assy_name, n=len(not_in_seqrepo), opts=opts, acs=", ".join(not_in_seqrepo[:5]+["..."]), seqrepo_dir=seqrepo_dir))
if not opts.partial_load:
_logger.warning("Skipping {an} (-p to enable partial loading)".format(an=assy_name))
continue
eq_sequences = [es for es in eq_sequences if es["refseq_ac"] in ncbi_alias_map]
_logger.info("Loading {n} new accessions for assembly {an}".format(an=assy_name, n=len(eq_sequences)))
for s in eq_sequences:
seq_id = ncbi_alias_map[s["refseq_ac"]]
aliases = [{"namespace": assy_name, "alias": a} for a in [s["name"]] + s["aliases"]]
if "genbank_ac" in s and s["genbank_ac"]:
aliases += [{"namespace": "genbank", "alias": s["genbank_ac"]}]
for alias in aliases:
sr.aliases.store_alias(seq_id=seq_id, **alias)
_logger.debug("Added assembly alias {a[namespace]}:{a[alias]} for {seq_id}".format(a=alias, seq_id=seq_id))
sr.commit()
def export(opts):
def convert_alias_records_to_ns_dict(records):
"""converts a set of alias db records to a dict like {ns: [aliases], ...}
aliases are lexicographicaly sorted
"""
records = sorted(records, key=lambda r: (r["namespace"], r["alias"]))
return {g: [r["alias"] for r in gi] for g, gi in itertools.groupby(records, key=lambda r: r["namespace"])}
def wrap_lines(seq, line_width):
for i in range(0, len(seq), line_width):
yield seq[i:i + line_width]
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir)
for srec, arecs in sr:
nsad = convert_alias_records_to_ns_dict(arecs)
aliases = ["{ns}:{a}".format(ns=ns, a=a) for ns, aliases in sorted(nsad.items()) for a in aliases]
print(">" + " ".join(aliases))
for l in wrap_lines(srec["seq"], 100):
print(l)
def fetch_load(opts):
disable_bar = _logger.getEffectiveLevel() < logging.WARNING
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
ac_bar = tqdm.tqdm(opts.accessions, unit="acs", disable=disable_bar)
for ac in ac_bar:
ac_bar.set_description(ac)
aliases_cur = sr.aliases.find_aliases(namespace=opts.namespace, alias=ac)
if aliases_cur.fetchone() is not None:
_logger.info("{ac} already in {sr}".format(ac=ac, sr=sr))
continue
seq = bioutils.seqfetcher.fetch_seq(ac)
aliases = [{"namespace": opts.namespace, "alias": ac}]
n_sa, n_aa = sr.store(seq, aliases)
sr.commit()
def init(opts):
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
if os.path.exists(seqrepo_dir) and len(os.listdir(seqrepo_dir)) > 0:
raise IOError("{seqrepo_dir} exists and is not empty".format(seqrepo_dir=seqrepo_dir))
sr = SeqRepo(seqrepo_dir, writeable=True) # flake8: noqa
def list_local_instances(opts):
instances = _get_local_instances(opts)
print("Local instances ({})".format(opts.root_directory))
for i in instances:
print(" " + i)
def list_remote_instances(opts):
instances = _get_remote_instances(opts)
print("Remote instances ({})".format(opts.remote_host))
for i in instances:
print(" " + i)
def load(opts):
if opts.namespace == "-":
raise RuntimeError("namespace == '-' is no longer supported")
disable_bar = _logger.getEffectiveLevel() < logging.WARNING
defline_re = re.compile(r"(?P<namespace>gi|ref)\|(?P<alias>[^|]+)")
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
n_seqs_seen = n_seqs_added = n_aliases_added = 0
fn_bar = tqdm.tqdm(opts.fasta_files, unit="file", disable=disable_bar)
for fn in fn_bar:
fn_bar.set_description(os.path.basename(fn))
if fn == "-":
fh = sys.stdin
elif fn.endswith(".gz") or fn.endswith(".bgz"):
fh = gzip_open_encoded(fn, encoding="ascii") # PY2BAGGAGE
else:
fh = io.open(fn, mode="rt", encoding="ascii")
_logger.info("Opened " + fn)
seq_bar = tqdm.tqdm(FastaIter(fh), unit=" seqs", disable=disable_bar, leave=False)
for rec_id, seq in seq_bar:
n_seqs_seen += 1
seq_bar.set_description("sequences: {nsa}/{nss} added/seen; aliases: {naa} added".format(
nss=n_seqs_seen, nsa=n_seqs_added, naa=n_aliases_added))
if opts.namespace == "NCBI" and "|" in rec_id:
# NCBI deflines may have multiple accessions, pipe-separated
aliases = [m.groupdict() for m in defline_re.finditer(rec_id)]
for a in aliases:
if a["namespace"] == "ref":
a["namespace"] = "NCBI"
else:
aliases = [{"namespace": opts.namespace, "alias": rec_id}]
n_sa, n_aa = sr.store(seq, aliases)
n_seqs_added += n_sa
n_aliases_added += n_aa
def pull(opts):
remote_instances = _get_remote_instances(opts)
if opts.instance_name:
instance_name = opts.instance_name
if instance_name not in remote_instances:
raise KeyError("{}: not in list of remote instance names".format(instance_name))
else:
instance_name = remote_instances[-1]
_logger.info("most recent seqrepo instance is " + instance_name)
local_instances = _get_local_instances(opts)
if instance_name in local_instances:
_logger.warning("{}: instance already exists; skipping".format(instance_name))
return
tmp_dir = tempfile.mkdtemp(dir=opts.root_directory, prefix=instance_name + ".")
os.rmdir(tmp_dir) # let rsync create it the directory
cmd = [opts.rsync_exe, "-aHP", "--no-motd"]
if local_instances:
latest_local_instance = local_instances[-1]
cmd += ["--link-dest=" + os.path.join(opts.root_directory, latest_local_instance) + "/"]
cmd += ["{h}::seqrepo/{i}/".format(h=opts.remote_host, i=instance_name), tmp_dir]
_logger.debug("Executing: " + " ".join(cmd))
if not opts.dry_run:
subprocess.check_call(cmd)
dst_dir = os.path.join(opts.root_directory, instance_name)
os.rename(tmp_dir, dst_dir)
_logger.info("{}: successfully updated ({})".format(instance_name, dst_dir))
if opts.update_latest:
update_latest(opts, instance_name)
def show_status(opts):
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
tot_size = sum(
os.path.getsize(os.path.join(dirpath, filename))
for dirpath, dirnames, filenames in os.walk(seqrepo_dir) for filename in filenames)
sr = SeqRepo(seqrepo_dir)
print("seqrepo {version}".format(version=__version__))
print("instance directory: {sr._root_dir}, {ts:.1f} GB".format(sr=sr, ts=tot_size / 1e9))
print("backends: fastadir (schema {fd_v}), seqaliasdb (schema {sa_v}) ".format(
fd_v=sr.sequences.schema_version(), sa_v=sr.aliases.schema_version()))
print("sequences: {ss[n_sequences]} sequences, {ss[tot_length]} residues, {ss[n_files]} files".format(
ss=sr.sequences.stats()))
print(
"aliases: {sa[n_aliases]} aliases, {sa[n_current]} current, {sa[n_namespaces]} namespaces, {sa[n_sequences]} sequences".
format(sa=sr.aliases.stats()))
return sr
def start_shell(opts):
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir)
import IPython
IPython.embed(header="\n".join([
"seqrepo (https://github.com/biocommons/biocommons.seqrepo/)", "version: " + __version__,
"instance path: " + seqrepo_dir
]))
def upgrade(opts):
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
print("upgraded to schema version {}".format(sr.seqinfo.schema_version()))
def update_digests(opts):
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
for srec in tqdm.tqdm(sr.sequences):
sr._update_digest_aliases(srec["seq_id"], srec["seq"])
def update_latest(opts, mri=None):
if not mri:
instances = _get_local_instances(opts)
if not instances:
_logger.error("No seqrepo instances in {opts.root_directory}".format(opts=opts))
return
mri = instances[-1]
dst = os.path.join(opts.root_directory, "latest")
try:
os.unlink(dst)
except OSError:
pass
os.symlink(mri, dst)
_logger.info("Linked `latest` -> `{}`".format(mri))
def main():
opts = parse_arguments()
verbose_log_level = (logging.WARN if opts.verbose == 0 else
logging.INFO if opts.verbose == 1 else
logging.DEBUG)
logging.basicConfig(level=verbose_log_level)
opts.func(opts)
if __name__ == "__main__":
main()
|
chrippa/python-librtmp | librtmp/packet.py | RTMPPacket.body | python | def body(self):
view = ffi.buffer(self.packet.m_body, self.packet.m_nBodySize)
return view[:] | The body of the packet. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/packet.py#L103-L107 | null | class RTMPPacket(object):
@classmethod
def _from_pointer(cls, pointer):
packet = cls.__new__(cls)
packet.packet = pointer
return packet
def __init__(self, type, format, channel, timestamp=0,
absolute_timestamp=False, body=None):
self.packet = ffi.new("RTMPPacket*")
self.type = type
self.format = format
self.channel = channel
self.timestamp = timestamp
self.absolute_timestamp = absolute_timestamp
if not body:
body = b""
self.body = body
@property
def format(self):
"""Format of the packet."""
return self.packet.m_headerType
@format.setter
def format(self, value):
self.packet.m_headerType = int(value)
@property
def type(self):
"""Type of the packet."""
return self.packet.m_packetType
@type.setter
def type(self, value):
self.packet.m_packetType = int(value)
@property
def channel(self):
"""Channel of the packet."""
return self.packet.m_nChannel
@channel.setter
def channel(self, value):
self.packet.m_nChannel = int(value)
@property
def timestamp(self):
"""Timestamp of the packet."""
return self.packet.m_nTimeStamp
@timestamp.setter
def timestamp(self, value):
self.packet.m_nTimeStamp = int(value)
@property
def absolute_timestamp(self):
"""True if the timestamp is absolute."""
return bool(self.packet.m_hasAbsTimestamp)
@absolute_timestamp.setter
def absolute_timestamp(self, value):
self.packet.m_hasAbsTimestamp = int(bool(value))
@property
@body.setter
def body(self, value):
size = len(value)
librtmp.RTMPPacket_Alloc(self.packet, size)
view = ffi.buffer(self.packet.m_body, size)
view[:] = value
self.packet.m_nBodySize = size
def dump(self):
"""Dumps packet to logger."""
librtmp.RTMPPacket_Dump(self.packet)
def __del__(self):
librtmp.RTMPPacket_Free(self.packet)
|
chrippa/python-librtmp | librtmp/logging.py | add_log_callback | python | def add_log_callback(callback):
global _log_callbacks
if not callable(callback):
raise ValueError("Callback must be callable")
_log_callbacks.add(callback)
return callback | Adds a log callback. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/logging.py#L36-L44 | null | from . import ffi, librtmp
from .utils import add_signal_handler
__all__ = ["set_log_level", "get_log_level",
"set_log_output", "add_log_callback", "remove_log_callback",
"LOG_CRIT", "LOG_ERROR", "LOG_WARNING",
"LOG_INFO", "LOG_DEBUG", "LOG_DEBUG2",
"LOG_ALL"]
(LOG_CRIT, LOG_ERROR, LOG_WARNING, LOG_INFO, LOG_DEBUG,
LOG_DEBUG2, LOG_ALL) = range(1, 8)
_log_callbacks = set()
_log_level = LOG_ALL
_log_output = None
def set_log_level(level):
"""Sets log level."""
global _log_level
_log_level = level
def get_log_level():
"""Returns current log level."""
return _log_level
def set_log_output(fd):
"""Sets log output to a open file-object."""
global _log_output
_log_output = fd
def remove_log_callback(callback):
"""Removes a log callback."""
global _log_callbacks
_log_callbacks.remove(callback)
@ffi.callback("void(int,char*)")
def _log_callback(level, msg):
msg = ffi.string(msg)
msg = msg.decode("utf8", "ignore")
for callback in _log_callbacks:
callback(level, msg)
if hasattr(_log_output, "write") and level <= _log_level:
_log_output.write(msg + "\n")
librtmp.python_log_callback = _log_callback
librtmp.RTMP_LogSetCallback(librtmp.c_log_callback)
add_signal_handler()
|
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.read | python | def read(self, size):
# If enabled tell the server that our buffer can fit the whole
# stream, this often increases throughput alot.
if self._update_buffer and not self._updated_buffer and self.duration:
self.update_buffer((self.duration * 1000) + 5000)
self._updated_buffer = True
if not self._buf or len(self._buf) != size:
self._buf = ffi.new("char[]", size)
self._view = ffi.buffer(self._buf, size)
res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)
if res < 0:
raise IOError("Failed to read data")
return self._view[:res] | Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L21-L43 | [
"def update_buffer(self, ms):\n \"\"\"Tells the server how big our buffer is (in milliseconds).\"\"\"\n librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))\n librtmp.RTMP_UpdateBufferMS(self.client.rtmp)\n"
] | class RTMPStream(IOBase):
"""A file-like interface to a stream within
a RTMP session."""
def __init__(self, client, update_buffer=True):
self.client = client
self._buf = self._view = None
self._closed = False
self._update_buffer = update_buffer
self._updated_buffer = False
def write(self, data):
"""Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
"""
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
raise ValueError("A bytes argument is required")
res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))
if res < 0:
raise IOError("Failed to write data")
return res
def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause")
def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause")
def seek(self, time):
"""Attempts to seek in the stream.
:param time: int, Time to seek to in seconds
"""
res = librtmp.RTMP_SendSeek(self.client.rtmp, time)
if res < 1:
raise RTMPError("Failed to seek")
def close(self):
"""Closes the connection."""
if not self._closed:
self._closed = True
self.client.close()
def update_buffer(self, ms):
"""Tells the server how big our buffer is (in milliseconds)."""
librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))
librtmp.RTMP_UpdateBufferMS(self.client.rtmp)
@property
def duration(self):
"""The duration of the stream."""
return librtmp.RTMP_GetDuration(self.client.rtmp)
|
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.write | python | def write(self, data):
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
raise ValueError("A bytes argument is required")
res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))
if res < 0:
raise IOError("Failed to write data")
return res | Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L45-L67 | null | class RTMPStream(IOBase):
"""A file-like interface to a stream within
a RTMP session."""
def __init__(self, client, update_buffer=True):
self.client = client
self._buf = self._view = None
self._closed = False
self._update_buffer = update_buffer
self._updated_buffer = False
def read(self, size):
"""Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error.
"""
# If enabled tell the server that our buffer can fit the whole
# stream, this often increases throughput alot.
if self._update_buffer and not self._updated_buffer and self.duration:
self.update_buffer((self.duration * 1000) + 5000)
self._updated_buffer = True
if not self._buf or len(self._buf) != size:
self._buf = ffi.new("char[]", size)
self._view = ffi.buffer(self._buf, size)
res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)
if res < 0:
raise IOError("Failed to read data")
return self._view[:res]
def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause")
def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause")
def seek(self, time):
"""Attempts to seek in the stream.
:param time: int, Time to seek to in seconds
"""
res = librtmp.RTMP_SendSeek(self.client.rtmp, time)
if res < 1:
raise RTMPError("Failed to seek")
def close(self):
"""Closes the connection."""
if not self._closed:
self._closed = True
self.client.close()
def update_buffer(self, ms):
"""Tells the server how big our buffer is (in milliseconds)."""
librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))
librtmp.RTMP_UpdateBufferMS(self.client.rtmp)
@property
def duration(self):
"""The duration of the stream."""
return librtmp.RTMP_GetDuration(self.client.rtmp)
|
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.pause | python | def pause(self):
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause") | Pauses the stream. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L69-L74 | null | class RTMPStream(IOBase):
"""A file-like interface to a stream within
a RTMP session."""
def __init__(self, client, update_buffer=True):
self.client = client
self._buf = self._view = None
self._closed = False
self._update_buffer = update_buffer
self._updated_buffer = False
def read(self, size):
"""Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error.
"""
# If enabled tell the server that our buffer can fit the whole
# stream, this often increases throughput alot.
if self._update_buffer and not self._updated_buffer and self.duration:
self.update_buffer((self.duration * 1000) + 5000)
self._updated_buffer = True
if not self._buf or len(self._buf) != size:
self._buf = ffi.new("char[]", size)
self._view = ffi.buffer(self._buf, size)
res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)
if res < 0:
raise IOError("Failed to read data")
return self._view[:res]
def write(self, data):
"""Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
"""
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
raise ValueError("A bytes argument is required")
res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))
if res < 0:
raise IOError("Failed to write data")
return res
def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause")
def seek(self, time):
"""Attempts to seek in the stream.
:param time: int, Time to seek to in seconds
"""
res = librtmp.RTMP_SendSeek(self.client.rtmp, time)
if res < 1:
raise RTMPError("Failed to seek")
def close(self):
"""Closes the connection."""
if not self._closed:
self._closed = True
self.client.close()
def update_buffer(self, ms):
"""Tells the server how big our buffer is (in milliseconds)."""
librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))
librtmp.RTMP_UpdateBufferMS(self.client.rtmp)
@property
def duration(self):
"""The duration of the stream."""
return librtmp.RTMP_GetDuration(self.client.rtmp)
|
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.unpause | python | def unpause(self):
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause") | Unpauses the stream. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L76-L81 | null | class RTMPStream(IOBase):
"""A file-like interface to a stream within
a RTMP session."""
def __init__(self, client, update_buffer=True):
self.client = client
self._buf = self._view = None
self._closed = False
self._update_buffer = update_buffer
self._updated_buffer = False
def read(self, size):
"""Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error.
"""
# If enabled tell the server that our buffer can fit the whole
# stream, this often increases throughput alot.
if self._update_buffer and not self._updated_buffer and self.duration:
self.update_buffer((self.duration * 1000) + 5000)
self._updated_buffer = True
if not self._buf or len(self._buf) != size:
self._buf = ffi.new("char[]", size)
self._view = ffi.buffer(self._buf, size)
res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)
if res < 0:
raise IOError("Failed to read data")
return self._view[:res]
def write(self, data):
"""Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
"""
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
raise ValueError("A bytes argument is required")
res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))
if res < 0:
raise IOError("Failed to write data")
return res
def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause")
def seek(self, time):
"""Attempts to seek in the stream.
:param time: int, Time to seek to in seconds
"""
res = librtmp.RTMP_SendSeek(self.client.rtmp, time)
if res < 1:
raise RTMPError("Failed to seek")
def close(self):
"""Closes the connection."""
if not self._closed:
self._closed = True
self.client.close()
def update_buffer(self, ms):
"""Tells the server how big our buffer is (in milliseconds)."""
librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))
librtmp.RTMP_UpdateBufferMS(self.client.rtmp)
@property
def duration(self):
"""The duration of the stream."""
return librtmp.RTMP_GetDuration(self.client.rtmp)
|
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.seek | python | def seek(self, time):
res = librtmp.RTMP_SendSeek(self.client.rtmp, time)
if res < 1:
raise RTMPError("Failed to seek") | Attempts to seek in the stream.
:param time: int, Time to seek to in seconds | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L83-L92 | null | class RTMPStream(IOBase):
"""A file-like interface to a stream within
a RTMP session."""
def __init__(self, client, update_buffer=True):
self.client = client
self._buf = self._view = None
self._closed = False
self._update_buffer = update_buffer
self._updated_buffer = False
def read(self, size):
"""Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error.
"""
# If enabled tell the server that our buffer can fit the whole
# stream, this often increases throughput alot.
if self._update_buffer and not self._updated_buffer and self.duration:
self.update_buffer((self.duration * 1000) + 5000)
self._updated_buffer = True
if not self._buf or len(self._buf) != size:
self._buf = ffi.new("char[]", size)
self._view = ffi.buffer(self._buf, size)
res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)
if res < 0:
raise IOError("Failed to read data")
return self._view[:res]
def write(self, data):
"""Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
"""
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
raise ValueError("A bytes argument is required")
res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))
if res < 0:
raise IOError("Failed to write data")
return res
def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause")
def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause")
def close(self):
"""Closes the connection."""
if not self._closed:
self._closed = True
self.client.close()
def update_buffer(self, ms):
"""Tells the server how big our buffer is (in milliseconds)."""
librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))
librtmp.RTMP_UpdateBufferMS(self.client.rtmp)
@property
def duration(self):
"""The duration of the stream."""
return librtmp.RTMP_GetDuration(self.client.rtmp)
|
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.close | python | def close(self):
if not self._closed:
self._closed = True
self.client.close() | Closes the connection. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L94-L98 | null | class RTMPStream(IOBase):
"""A file-like interface to a stream within
a RTMP session."""
def __init__(self, client, update_buffer=True):
self.client = client
self._buf = self._view = None
self._closed = False
self._update_buffer = update_buffer
self._updated_buffer = False
def read(self, size):
"""Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error.
"""
# If enabled tell the server that our buffer can fit the whole
# stream, this often increases throughput alot.
if self._update_buffer and not self._updated_buffer and self.duration:
self.update_buffer((self.duration * 1000) + 5000)
self._updated_buffer = True
if not self._buf or len(self._buf) != size:
self._buf = ffi.new("char[]", size)
self._view = ffi.buffer(self._buf, size)
res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)
if res < 0:
raise IOError("Failed to read data")
return self._view[:res]
def write(self, data):
"""Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
"""
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
raise ValueError("A bytes argument is required")
res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))
if res < 0:
raise IOError("Failed to write data")
return res
def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause")
def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause")
def seek(self, time):
"""Attempts to seek in the stream.
:param time: int, Time to seek to in seconds
"""
res = librtmp.RTMP_SendSeek(self.client.rtmp, time)
if res < 1:
raise RTMPError("Failed to seek")
def update_buffer(self, ms):
"""Tells the server how big our buffer is (in milliseconds)."""
librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))
librtmp.RTMP_UpdateBufferMS(self.client.rtmp)
@property
def duration(self):
"""The duration of the stream."""
return librtmp.RTMP_GetDuration(self.client.rtmp)
|
chrippa/python-librtmp | librtmp/stream.py | RTMPStream.update_buffer | python | def update_buffer(self, ms):
librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))
librtmp.RTMP_UpdateBufferMS(self.client.rtmp) | Tells the server how big our buffer is (in milliseconds). | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L100-L103 | null | class RTMPStream(IOBase):
"""A file-like interface to a stream within
a RTMP session."""
def __init__(self, client, update_buffer=True):
self.client = client
self._buf = self._view = None
self._closed = False
self._update_buffer = update_buffer
self._updated_buffer = False
def read(self, size):
"""Attempts to read data from the stream.
:param size: int, The maximum amount of bytes to read.
Raises :exc:`IOError` on error.
"""
# If enabled tell the server that our buffer can fit the whole
# stream, this often increases throughput alot.
if self._update_buffer and not self._updated_buffer and self.duration:
self.update_buffer((self.duration * 1000) + 5000)
self._updated_buffer = True
if not self._buf or len(self._buf) != size:
self._buf = ffi.new("char[]", size)
self._view = ffi.buffer(self._buf, size)
res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)
if res < 0:
raise IOError("Failed to read data")
return self._view[:res]
def write(self, data):
"""Writes data to the stream.
:param data: bytes, FLV data to write to the stream
The data passed can contain multiple FLV tags, but it MUST
always contain complete tags or undefined behaviour might
occur.
Raises :exc:`IOError` on error.
"""
if isinstance(data, bytearray):
data = bytes(data)
if not isinstance(data, byte_types):
raise ValueError("A bytes argument is required")
res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))
if res < 0:
raise IOError("Failed to write data")
return res
def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause")
def unpause(self):
"""Unpauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 0)
if res < 1:
raise RTMPError("Failed to unpause")
def seek(self, time):
"""Attempts to seek in the stream.
:param time: int, Time to seek to in seconds
"""
res = librtmp.RTMP_SendSeek(self.client.rtmp, time)
if res < 1:
raise RTMPError("Failed to seek")
def close(self):
"""Closes the connection."""
if not self._closed:
self._closed = True
self.client.close()
@property
def duration(self):
"""The duration of the stream."""
return librtmp.RTMP_GetDuration(self.client.rtmp)
|
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.set_option | python | def set_option(self, key, value):
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval | Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if a invalid option is specified. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L128-L148 | null | class RTMP(object):
""" A RTMP client session.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`.
:param playpath: str, Overrides the playpath parsed from the RTMP URL.
:param tcurl: str, URL of the target stream. Defaults to `rtmp[t][e|s]://host[:port]/app`.
:param app: str, Name of application to connect to on the RTMP server.
:param pageurl: str, URL of the web page in which the media was embedded.
:param auth: str, Authentication string to be appended to the connect string.
:param connect_data: This value will be encoded to AMF and added to the connect packet.
:param swfhash: str, SHA256 hash of the decompressed SWF file (hexdigest).
:param swfsize: int, Size of the decompressed SWF file.
:param swfurl: str, URL of the SWF player for the media.
:param swfvfy: bool, Calculate the correct swfhash and swfsize parameter
from the `swfurl` specified.
:param flashver: str, Version of the Flash plugin used to run the SWF player.
:param subscribe: str, Name of live stream to subscribe to. Defaults to `playpath`.
:param token: str, Key for SecureToken response, used if the server requires
SecureToken authentication.
:param live: bool, Specify that the media is a live stream.
:param jtv: str, JSON token used by Twitch/Justin.tv servers.
:param socks: str, Use the specified SOCKS4 proxy.
:param start: int, Start at num seconds into the stream. Not valid for live streams.
:param stop: int, Stop at num seconds into the stream.
:param buffer: int, Set buffer time to num milliseconds. This is used to control
rate of data sent by FMS servers, not buffering of data. The default is 30000.
:param timeout: int, Timeout the session after num seconds without receiving any data
from the server. The default is 30.
"""
def __init__(self, url, playpath=None, tcurl=None, app=None, pageurl=None,
auth=None, swfhash=None, swfsize=None, swfurl=None, swfvfy=None,
flashver=None, subscribe=None, token=None, live=None, jtv=None,
connect_data=None, socks=None, start=None, stop=None, buffer=None,
timeout=None):
def set_opt(key, val):
if val is not None:
self.set_option(key, val)
self.rtmp = librtmp.RTMP_Alloc()
if self.rtmp == ffi.NULL:
raise MemoryError("Failed to allocate RTMP handle")
librtmp.RTMP_Init(self.rtmp)
self._options = dict()
self._invoke_args = dict()
self._invoke_handlers = dict()
self._invoke_results = dict()
self._connect_result = None
self.url = None
if swfurl and swfvfy:
swfhash, swfsize = hash_swf(swfurl)
if swfhash:
digest = unhexlify(swfhash)
librtmp.RTMP_SetSWFHash(self.rtmp, digest, swfsize)
# Socks option must be set before setup_url.
set_opt("socks", socks)
self.setup_url(url)
set_opt("playpath", playpath)
set_opt("tcUrl", tcurl)
set_opt("app", app)
set_opt("swfUrl", swfurl)
set_opt("pageUrl", pageurl)
set_opt("auth", auth)
set_opt("flashver", flashver)
set_opt("subscribe", subscribe)
set_opt("token", token)
set_opt("jtv", jtv)
set_opt("live", live)
set_opt("start", start)
set_opt("stop", stop)
set_opt("buffer", buffer)
set_opt("timeout", timeout)
if isinstance(connect_data, (list, tuple)):
for data in connect_data:
self._parse_connect_data(data)
elif connect_data is not None:
self._parse_connect_data(connect_data)
def _parse_connect_data(self, val):
if isinstance(val, bool):
self.set_option("conn", "B:{0}".format(int(val)))
elif isinstance(val, string_types):
self.set_option("conn", "S:{0}".format(val))
elif isinstance(val, integer_types):
self.set_option("conn", "N:{0}".format(val))
elif isinstance(val, type(None)):
self.set_option("conn", "Z:")
elif isinstance(val, dict):
self.set_option("conn", "O:1")
for key, value in val.items():
if isinstance(value, bool):
self.set_option("conn", "NB:{0}:{1}".format(key, int(value)))
elif isinstance(value, string_types):
self.set_option("conn", "NS:{0}:{1}".format(key, value))
elif isinstance(value, integer_types):
self.set_option("conn", "NN:{0}:{1}".format(key, value))
self.set_option("conn", "O:0")
def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL")
def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0)
def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer)
@property
def connected(self):
"""Returns True if connected to the server.
Usage::
>>> conn.connected
True
"""
return bool(librtmp.RTMP_IsConnected(self.rtmp))
def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet)
def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in a internal queue rather
than sending it right away.
"""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue))
def handle_packet(self, packet):
"""Lets librtmp look at a packet and send a response
if needed."""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
"""Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is recieved.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets()
"""
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args
def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id)
def remote_method(self, method, block=False, **params):
"""Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Wheter to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted'
"""
def func(*args):
call = self.call(method, *args, **params)
if block:
return call.result()
return call
func.__name__ = method
return func
def invoke_handler(self, func=None, name=None):
if not callable(func):
return lambda f: self.invoke_handler(func=f, name=func)
method = name or func.__name__
self.register_invoke_handler(method, func)
return func
def register_invoke_handler(self, method, func):
self._invoke_handlers[method] = func
def close(self):
"""Closes the connection to the server."""
if self.connected:
librtmp.RTMP_Close(self.rtmp)
@property
def transaction_id(self):
return librtmp.RTMP_GetInvokeCount(self.rtmp)
@transaction_id.setter
def transaction_id(self, val):
librtmp.RTMP_SetInvokeCount(self.rtmp, int(val))
def __del__(self):
librtmp.RTMP_Free(self.rtmp)
|
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.setup_url | python | def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL") | r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L150-L170 | null | class RTMP(object):
""" A RTMP client session.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`.
:param playpath: str, Overrides the playpath parsed from the RTMP URL.
:param tcurl: str, URL of the target stream. Defaults to `rtmp[t][e|s]://host[:port]/app`.
:param app: str, Name of application to connect to on the RTMP server.
:param pageurl: str, URL of the web page in which the media was embedded.
:param auth: str, Authentication string to be appended to the connect string.
:param connect_data: This value will be encoded to AMF and added to the connect packet.
:param swfhash: str, SHA256 hash of the decompressed SWF file (hexdigest).
:param swfsize: int, Size of the decompressed SWF file.
:param swfurl: str, URL of the SWF player for the media.
:param swfvfy: bool, Calculate the correct swfhash and swfsize parameter
from the `swfurl` specified.
:param flashver: str, Version of the Flash plugin used to run the SWF player.
:param subscribe: str, Name of live stream to subscribe to. Defaults to `playpath`.
:param token: str, Key for SecureToken response, used if the server requires
SecureToken authentication.
:param live: bool, Specify that the media is a live stream.
:param jtv: str, JSON token used by Twitch/Justin.tv servers.
:param socks: str, Use the specified SOCKS4 proxy.
:param start: int, Start at num seconds into the stream. Not valid for live streams.
:param stop: int, Stop at num seconds into the stream.
:param buffer: int, Set buffer time to num milliseconds. This is used to control
rate of data sent by FMS servers, not buffering of data. The default is 30000.
:param timeout: int, Timeout the session after num seconds without receiving any data
from the server. The default is 30.
"""
def __init__(self, url, playpath=None, tcurl=None, app=None, pageurl=None,
auth=None, swfhash=None, swfsize=None, swfurl=None, swfvfy=None,
flashver=None, subscribe=None, token=None, live=None, jtv=None,
connect_data=None, socks=None, start=None, stop=None, buffer=None,
timeout=None):
def set_opt(key, val):
if val is not None:
self.set_option(key, val)
self.rtmp = librtmp.RTMP_Alloc()
if self.rtmp == ffi.NULL:
raise MemoryError("Failed to allocate RTMP handle")
librtmp.RTMP_Init(self.rtmp)
self._options = dict()
self._invoke_args = dict()
self._invoke_handlers = dict()
self._invoke_results = dict()
self._connect_result = None
self.url = None
if swfurl and swfvfy:
swfhash, swfsize = hash_swf(swfurl)
if swfhash:
digest = unhexlify(swfhash)
librtmp.RTMP_SetSWFHash(self.rtmp, digest, swfsize)
# Socks option must be set before setup_url.
set_opt("socks", socks)
self.setup_url(url)
set_opt("playpath", playpath)
set_opt("tcUrl", tcurl)
set_opt("app", app)
set_opt("swfUrl", swfurl)
set_opt("pageUrl", pageurl)
set_opt("auth", auth)
set_opt("flashver", flashver)
set_opt("subscribe", subscribe)
set_opt("token", token)
set_opt("jtv", jtv)
set_opt("live", live)
set_opt("start", start)
set_opt("stop", stop)
set_opt("buffer", buffer)
set_opt("timeout", timeout)
if isinstance(connect_data, (list, tuple)):
for data in connect_data:
self._parse_connect_data(data)
elif connect_data is not None:
self._parse_connect_data(connect_data)
def _parse_connect_data(self, val):
if isinstance(val, bool):
self.set_option("conn", "B:{0}".format(int(val)))
elif isinstance(val, string_types):
self.set_option("conn", "S:{0}".format(val))
elif isinstance(val, integer_types):
self.set_option("conn", "N:{0}".format(val))
elif isinstance(val, type(None)):
self.set_option("conn", "Z:")
elif isinstance(val, dict):
self.set_option("conn", "O:1")
for key, value in val.items():
if isinstance(value, bool):
self.set_option("conn", "NB:{0}:{1}".format(key, int(value)))
elif isinstance(value, string_types):
self.set_option("conn", "NS:{0}:{1}".format(key, value))
elif isinstance(value, integer_types):
self.set_option("conn", "NN:{0}:{1}".format(key, value))
self.set_option("conn", "O:0")
def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if a invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval
def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0)
def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer)
@property
def connected(self):
"""Returns True if connected to the server.
Usage::
>>> conn.connected
True
"""
return bool(librtmp.RTMP_IsConnected(self.rtmp))
def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet)
def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in a internal queue rather
than sending it right away.
"""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue))
def handle_packet(self, packet):
"""Lets librtmp look at a packet and send a response
if needed."""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
"""Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is recieved.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets()
"""
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args
def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id)
def remote_method(self, method, block=False, **params):
"""Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Wheter to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted'
"""
def func(*args):
call = self.call(method, *args, **params)
if block:
return call.result()
return call
func.__name__ = method
return func
def invoke_handler(self, func=None, name=None):
if not callable(func):
return lambda f: self.invoke_handler(func=f, name=func)
method = name or func.__name__
self.register_invoke_handler(method, func)
return func
def register_invoke_handler(self, method, func):
self._invoke_handlers[method] = func
def close(self):
"""Closes the connection to the server."""
if self.connected:
librtmp.RTMP_Close(self.rtmp)
@property
def transaction_id(self):
return librtmp.RTMP_GetInvokeCount(self.rtmp)
@transaction_id.setter
def transaction_id(self, val):
librtmp.RTMP_SetInvokeCount(self.rtmp, int(val))
def __del__(self):
librtmp.RTMP_Free(self.rtmp)
|
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.connect | python | def connect(self, packet=None):
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0) | Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L172-L191 | null | class RTMP(object):
""" A RTMP client session.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`.
:param playpath: str, Overrides the playpath parsed from the RTMP URL.
:param tcurl: str, URL of the target stream. Defaults to `rtmp[t][e|s]://host[:port]/app`.
:param app: str, Name of application to connect to on the RTMP server.
:param pageurl: str, URL of the web page in which the media was embedded.
:param auth: str, Authentication string to be appended to the connect string.
:param connect_data: This value will be encoded to AMF and added to the connect packet.
:param swfhash: str, SHA256 hash of the decompressed SWF file (hexdigest).
:param swfsize: int, Size of the decompressed SWF file.
:param swfurl: str, URL of the SWF player for the media.
:param swfvfy: bool, Calculate the correct swfhash and swfsize parameter
from the `swfurl` specified.
:param flashver: str, Version of the Flash plugin used to run the SWF player.
:param subscribe: str, Name of live stream to subscribe to. Defaults to `playpath`.
:param token: str, Key for SecureToken response, used if the server requires
SecureToken authentication.
:param live: bool, Specify that the media is a live stream.
:param jtv: str, JSON token used by Twitch/Justin.tv servers.
:param socks: str, Use the specified SOCKS4 proxy.
:param start: int, Start at num seconds into the stream. Not valid for live streams.
:param stop: int, Stop at num seconds into the stream.
:param buffer: int, Set buffer time to num milliseconds. This is used to control
rate of data sent by FMS servers, not buffering of data. The default is 30000.
:param timeout: int, Timeout the session after num seconds without receiving any data
from the server. The default is 30.
"""
def __init__(self, url, playpath=None, tcurl=None, app=None, pageurl=None,
auth=None, swfhash=None, swfsize=None, swfurl=None, swfvfy=None,
flashver=None, subscribe=None, token=None, live=None, jtv=None,
connect_data=None, socks=None, start=None, stop=None, buffer=None,
timeout=None):
def set_opt(key, val):
if val is not None:
self.set_option(key, val)
self.rtmp = librtmp.RTMP_Alloc()
if self.rtmp == ffi.NULL:
raise MemoryError("Failed to allocate RTMP handle")
librtmp.RTMP_Init(self.rtmp)
self._options = dict()
self._invoke_args = dict()
self._invoke_handlers = dict()
self._invoke_results = dict()
self._connect_result = None
self.url = None
if swfurl and swfvfy:
swfhash, swfsize = hash_swf(swfurl)
if swfhash:
digest = unhexlify(swfhash)
librtmp.RTMP_SetSWFHash(self.rtmp, digest, swfsize)
# Socks option must be set before setup_url.
set_opt("socks", socks)
self.setup_url(url)
set_opt("playpath", playpath)
set_opt("tcUrl", tcurl)
set_opt("app", app)
set_opt("swfUrl", swfurl)
set_opt("pageUrl", pageurl)
set_opt("auth", auth)
set_opt("flashver", flashver)
set_opt("subscribe", subscribe)
set_opt("token", token)
set_opt("jtv", jtv)
set_opt("live", live)
set_opt("start", start)
set_opt("stop", stop)
set_opt("buffer", buffer)
set_opt("timeout", timeout)
if isinstance(connect_data, (list, tuple)):
for data in connect_data:
self._parse_connect_data(data)
elif connect_data is not None:
self._parse_connect_data(connect_data)
def _parse_connect_data(self, val):
if isinstance(val, bool):
self.set_option("conn", "B:{0}".format(int(val)))
elif isinstance(val, string_types):
self.set_option("conn", "S:{0}".format(val))
elif isinstance(val, integer_types):
self.set_option("conn", "N:{0}".format(val))
elif isinstance(val, type(None)):
self.set_option("conn", "Z:")
elif isinstance(val, dict):
self.set_option("conn", "O:1")
for key, value in val.items():
if isinstance(value, bool):
self.set_option("conn", "NB:{0}:{1}".format(key, int(value)))
elif isinstance(value, string_types):
self.set_option("conn", "NS:{0}:{1}".format(key, value))
elif isinstance(value, integer_types):
self.set_option("conn", "NN:{0}:{1}".format(key, value))
self.set_option("conn", "O:0")
def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if a invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval
def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL")
def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer)
@property
def connected(self):
"""Returns True if connected to the server.
Usage::
>>> conn.connected
True
"""
return bool(librtmp.RTMP_IsConnected(self.rtmp))
def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet)
def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in a internal queue rather
than sending it right away.
"""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue))
def handle_packet(self, packet):
"""Lets librtmp look at a packet and send a response
if needed."""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
"""Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is recieved.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets()
"""
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args
def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id)
def remote_method(self, method, block=False, **params):
"""Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Wheter to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted'
"""
def func(*args):
call = self.call(method, *args, **params)
if block:
return call.result()
return call
func.__name__ = method
return func
def invoke_handler(self, func=None, name=None):
if not callable(func):
return lambda f: self.invoke_handler(func=f, name=func)
method = name or func.__name__
self.register_invoke_handler(method, func)
return func
def register_invoke_handler(self, method, func):
self._invoke_handlers[method] = func
def close(self):
"""Closes the connection to the server."""
if self.connected:
librtmp.RTMP_Close(self.rtmp)
@property
def transaction_id(self):
return librtmp.RTMP_GetInvokeCount(self.rtmp)
@transaction_id.setter
def transaction_id(self, val):
librtmp.RTMP_SetInvokeCount(self.rtmp, int(val))
def __del__(self):
librtmp.RTMP_Free(self.rtmp)
|
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.create_stream | python | def create_stream(self, seek=None, writeable=False, update_buffer=True):
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer) | Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024) | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L193-L228 | [
"def handle_packet(self, packet):\n \"\"\"Lets librtmp look at a packet and send a response\n if needed.\"\"\"\n\n if not isinstance(packet, RTMPPacket):\n raise ValueError(\"A RTMPPacket argument is required\")\n\n return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)\n"
] | class RTMP(object):
""" A RTMP client session.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`.
:param playpath: str, Overrides the playpath parsed from the RTMP URL.
:param tcurl: str, URL of the target stream. Defaults to `rtmp[t][e|s]://host[:port]/app`.
:param app: str, Name of application to connect to on the RTMP server.
:param pageurl: str, URL of the web page in which the media was embedded.
:param auth: str, Authentication string to be appended to the connect string.
:param connect_data: This value will be encoded to AMF and added to the connect packet.
:param swfhash: str, SHA256 hash of the decompressed SWF file (hexdigest).
:param swfsize: int, Size of the decompressed SWF file.
:param swfurl: str, URL of the SWF player for the media.
:param swfvfy: bool, Calculate the correct swfhash and swfsize parameter
from the `swfurl` specified.
:param flashver: str, Version of the Flash plugin used to run the SWF player.
:param subscribe: str, Name of live stream to subscribe to. Defaults to `playpath`.
:param token: str, Key for SecureToken response, used if the server requires
SecureToken authentication.
:param live: bool, Specify that the media is a live stream.
:param jtv: str, JSON token used by Twitch/Justin.tv servers.
:param socks: str, Use the specified SOCKS4 proxy.
:param start: int, Start at num seconds into the stream. Not valid for live streams.
:param stop: int, Stop at num seconds into the stream.
:param buffer: int, Set buffer time to num milliseconds. This is used to control
rate of data sent by FMS servers, not buffering of data. The default is 30000.
:param timeout: int, Timeout the session after num seconds without receiving any data
from the server. The default is 30.
"""
def __init__(self, url, playpath=None, tcurl=None, app=None, pageurl=None,
auth=None, swfhash=None, swfsize=None, swfurl=None, swfvfy=None,
flashver=None, subscribe=None, token=None, live=None, jtv=None,
connect_data=None, socks=None, start=None, stop=None, buffer=None,
timeout=None):
def set_opt(key, val):
if val is not None:
self.set_option(key, val)
self.rtmp = librtmp.RTMP_Alloc()
if self.rtmp == ffi.NULL:
raise MemoryError("Failed to allocate RTMP handle")
librtmp.RTMP_Init(self.rtmp)
self._options = dict()
self._invoke_args = dict()
self._invoke_handlers = dict()
self._invoke_results = dict()
self._connect_result = None
self.url = None
if swfurl and swfvfy:
swfhash, swfsize = hash_swf(swfurl)
if swfhash:
digest = unhexlify(swfhash)
librtmp.RTMP_SetSWFHash(self.rtmp, digest, swfsize)
# Socks option must be set before setup_url.
set_opt("socks", socks)
self.setup_url(url)
set_opt("playpath", playpath)
set_opt("tcUrl", tcurl)
set_opt("app", app)
set_opt("swfUrl", swfurl)
set_opt("pageUrl", pageurl)
set_opt("auth", auth)
set_opt("flashver", flashver)
set_opt("subscribe", subscribe)
set_opt("token", token)
set_opt("jtv", jtv)
set_opt("live", live)
set_opt("start", start)
set_opt("stop", stop)
set_opt("buffer", buffer)
set_opt("timeout", timeout)
if isinstance(connect_data, (list, tuple)):
for data in connect_data:
self._parse_connect_data(data)
elif connect_data is not None:
self._parse_connect_data(connect_data)
def _parse_connect_data(self, val):
if isinstance(val, bool):
self.set_option("conn", "B:{0}".format(int(val)))
elif isinstance(val, string_types):
self.set_option("conn", "S:{0}".format(val))
elif isinstance(val, integer_types):
self.set_option("conn", "N:{0}".format(val))
elif isinstance(val, type(None)):
self.set_option("conn", "Z:")
elif isinstance(val, dict):
self.set_option("conn", "O:1")
for key, value in val.items():
if isinstance(value, bool):
self.set_option("conn", "NB:{0}:{1}".format(key, int(value)))
elif isinstance(value, string_types):
self.set_option("conn", "NS:{0}:{1}".format(key, value))
elif isinstance(value, integer_types):
self.set_option("conn", "NN:{0}:{1}".format(key, value))
self.set_option("conn", "O:0")
def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if a invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval
def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL")
def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0)
@property
def connected(self):
"""Returns True if connected to the server.
Usage::
>>> conn.connected
True
"""
return bool(librtmp.RTMP_IsConnected(self.rtmp))
def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet)
def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in a internal queue rather
than sending it right away.
"""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue))
def handle_packet(self, packet):
"""Lets librtmp look at a packet and send a response
if needed."""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
"""Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is recieved.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets()
"""
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args
def call(self, method, *args, **params):
    """Call a method on the server.

    :param method: str, the remote method name.
    :param args: positional arguments, AMF-encoded into the invoke body.

    Recognized keyword params: ``transaction_id`` (auto-incremented when
    omitted), ``obj`` (command object, default None), ``format`` (packet
    size, default PACKET_SIZE_MEDIUM) and ``channel`` (default 0x03).

    Returns a :class:`RTMPCall` for retrieving the result.
    """
    transaction_id = params.get("transaction_id")
    if not transaction_id:
        self.transaction_id += 1
        transaction_id = self.transaction_id

    obj = params.get("obj")
    args = [method, transaction_id, obj] + list(args)
    body = b"".join(encode_amf(arg) for arg in args)

    format = params.get("format", PACKET_SIZE_MEDIUM)
    channel = params.get("channel", 0x03)

    packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
                        format=format, channel=channel,
                        body=body)
    self.send_packet(packet)

    return RTMPCall(self, transaction_id)
def remote_method(self, method, block=False, **params):
    """Create a Python function that will attempt to call a remote
    method when used.

    :param method: str, Method name on the server to call
    :param block: bool, Whether to wait for result or not

    Usage::

        >>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
        >>> send_usher_token("some token")
        'Token Accepted'
    """
    def func(*args):
        call = self.call(method, *args, **params)
        if block:
            return call.result()

        return call

    func.__name__ = method

    return func
def invoke_handler(self, func=None, name=None):
    """Register *func* as the handler for a server-invoked method.

    Usable directly (``@conn.invoke_handler``) or with an explicit name
    (``@conn.invoke_handler("methodName")``): when the first argument is
    not callable it is treated as the method name and a decorator is
    returned.
    """
    if not callable(func):
        # func is the method name (or None); defer registration.
        return lambda f: self.invoke_handler(func=f, name=func)

    method = name or func.__name__
    self.register_invoke_handler(method, func)

    return func
def register_invoke_handler(self, method, func):
    """Map a server-invoked method name to a local handler callable."""
    self._invoke_handlers[method] = func
def close(self):
    """Close the connection to the server, if currently connected."""
    if self.connected:
        librtmp.RTMP_Close(self.rtmp)
@property
def transaction_id(self):
    """Current invoke (transaction) counter, mirrored from librtmp."""
    return librtmp.RTMP_GetInvokeCount(self.rtmp)

@transaction_id.setter
def transaction_id(self, val):
    librtmp.RTMP_SetInvokeCount(self.rtmp, int(val))
def __del__(self):
    """Free the underlying librtmp handle on garbage collection."""
    # Guard: __init__ may have raised before self.rtmp was assigned,
    # in which case there is nothing to free.
    rtmp = getattr(self, "rtmp", None)
    if rtmp is not None:
        librtmp.RTMP_Free(rtmp)
|
def read_packet(self):
    """Reads a RTMP packet from the server.

    Returns a :class:`RTMPPacket`.

    Raises :exc:`RTMPError` on error.
    Raises :exc:`RTMPTimeoutError` on timeout.

    Usage::

        >>> packet = conn.read_packet()
        >>> packet.body
        b'packet body ...'
    """
    packet = ffi.new("RTMPPacket*")
    packet_complete = False

    # A packet may arrive in several chunks; keep reading until the
    # whole body has been received.
    while not packet_complete:
        res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
        if res < 1:
            if librtmp.RTMP_IsTimedout(self.rtmp):
                raise RTMPTimeoutError("Timed out while reading packet")
            else:
                raise RTMPError("Failed to read packet")

        packet_complete = packet.m_nBytesRead == packet.m_nBodySize

    return RTMPPacket._from_pointer(packet)
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...' | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L242-L271 | [
"def _from_pointer(cls, pointer):\n packet = cls.__new__(cls)\n packet.packet = pointer\n\n return packet\n"
class RTMP(object):
    """A RTMP client session.

    :param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`.
    :param playpath: str, Overrides the playpath parsed from the RTMP URL.
    :param tcurl: str, URL of the target stream. Defaults to `rtmp[t][e|s]://host[:port]/app`.
    :param app: str, Name of application to connect to on the RTMP server.
    :param pageurl: str, URL of the web page in which the media was embedded.
    :param auth: str, Authentication string to be appended to the connect string.
    :param connect_data: This value will be encoded to AMF and added to the connect packet.
    :param swfhash: str, SHA256 hash of the decompressed SWF file (hexdigest).
    :param swfsize: int, Size of the decompressed SWF file.
    :param swfurl: str, URL of the SWF player for the media.
    :param swfvfy: bool, Calculate the correct swfhash and swfsize parameter
                   from the `swfurl` specified.
    :param flashver: str, Version of the Flash plugin used to run the SWF player.
    :param subscribe: str, Name of live stream to subscribe to. Defaults to `playpath`.
    :param token: str, Key for SecureToken response, used if the server requires
                  SecureToken authentication.
    :param live: bool, Specify that the media is a live stream.
    :param jtv: str, JSON token used by Twitch/Justin.tv servers.
    :param socks: str, Use the specified SOCKS4 proxy.
    :param start: int, Start at num seconds into the stream. Not valid for live streams.
    :param stop: int, Stop at num seconds into the stream.
    :param buffer: int, Set buffer time to num milliseconds. This is used to control
                   rate of data sent by FMS servers, not buffering of data.
                   The default is 30000.
    :param timeout: int, Timeout the session after num seconds without receiving any data
                    from the server. The default is 30.
    """

    def __init__(self, url, playpath=None, tcurl=None, app=None, pageurl=None,
                 auth=None, swfhash=None, swfsize=None, swfurl=None, swfvfy=None,
                 flashver=None, subscribe=None, token=None, live=None, jtv=None,
                 connect_data=None, socks=None, start=None, stop=None, buffer=None,
                 timeout=None):
        def set_opt(key, val):
            # Only forward options the caller actually supplied.
            if val is not None:
                self.set_option(key, val)

        self.rtmp = librtmp.RTMP_Alloc()
        if self.rtmp == ffi.NULL:
            raise MemoryError("Failed to allocate RTMP handle")

        librtmp.RTMP_Init(self.rtmp)

        self._options = dict()
        self._invoke_args = dict()
        self._invoke_handlers = dict()
        self._invoke_results = dict()
        self._connect_result = None
        self.url = None

        if swfurl and swfvfy:
            swfhash, swfsize = hash_swf(swfurl)

        if swfhash:
            digest = unhexlify(swfhash)
            librtmp.RTMP_SetSWFHash(self.rtmp, digest, swfsize)

        # Socks option must be set before setup_url.
        set_opt("socks", socks)

        self.setup_url(url)

        set_opt("playpath", playpath)
        set_opt("tcUrl", tcurl)
        set_opt("app", app)
        set_opt("swfUrl", swfurl)
        set_opt("pageUrl", pageurl)
        set_opt("auth", auth)
        set_opt("flashver", flashver)
        set_opt("subscribe", subscribe)
        set_opt("token", token)
        set_opt("jtv", jtv)
        set_opt("live", live)
        set_opt("start", start)
        set_opt("stop", stop)
        set_opt("buffer", buffer)
        set_opt("timeout", timeout)

        if isinstance(connect_data, (list, tuple)):
            for data in connect_data:
                self._parse_connect_data(data)
        elif connect_data is not None:
            self._parse_connect_data(connect_data)

    def _parse_connect_data(self, val):
        # Encode a Python value into librtmp "conn" option strings using
        # the B:/S:/N:/Z:/O: (bool/string/number/null/object) prefixes.
        if isinstance(val, bool):
            self.set_option("conn", "B:{0}".format(int(val)))
        elif isinstance(val, string_types):
            self.set_option("conn", "S:{0}".format(val))
        elif isinstance(val, integer_types):
            self.set_option("conn", "N:{0}".format(val))
        elif isinstance(val, type(None)):
            self.set_option("conn", "Z:")
        elif isinstance(val, dict):
            # Objects are delimited by O:1 ... O:0 markers.
            self.set_option("conn", "O:1")
            for key, value in val.items():
                if isinstance(value, bool):
                    self.set_option("conn", "NB:{0}:{1}".format(key, int(value)))
                elif isinstance(value, string_types):
                    self.set_option("conn", "NS:{0}:{1}".format(key, value))
                elif isinstance(value, integer_types):
                    self.set_option("conn", "NN:{0}:{1}".format(key, value))
            self.set_option("conn", "O:0")

    def set_option(self, key, value):
        """Sets a option for this session.

        For a detailed list of available options see the librtmp(3) man page.

        :param key: str, A valid option key.
        :param value: A value, anything that can be converted to str is valid.

        Raises :exc:`ValueError` if a invalid option is specified.
        """
        akey = AVal(key)
        aval = AVal(value)
        res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
        if res < 1:
            raise ValueError("Unable to set option {0}".format(key))

        # Keep references so the underlying buffers stay alive while
        # librtmp holds pointers into them -- presumably; confirm.
        self._options[akey] = aval

    def setup_url(self, url):
        r"""Attempt to parse a RTMP URL.

        Additional options may be specified by appending space-separated
        key=value pairs to the URL. Special characters in values may need
        to be escaped to prevent misinterpretation by the option parser.
        The escape encoding uses a backslash followed by two hexadecimal
        digits representing the ASCII value of the character. E.g., spaces
        must be escaped as `\\20` and backslashes must be escaped as `\\5c`.

        :param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`

        Raises :exc:`RTMPError` if URL parsing fails.
        """
        self.url = bytes(url, "utf8")
        res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
        if res < 1:
            raise RTMPError("Unable to parse URL")

    def connect(self, packet=None):
        """Connect to the server.

        :param packet: RTMPPacket, this packet will be sent instead
                       of the regular "connect" packet.

        Raises :exc:`RTMPError` if the connect attempt fails.
        """
        if isinstance(packet, RTMPPacket):
            packet = packet.packet
        else:
            packet = ffi.NULL

        res = librtmp.RTMP_Connect(self.rtmp, packet)
        if res < 1:
            raise RTMPError("Failed to connect")

        # The connect call is always transaction 1.0.
        return RTMPCall(self, 1.0)

    def create_stream(self, seek=None, writeable=False, update_buffer=True):
        """Prepares the session for streaming of audio/video
        and returns a :class:`RTMPStream` object.

        :param seek: int, Attempt to seek to this position.
        :param writeable: bool, Make the stream writeable instead of readable.
        :param update_buffer: bool, When enabled will attempt to speed up
                              download by telling the server our buffer can
                              fit the whole stream.

        Raises :exc:`RTMPError` if a stream could not be created.

        Usage::

            >>> stream = conn.create_stream()
            >>> data = stream.read(1024)
        """
        if writeable:
            librtmp.RTMP_EnableWrite(self.rtmp)

        # Calling handle_packet() on a connect result causes
        # librtmp to send a CreateStream call. This is not always
        # desired when using process_packets(), therefore we do it
        # here instead.
        if self._connect_result:
            self.handle_packet(self._connect_result)

        if not seek:
            seek = 0

        res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
        if res < 1:
            raise RTMPError("Failed to start RTMP playback")

        return RTMPStream(self, update_buffer=update_buffer)

    @property
    def connected(self):
        """Returns True if connected to the server.

        Usage::

            >>> conn.connected
            True
        """
        return bool(librtmp.RTMP_IsConnected(self.rtmp))

    def send_packet(self, packet, queue=True):
        """Sends a RTMP packet to the server.

        :param packet: RTMPPacket, the packet to send to the server.
        :param queue: bool, If True, queue up the packet in a internal queue
                      rather than sending it right away.
        """
        if not isinstance(packet, RTMPPacket):
            raise ValueError("A RTMPPacket argument is required")

        return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
                                       int(queue))

    def handle_packet(self, packet):
        """Lets librtmp look at a packet and send a response
        if needed."""
        if not isinstance(packet, RTMPPacket):
            raise ValueError("A RTMPPacket argument is required")

        return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)

    def process_packets(self, transaction_id=None, invoked_method=None,
                        timeout=None):
        """Wait for packets and process them as needed.

        :param transaction_id: int, Wait until the result of this
                               transaction ID is received.
        :param invoked_method: int, Wait until this method is invoked
                               by the server.
        :param timeout: int, The time to wait for a result from the server.
                        Note: This is the timeout used by this method only,
                        the connection timeout is still used when reading
                        packets.

        Raises :exc:`RTMPError` on error.
        Raises :exc:`RTMPTimeoutError` on timeout.

        Usage::

            >>> @conn.invoke_handler
            ... def add(x, y):
            ...     return x + y

            >>> conn.process_packets()
        """
        start = time()

        while self.connected and transaction_id not in self._invoke_results:
            if timeout and (time() - start) >= timeout:
                raise RTMPTimeoutError("Timeout")

            packet = self.read_packet()
            if packet.type == PACKET_TYPE_INVOKE:
                try:
                    decoded = decode_amf(packet.body)
                except AMFError:
                    continue

                try:
                    method, transaction_id_, obj = decoded[:3]
                    args = decoded[3:]
                except ValueError:
                    continue

                if method == "_result":
                    result = args[0] if len(args) > 0 else None
                    self._invoke_results[transaction_id_] = result
                else:
                    handler = self._invoke_handlers.get(method)
                    if handler:
                        res = handler(*args)
                        if res is not None:
                            self.call("_result", res,
                                      transaction_id=transaction_id_)

                    if method == invoked_method:
                        self._invoke_args[invoked_method] = args
                        break

                # The connect result (transaction 1.0) is stored so that
                # create_stream() can hand it to librtmp later; see the
                # comment in create_stream().
                if transaction_id_ == 1.0:
                    self._connect_result = packet
                else:
                    self.handle_packet(packet)
            else:
                self.handle_packet(packet)

        if transaction_id:
            return self._invoke_results.pop(transaction_id, None)

        if invoked_method:
            return self._invoke_args.pop(invoked_method, None)

    def call(self, method, *args, **params):
        """Calls a method on the server.

        Recognized keyword params: ``transaction_id`` (auto-incremented
        when omitted), ``obj``, ``format`` and ``channel``.

        Returns a :class:`RTMPCall` for retrieving the result.
        """
        transaction_id = params.get("transaction_id")
        if not transaction_id:
            self.transaction_id += 1
            transaction_id = self.transaction_id

        obj = params.get("obj")
        args = [method, transaction_id, obj] + list(args)
        body = b"".join(encode_amf(arg) for arg in args)

        format = params.get("format", PACKET_SIZE_MEDIUM)
        channel = params.get("channel", 0x03)

        packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
                            format=format, channel=channel,
                            body=body)
        self.send_packet(packet)

        return RTMPCall(self, transaction_id)

    def remote_method(self, method, block=False, **params):
        """Creates a Python function that will attempt to
        call a remote method when used.

        :param method: str, Method name on the server to call
        :param block: bool, Whether to wait for result or not

        Usage::

            >>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
            >>> send_usher_token("some token")
            'Token Accepted'
        """
        def func(*args):
            call = self.call(method, *args, **params)
            if block:
                return call.result()

            return call

        func.__name__ = method

        return func

    def invoke_handler(self, func=None, name=None):
        # Decorator for register_invoke_handler(). When the first argument
        # is not callable it is treated as the method name and a decorator
        # is returned.
        if not callable(func):
            return lambda f: self.invoke_handler(func=f, name=func)

        method = name or func.__name__
        self.register_invoke_handler(method, func)

        return func

    def register_invoke_handler(self, method, func):
        """Maps a server-invoked method name to a local handler callable."""
        self._invoke_handlers[method] = func

    def close(self):
        """Closes the connection to the server."""
        if self.connected:
            librtmp.RTMP_Close(self.rtmp)

    @property
    def transaction_id(self):
        """Current invoke (transaction) counter, mirrored from librtmp."""
        return librtmp.RTMP_GetInvokeCount(self.rtmp)

    @transaction_id.setter
    def transaction_id(self, val):
        librtmp.RTMP_SetInvokeCount(self.rtmp, int(val))

    def __del__(self):
        # Guard: __init__ may have raised before self.rtmp was assigned.
        rtmp = getattr(self, "rtmp", None)
        if rtmp is not None:
            librtmp.RTMP_Free(rtmp)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.