repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
bovee/Aston
aston/peak/integrators.py
simple_integrate
def simple_integrate(ts, peak_list, base_ts=None, intname='simple'):
    """
    Integrate each peak naively; without regard to overlap.

    This is used as the terminal step by most of the other integrators.
    Each hint dict in peak_list is tagged with the integrator name and
    paired with its trace segment and a baseline.
    """
    components = []
    for hints in peak_list:
        t0, t1 = hints['t0'], hints['t1']
        hints['int'] = intname
        segment = ts.twin((t0, t1))
        if base_ts is not None:
            baseline = base_ts.twin((t0, t1))
        else:
            # two-point baseline: caller-supplied y0/y1 hints if present,
            # otherwise the segment's endpoint values
            y0 = hints.get('y0', segment[0])
            y1 = hints.get('y1', segment[-1])
            baseline = Trace([y0, y1], [t0, t1], name=ts.name)
        components.append(PeakComponent(hints, segment, baseline))
    return components
python
def simple_integrate(ts, peak_list, base_ts=None, intname='simple'): """ Integrate each peak naively; without regard to overlap. This is used as the terminal step by most of the other integrators. """ peaks = [] for hints in peak_list: t0, t1 = hints['t0'], hints['t1'] hints['int'] = intname pk_ts = ts.twin((t0, t1)) if base_ts is None: # make a two point baseline base = Trace([hints.get('y0', pk_ts[0]), hints.get('y1', pk_ts[-1])], [t0, t1], name=ts.name) else: base = base_ts.twin((t0, t1)) peaks.append(PeakComponent(hints, pk_ts, base)) return peaks
[ "def", "simple_integrate", "(", "ts", ",", "peak_list", ",", "base_ts", "=", "None", ",", "intname", "=", "'simple'", ")", ":", "peaks", "=", "[", "]", "for", "hints", "in", "peak_list", ":", "t0", ",", "t1", "=", "hints", "[", "'t0'", "]", ",", "h...
Integrate each peak naively; without regard to overlap. This is used as the terminal step by most of the other integrators.
[ "Integrate", "each", "peak", "naively", ";", "without", "regard", "to", "overlap", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/peak/integrators.py#L10-L29
bovee/Aston
aston/peak/integrators.py
drop_integrate
def drop_integrate(ts, peak_list):
    """
    Resolves overlap by breaking at the minimum value.

    Peaks are handled window-by-window (via _get_windows).  Within a
    window a straight baseline is drawn from the first peak's start to
    the last peak's end; any pair of overlapping peaks is split at the
    trace minimum between them, and the de-overlapped list is passed to
    simple_integrate.  NOTE: the hint dicts in peak_list are mutated in
    place (t0/t1/y0/y1 keys are rewritten).
    """
    peaks = []
    for _, pks in _get_windows(peak_list):
        temp_pks = []
        # process the window's peaks in time order
        pks = sorted(pks, key=lambda p: p['t0'])
        # baseline endpoints: caller-provided y0/y1 if both present,
        # otherwise the trace values at the window edges
        if 'y0' in pks[0] and 'y1' in pks[-1]:
            y0, y1 = pks[0]['y0'], pks[-1]['y1']
        else:
            y0 = ts.get_point(pks[0]['t0'])
            y1 = ts.get_point(pks[-1]['t1'])
        ys = np.array([y0, y1])
        xs = np.array([pks[0]['t0'], pks[-1]['t1']])
        # go through list of peaks to make sure there's no overlap
        for hints in pks:
            t0, t1 = hints['t0'], hints['t1']
            # figure out the y values (using a linear baseline)
            hints['y0'] = np.interp(t0, xs, ys)
            hints['y1'] = np.interp(t1, xs, ys)
            # if this peak totally overlaps with an existing one, don't add
            if sum(1 for p in temp_pks if t1 <= p['t1']) > 0:
                continue
            overlap_pks = [p for p in temp_pks if t0 <= p['t1']]
            if len(overlap_pks) > 0:
                # find the last of the overlapping peaks
                overlap_pk = max(overlap_pks, key=lambda p: p['t0'])
                # get the section of trace and find the lowest point
                over_ts = ts.twin((t0, overlap_pk['t1']))
                min_t = over_ts.index[over_ts.values.argmin()]
                # delete the existing overlaping peak
                for i, p in enumerate(temp_pks):
                    if p == overlap_pk:
                        del temp_pks[i]
                        break
                # interpolate a new y value at the split point
                y_val = np.interp(min_t, xs, ys)
                overlap_pk['y1'] = y_val
                hints['y0'] = y_val
                # add the old and new peak back in, split at the minimum
                overlap_pk['t1'] = min_t
                temp_pks.append(overlap_pk)
                hints['t0'], hints['t1'] = min_t, t1
                temp_pks.append(hints)
            else:
                hints['t0'], hints['t1'] = t0, t1
                temp_pks.append(hints)
        # none of our peaks should overlap, so we can just use
        # simple_integrate now
        peaks += simple_integrate(ts, temp_pks, intname='drop')
    return peaks
python
def drop_integrate(ts, peak_list): """ Resolves overlap by breaking at the minimum value. """ peaks = [] for _, pks in _get_windows(peak_list): temp_pks = [] pks = sorted(pks, key=lambda p: p['t0']) if 'y0' in pks[0] and 'y1' in pks[-1]: y0, y1 = pks[0]['y0'], pks[-1]['y1'] else: y0 = ts.get_point(pks[0]['t0']) y1 = ts.get_point(pks[-1]['t1']) ys = np.array([y0, y1]) xs = np.array([pks[0]['t0'], pks[-1]['t1']]) # go through list of peaks to make sure there's no overlap for hints in pks: t0, t1 = hints['t0'], hints['t1'] # figure out the y values (using a linear baseline) hints['y0'] = np.interp(t0, xs, ys) hints['y1'] = np.interp(t1, xs, ys) # if this peak totally overlaps with an existing one, don't add if sum(1 for p in temp_pks if t1 <= p['t1']) > 0: continue overlap_pks = [p for p in temp_pks if t0 <= p['t1']] if len(overlap_pks) > 0: # find the last of the overlapping peaks overlap_pk = max(overlap_pks, key=lambda p: p['t0']) # get the section of trace and find the lowest point over_ts = ts.twin((t0, overlap_pk['t1'])) min_t = over_ts.index[over_ts.values.argmin()] # delete the existing overlaping peak for i, p in enumerate(temp_pks): if p == overlap_pk: del temp_pks[i] break # interpolate a new y value y_val = np.interp(min_t, xs, ys) overlap_pk['y1'] = y_val hints['y0'] = y_val # add the old and new peak in overlap_pk['t1'] = min_t temp_pks.append(overlap_pk) hints['t0'], hints['t1'] = min_t, t1 temp_pks.append(hints) else: hints['t0'], hints['t1'] = t0, t1 temp_pks.append(hints) # none of our peaks should overlap, so we can just use # simple_integrate now peaks += simple_integrate(ts, temp_pks, intname='drop') return peaks
[ "def", "drop_integrate", "(", "ts", ",", "peak_list", ")", ":", "peaks", "=", "[", "]", "for", "_", ",", "pks", "in", "_get_windows", "(", "peak_list", ")", ":", "temp_pks", "=", "[", "]", "pks", "=", "sorted", "(", "pks", ",", "key", "=", "lambda"...
Resolves overlap by breaking at the minimum value.
[ "Resolves", "overlap", "by", "breaking", "at", "the", "minimum", "value", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/peak/integrators.py#L60-L118
bovee/Aston
aston/peak/integrators.py
_integrate_mpwrap
def _integrate_mpwrap(ts_and_pks, integrate, fopts): """ Take a zipped timeseries and peaks found in it and integrate it to return peaks. Used to allow multiprocessing support. """ ts, tpks = ts_and_pks pks = integrate(ts, tpks, **fopts) # for p in pks: # p.info['mz'] = str(ts.name) return pks
python
def _integrate_mpwrap(ts_and_pks, integrate, fopts): """ Take a zipped timeseries and peaks found in it and integrate it to return peaks. Used to allow multiprocessing support. """ ts, tpks = ts_and_pks pks = integrate(ts, tpks, **fopts) # for p in pks: # p.info['mz'] = str(ts.name) return pks
[ "def", "_integrate_mpwrap", "(", "ts_and_pks", ",", "integrate", ",", "fopts", ")", ":", "ts", ",", "tpks", "=", "ts_and_pks", "pks", "=", "integrate", "(", "ts", ",", "tpks", ",", "*", "*", "fopts", ")", "# for p in pks:", "# p.info['mz'] = str(ts.name)",...
Take a zipped timeseries and peaks found in it and integrate it to return peaks. Used to allow multiprocessing support.
[ "Take", "a", "zipped", "timeseries", "and", "peaks", "found", "in", "it", "and", "integrate", "it", "to", "return", "peaks", ".", "Used", "to", "allow", "multiprocessing", "support", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/peak/integrators.py#L223-L233
gtaylor/winrandom-ctypes
winrandom/ctyped_funcs.py
get_bytes
def get_bytes(num_bytes):
    """
    Returns a random string of num_bytes length.

    :param num_bytes: Number of random bytes to generate.
    :raises OSError: If the Windows crypto API reports failure.
    """
    s = create_string_buffer(num_bytes)
    # Provider handle filled in by CryptAcquireContextA.
    hProv = c_ulong()
    # The original ignored both return codes (1 = success, 0 = error),
    # which silently returned an all-zero buffer on failure, and it
    # never released the provider handle.
    if not windll.Advapi32.CryptAcquireContextA(byref(hProv), None, None,
                                                PROV_RSA_FULL, 0):
        raise OSError('CryptAcquireContextA failed')
    try:
        if not windll.Advapi32.CryptGenRandom(hProv,
                                              wintypes.DWORD(num_bytes),
                                              cast(byref(s),
                                                   POINTER(c_byte))):
            raise OSError('CryptGenRandom failed')
    finally:
        # Release the provider handle; the original leaked it.
        windll.Advapi32.CryptReleaseContext(hProv, 0)
    return s.raw
python
def get_bytes(num_bytes): """ Returns a random string of num_bytes length. """ # Is this the way to do it? #s = c_ubyte() # Or this? s = create_string_buffer(num_bytes) # Used to keep track of status. 1 = success, 0 = error. ok = c_int() # Provider? hProv = c_ulong() ok = windll.Advapi32.CryptAcquireContextA(byref(hProv), None, None, PROV_RSA_FULL, 0) ok = windll.Advapi32.CryptGenRandom(hProv, wintypes.DWORD(num_bytes), cast(byref(s), POINTER(c_byte))) return s.raw
[ "def", "get_bytes", "(", "num_bytes", ")", ":", "# Is this the way to do it?", "#s = c_ubyte()", "# Or this?", "s", "=", "create_string_buffer", "(", "num_bytes", ")", "# Used to keep track of status. 1 = success, 0 = error.", "ok", "=", "c_int", "(", ")", "# Provider?", ...
Returns a random string of num_bytes length.
[ "Returns", "a", "random", "string", "of", "num_bytes", "length", "." ]
train
https://github.com/gtaylor/winrandom-ctypes/blob/77aaf18cb12d65d7bf321cef34234b20534dc4ba/winrandom/ctyped_funcs.py#L11-L27
gtaylor/winrandom-ctypes
winrandom/ctyped_funcs.py
get_long
def get_long():
    """
    Generates a random long. The length of said long varies by platform.

    :raises OSError: If the Windows crypto API reports failure.
    """
    # The C long type to populate.
    pbRandomData = c_ulong()
    # Determine the byte size of this machine's long type.
    size_of_long = wintypes.DWORD(sizeof(pbRandomData))
    # Provider handle filled in by CryptAcquireContextA.
    hProv = c_ulong()
    # The original ignored both return codes (1 = success, 0 = error),
    # which silently returned 0 on failure, and never released the
    # provider handle.
    if not windll.Advapi32.CryptAcquireContextA(byref(hProv), None, None,
                                                PROV_RSA_FULL, 0):
        raise OSError('CryptAcquireContextA failed')
    try:
        if not windll.Advapi32.CryptGenRandom(hProv, size_of_long,
                                              byref(pbRandomData)):
            raise OSError('CryptGenRandom failed')
    finally:
        # Release the provider handle; the original leaked it.
        windll.Advapi32.CryptReleaseContext(hProv, 0)
    return pbRandomData.value
python
def get_long(): """ Generates a random long. The length of said long varies by platform. """ # The C long type to populate. pbRandomData = c_ulong() # Determine the byte size of this machine's long type. size_of_long = wintypes.DWORD(sizeof(pbRandomData)) # Used to keep track of status. 1 = success, 0 = error. ok = c_int() # Provider? hProv = c_ulong() ok = windll.Advapi32.CryptAcquireContextA(byref(hProv), None, None, PROV_RSA_FULL, 0) ok = windll.Advapi32.CryptGenRandom(hProv, size_of_long, byref(pbRandomData)) return pbRandomData.value
[ "def", "get_long", "(", ")", ":", "# The C long type to populate.", "pbRandomData", "=", "c_ulong", "(", ")", "# Determine the byte size of this machine's long type.", "size_of_long", "=", "wintypes", ".", "DWORD", "(", "sizeof", "(", "pbRandomData", ")", ")", "# Used t...
Generates a random long. The length of said long varies by platform.
[ "Generates", "a", "random", "long", ".", "The", "length", "of", "said", "long", "varies", "by", "platform", "." ]
train
https://github.com/gtaylor/winrandom-ctypes/blob/77aaf18cb12d65d7bf321cef34234b20534dc4ba/winrandom/ctyped_funcs.py#L29-L45
mbr/data
data/decorators.py
annotate
def annotate(*args, **kwargs):
    """Set function annotations (on Python2 and 3).

    Keyword arguments become parameter annotations; a single positional
    argument becomes the return annotation.

    :raises ValueError: If more than one positional argument is given.
    """
    def decorator(f):
        # Validate BEFORE mutating: the original updated
        # f.__annotations__ with kwargs first and only then raised,
        # leaving f partially annotated on error.
        if args and len(args) != 1:
            raise ValueError('annotate supports only a single argument.')

        if not hasattr(f, '__annotations__'):
            f.__annotations__ = kwargs.copy()
        else:
            f.__annotations__.update(kwargs)

        if args:
            f.__annotations__['return'] = args[0]
        return f
    return decorator
python
def annotate(*args, **kwargs): """Set function annotations (on Python2 and 3).""" def decorator(f): if not hasattr(f, '__annotations__'): f.__annotations__ = kwargs.copy() else: f.__annotations__.update(kwargs) if args: if len(args) != 1: raise ValueError('annotate supports only a single argument.') f.__annotations__['return'] = args[0] return f return decorator
[ "def", "annotate", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "decorator", "(", "f", ")", ":", "if", "not", "hasattr", "(", "f", ",", "'__annotations__'", ")", ":", "f", ".", "__annotations__", "=", "kwargs", ".", "copy", "(", ")",...
Set function annotations (on Python2 and 3).
[ "Set", "function", "annotations", "(", "on", "Python2", "and", "3", ")", "." ]
train
https://github.com/mbr/data/blob/f326938502defb4af93e97ed1212a71575641e77/data/decorators.py#L11-L25
mbr/data
data/decorators.py
auto_instantiate
def auto_instantiate(*classes):
    """Creates a decorator that will instantiate objects based on function
    parameter annotations.

    The decorator will check every argument passed into ``f``. If ``f`` has
    an annotation for the specified parameter and the annotation is found in
    ``classes``, the parameter value passed in will be used to construct a
    new instance of the expression that is the annotation.

    An example (Python 3):

    .. code-block:: python

        @auto_instantiate(int)
        def foo(a: int, b: float):
            pass

    Any value passed in as ``b`` is left unchanged. Anything passed as the
    parameter for ``a`` will be converted to :class:`int` before calling the
    function.

    Since Python 2 does not support annotations, the
    :func:`~data.decorators.annotate` function can be used instead:

    .. code-block:: python

        @auto_instantiate(int)
        @annotate(a=int)
        def foo(a, b):
            pass

    :param classes: Any number of classes/callables for which
                    auto-instantiation should be performed. If empty,
                    perform for all.
    :note: When dealing with data, it is almost always more convenient to
           use the :func:`~data.decorators.data` decorator instead.
    """
    def decorator(f):
        # collect our argspec
        sig = signature(f)

        @wraps(f)
        def _(*args, **kwargs):
            # bind the incoming call so each value is matched to its
            # parameter name
            bvals = sig.bind(*args, **kwargs)

            # replace with instance if desired
            for varname, val in bvals.arguments.items():
                anno = sig.parameters[varname].annotation
                # instantiate when the annotation is one of the requested
                # classes, or -- with no classes given -- whenever any
                # annotation is present at all
                if anno in classes or (len(classes) == 0 and anno != _empty):
                    bvals.arguments[varname] = anno(val)
            return f(*bvals.args, **bvals.kwargs)

        # create another layer by wrapping in a FunctionMaker. this is done
        # to preserve the original signature
        return FunctionMaker.create(
            f, 'return _(%(signature)s)', dict(_=_, __wrapped__=f)
        )
    return decorator
python
def auto_instantiate(*classes): """Creates a decorator that will instantiate objects based on function parameter annotations. The decorator will check every argument passed into ``f``. If ``f`` has an annotation for the specified parameter and the annotation is found in ``classes``, the parameter value passed in will be used to construct a new instance of the expression that is the annotation. An example (Python 3): .. code-block:: python @auto_instantiate(int) def foo(a: int, b: float): pass Any value passed in as ``b`` is left unchanged. Anything passed as the parameter for ``a`` will be converted to :class:`int` before calling the function. Since Python 2 does not support annotations, the :func:`~data.decorators.annotate` function should can be used: .. code-block:: python @auto_instantiate(int) @annotate(a=int) def foo(a, b): pass :param classes: Any number of classes/callables for which auto-instantiation should be performed. If empty, perform for all. :note: When dealing with data, it is almost always more convenient to use the :func:`~data.decorators.data` decorator instead. """ def decorator(f): # collect our argspec sig = signature(f) @wraps(f) def _(*args, **kwargs): bvals = sig.bind(*args, **kwargs) # replace with instance if desired for varname, val in bvals.arguments.items(): anno = sig.parameters[varname].annotation if anno in classes or (len(classes) == 0 and anno != _empty): bvals.arguments[varname] = anno(val) return f(*bvals.args, **bvals.kwargs) # create another layer by wrapping in a FunctionMaker. this is done # to preserve the original signature return FunctionMaker.create( f, 'return _(%(signature)s)', dict(_=_, __wrapped__=f) ) return decorator
[ "def", "auto_instantiate", "(", "*", "classes", ")", ":", "def", "decorator", "(", "f", ")", ":", "# collect our argspec", "sig", "=", "signature", "(", "f", ")", "@", "wraps", "(", "f", ")", "def", "_", "(", "*", "args", ",", "*", "*", "kwargs", "...
Creates a decorator that will instantiate objects based on function parameter annotations. The decorator will check every argument passed into ``f``. If ``f`` has an annotation for the specified parameter and the annotation is found in ``classes``, the parameter value passed in will be used to construct a new instance of the expression that is the annotation. An example (Python 3): .. code-block:: python @auto_instantiate(int) def foo(a: int, b: float): pass Any value passed in as ``b`` is left unchanged. Anything passed as the parameter for ``a`` will be converted to :class:`int` before calling the function. Since Python 2 does not support annotations, the :func:`~data.decorators.annotate` function should can be used: .. code-block:: python @auto_instantiate(int) @annotate(a=int) def foo(a, b): pass :param classes: Any number of classes/callables for which auto-instantiation should be performed. If empty, perform for all. :note: When dealing with data, it is almost always more convenient to use the :func:`~data.decorators.data` decorator instead.
[ "Creates", "a", "decorator", "that", "will", "instantiate", "objects", "based", "on", "function", "parameter", "annotations", "." ]
train
https://github.com/mbr/data/blob/f326938502defb4af93e97ed1212a71575641e77/data/decorators.py#L28-L90
mbr/data
data/decorators.py
data
def data(*argnames):
    """Designate an argument as a :class:`~data.Data` argument.

    Combines :func:`~data.decorators.annotate` and
    :func:`~data.decorators.auto_instantiate` so that each named
    parameter is converted to a :class:`~data.Data` instance before the
    wrapped function runs.

    Example:

    .. code-block:: python

        class Foo(object):
            @data('bar')
            def meth(self, foo, bar):
                pass

    Inside ``meth``, ``bar`` will always be a :class:`~data.Data`
    instance constructed from the original value passed as ``bar``.

    :param argnames: List of parameter names that should be data
                     arguments.
    :return: A decorator that converts the named arguments to
             :class:`~data.Data` instances."""
    # bare usage (``@data`` without parentheses, Python3): the single
    # positional argument is the decorated function itself
    if len(argnames) == 1 and callable(argnames[0]):
        return data()(argnames[0])

    def decorator(f):
        annotations = {name: Data for name in argnames}
        annotated = annotate(**annotations)(f)
        return auto_instantiate(Data)(annotated)
    return decorator
python
def data(*argnames): """Designate an argument as a :class:`~data.Data` argument. Works by combining calls to :func:`~data.decorators.auto_instantiate` and :func:~data.decorators.annotate` on the named arguments. Example: .. code-block:: python class Foo(object): @data('bar') def meth(self, foo, bar): pass Inside ``meth``, ``bar`` will always be a :class:`~data.Data` instance constructed from the original value passed as ``bar``. :param argnames: List of parameter names that should be data arguments. :return: A decorator that converts the named arguments to :class:`~data.Data` instances.""" # make it work if given only one argument (for Python3) if len(argnames) == 1 and callable(argnames[0]): return data()(argnames[0]) def decorator(f): f = annotate(**dict((argname, Data) for argname in argnames))(f) f = auto_instantiate(Data)(f) return f return decorator
[ "def", "data", "(", "*", "argnames", ")", ":", "# make it work if given only one argument (for Python3)", "if", "len", "(", "argnames", ")", "==", "1", "and", "callable", "(", "argnames", "[", "0", "]", ")", ":", "return", "data", "(", ")", "(", "argnames", ...
Designate an argument as a :class:`~data.Data` argument. Works by combining calls to :func:`~data.decorators.auto_instantiate` and :func:~data.decorators.annotate` on the named arguments. Example: .. code-block:: python class Foo(object): @data('bar') def meth(self, foo, bar): pass Inside ``meth``, ``bar`` will always be a :class:`~data.Data` instance constructed from the original value passed as ``bar``. :param argnames: List of parameter names that should be data arguments. :return: A decorator that converts the named arguments to :class:`~data.Data` instances.
[ "Designate", "an", "argument", "as", "a", ":", "class", ":", "~data", ".", "Data", "argument", "." ]
train
https://github.com/mbr/data/blob/f326938502defb4af93e97ed1212a71575641e77/data/decorators.py#L93-L122
ryanvarley/ExoData
exodata/equations.py
ratioTerminatorToStar
def ratioTerminatorToStar(H_p, R_p, R_s):  # TODO add into planet class
    r"""Calculates the ratio of the terminator to the star assuming 5 scale
    heights large. If you dont know all of the input try
    :py:func:`calcRatioTerminatorToStar`

    .. math::
        \Delta F = \frac{10 H R_p + 25 H^2}{R_\star^2}

    Where :math:`\Delta F` is the ratio of the terminator to the star,
    H scale height planet atmosphere, :math:`R_p` radius of the planet,
    :math:`R_s` radius of the star

    :param H_p: scale height of the planet atmosphere
    :param R_p: radius of the planet
    :param R_s: radius of the star
    :return: ratio of the terminator to the star
    """
    annulus = 10 * H_p * R_p + 25 * H_p ** 2
    deltaF = annulus / R_s ** 2
    return deltaF.simplified
python
def ratioTerminatorToStar(H_p, R_p, R_s): # TODO add into planet class r"""Calculates the ratio of the terminator to the star assuming 5 scale heights large. If you dont know all of the input try :py:func:`calcRatioTerminatorToStar` .. math:: \Delta F = \frac{10 H R_p + 25 H^2}{R_\star^2} Where :math:`\Delta F` is the ration of the terminator to the star, H scale height planet atmosphere, :math:`R_p` radius of the planet, :math:`R_s` radius of the star :param H_p: :param R_p: :param R_s: :return: ratio of the terminator to the star """ deltaF = ((10 * H_p * R_p) + (25 * H_p**2)) / (R_s**2) return deltaF.simplified
[ "def", "ratioTerminatorToStar", "(", "H_p", ",", "R_p", ",", "R_s", ")", ":", "# TODO add into planet class", "deltaF", "=", "(", "(", "10", "*", "H_p", "*", "R_p", ")", "+", "(", "25", "*", "H_p", "**", "2", ")", ")", "/", "(", "R_s", "**", "2", ...
r"""Calculates the ratio of the terminator to the star assuming 5 scale heights large. If you dont know all of the input try :py:func:`calcRatioTerminatorToStar` .. math:: \Delta F = \frac{10 H R_p + 25 H^2}{R_\star^2} Where :math:`\Delta F` is the ration of the terminator to the star, H scale height planet atmosphere, :math:`R_p` radius of the planet, :math:`R_s` radius of the star :param H_p: :param R_p: :param R_s: :return: ratio of the terminator to the star
[ "r", "Calculates", "the", "ratio", "of", "the", "terminator", "to", "the", "star", "assuming", "5", "scale", "heights", "large", ".", "If", "you", "dont", "know", "all", "of", "the", "input", "try", ":", "py", ":", "func", ":", "calcRatioTerminatorToStar" ...
train
https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/equations.py#L768-L787
ryanvarley/ExoData
exodata/equations.py
SNRPlanet
def SNRPlanet(SNRStar, starPlanetFlux, Nobs, pixPerbin, NVisits=1):
    r""" Calculate the Signal to Noise Ratio of the planet atmosphere

    .. math::
        \text{SNR}_\text{planet} = \text{SNR}_\text{star} \times \Delta F
        \times \sqrt{N_\text{obs}} \times \sqrt{N_\text{pixPerbin}}
        \times \sqrt{N_\text{visits}}

    Where :math:`\text{SNR}_\star` SNR of the star detection,
    :math:`\Delta F` ratio of the terminator to the star,
    :math:`N_\text{obs}` number of exposures per visit,
    :math:`N_\text{pixPerBin}` number of pixels per wavelength bin,
    :math:`N_\text{visits}` number of visits.

    :return: SNR of the planet atmosphere
    """
    # start from the stellar SNR scaled by the planet/star flux ratio,
    # then fold in each sqrt(N) term separately
    snr = SNRStar * starPlanetFlux
    snr = snr * sqrt(Nobs)
    snr = snr * sqrt(pixPerbin)
    snr = snr * sqrt(NVisits)
    return snr
python
def SNRPlanet(SNRStar, starPlanetFlux, Nobs, pixPerbin, NVisits=1): r""" Calculate the Signal to Noise Ratio of the planet atmosphere .. math:: \text{SNR}_\text{planet} = \text{SNR}_\text{star} \times \Delta F \times \sqrt{N_\text{obs}} \times \sqrt{N_\text{pixPerbin}} \times \sqrt{N_\text{visits}} Where :math:`\text{SNR}_\star` SNR of the star detection, :math:`\Delta F` ratio of the terminator to the star, :math:`N_\text{obs}` number of exposures per visit, :math:`N_\text{pixPerBin}` number of pixels per wavelength bin, :math:`N_\text{visits}` number of visits. :return: """ SNRplanet = SNRStar * starPlanetFlux * \ sqrt(Nobs) * sqrt(pixPerbin) * sqrt(NVisits) return SNRplanet
[ "def", "SNRPlanet", "(", "SNRStar", ",", "starPlanetFlux", ",", "Nobs", ",", "pixPerbin", ",", "NVisits", "=", "1", ")", ":", "SNRplanet", "=", "SNRStar", "*", "starPlanetFlux", "*", "sqrt", "(", "Nobs", ")", "*", "sqrt", "(", "pixPerbin", ")", "*", "s...
r""" Calculate the Signal to Noise Ratio of the planet atmosphere .. math:: \text{SNR}_\text{planet} = \text{SNR}_\text{star} \times \Delta F \times \sqrt{N_\text{obs}} \times \sqrt{N_\text{pixPerbin}} \times \sqrt{N_\text{visits}} Where :math:`\text{SNR}_\star` SNR of the star detection, :math:`\Delta F` ratio of the terminator to the star, :math:`N_\text{obs}` number of exposures per visit, :math:`N_\text{pixPerBin}` number of pixels per wavelength bin, :math:`N_\text{visits}` number of visits. :return:
[ "r", "Calculate", "the", "Signal", "to", "Noise", "Ratio", "of", "the", "planet", "atmosphere" ]
train
https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/equations.py#L790-L809
ryanvarley/ExoData
exodata/equations.py
transitDurationCircular
def transitDurationCircular(P, R_s, R_p, a, i):
    r"""Estimation of the primary transit duration. Assumes a circular orbit.

    .. math::
        T_\text{dur} = \frac{P}{\pi}\sin^{-1}
        \left[\frac{R_\star}{a}\frac{\sqrt{(1+k)^2 - b^2}}{\sin{i}} \right]

    Where :math:`T_\text{dur}` transit duration, P orbital period,
    :math:`R_\star` radius of the star, a is the semi-major axis, k is
    :math:`\frac{R_p}{R_s}`, b is :math:`\frac{a}{R_*} \cos{i}`
    (Seager & Mallen-Ornelas 2003)

    :param P: orbital period
    :param R_s: stellar radius
    :param R_p: planetary radius
    :param a: semi-major axis
    :param i: orbital inclination (falls back to 90 deg when nan)
    :return: transit duration, rescaled to minutes
    """
    # identity check against the nan singleton -- assumes callers pass
    # the same `nan` object imported by this module (TODO confirm)
    if i is nan:
        i = 90 * aq.deg

    i = i.rescale(aq.rad)
    k = R_p / R_s  # lit reference for eclipsing binaries
    # impact parameter for a circular orbit
    b = (a * cos(i)) / R_s
    duration = (P / pi) * arcsin(
        ((R_s * sqrt((1 + k) ** 2 - b ** 2)) / (a * sin(i))).simplified)

    return duration.rescale(aq.min)
python
def transitDurationCircular(P, R_s, R_p, a, i): r"""Estimation of the primary transit time. Assumes a circular orbit. .. math:: T_\text{dur} = \frac{P}{\pi}\sin^{-1} \left[\frac{R_\star}{a}\frac{\sqrt{(1+k)^2 + b^2}}{\sin{a}} \right] Where :math:`T_\text{dur}` transit duration, P orbital period, :math:`R_\star` radius of the star, a is the semi-major axis, k is :math:`\frac{R_p}{R_s}`, b is :math:`\frac{a}{R_*} \cos{i}` (Seager & Mallen-Ornelas 2003) """ if i is nan: i = 90 * aq.deg i = i.rescale(aq.rad) k = R_p / R_s # lit reference for eclipsing binaries b = (a * cos(i)) / R_s duration = (P / pi) * arcsin(((R_s * sqrt((1 + k) ** 2 - b ** 2)) / (a * sin(i))).simplified) return duration.rescale(aq.min)
[ "def", "transitDurationCircular", "(", "P", ",", "R_s", ",", "R_p", ",", "a", ",", "i", ")", ":", "if", "i", "is", "nan", ":", "i", "=", "90", "*", "aq", ".", "deg", "i", "=", "i", ".", "rescale", "(", "aq", ".", "rad", ")", "k", "=", "R_p"...
r"""Estimation of the primary transit time. Assumes a circular orbit. .. math:: T_\text{dur} = \frac{P}{\pi}\sin^{-1} \left[\frac{R_\star}{a}\frac{\sqrt{(1+k)^2 + b^2}}{\sin{a}} \right] Where :math:`T_\text{dur}` transit duration, P orbital period, :math:`R_\star` radius of the star, a is the semi-major axis, k is :math:`\frac{R_p}{R_s}`, b is :math:`\frac{a}{R_*} \cos{i}` (Seager & Mallen-Ornelas 2003)
[ "r", "Estimation", "of", "the", "primary", "transit", "time", ".", "Assumes", "a", "circular", "orbit", "." ]
train
https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/equations.py#L812-L836
ryanvarley/ExoData
exodata/equations.py
estimateStellarTemperature
def estimateStellarTemperature(M_s):
    """Estimates stellar temperature using the main sequence relationship
    T ~ 5800*M^0.65 (Cox 2000).
    """
    # TODO improve with more x and k values from Cox 2000
    try:
        mass_term = float(M_s.rescale(aq.M_s) ** 0.65)
    except AttributeError:
        # M_s is not a quantity (e.g. nan/None) -- no estimate possible
        return np.nan
    return (5800 * aq.K * mass_term).rescale(aq.K)
python
def estimateStellarTemperature(M_s): """Estimates stellar temperature using the main sequence relationship T ~ 5800*M^0.65 (Cox 2000). """ # TODO improve with more x and k values from Cox 2000 try: temp = (5800 * aq.K * float(M_s.rescale(aq.M_s) ** 0.65)).rescale(aq.K) except AttributeError: temp = np.nan return temp
[ "def", "estimateStellarTemperature", "(", "M_s", ")", ":", "# TODO improve with more x and k values from Cox 2000", "try", ":", "temp", "=", "(", "5800", "*", "aq", ".", "K", "*", "float", "(", "M_s", ".", "rescale", "(", "aq", ".", "M_s", ")", "**", "0.65",...
Estimates stellar temperature using the main sequence relationship T ~ 5800*M^0.65 (Cox 2000).
[ "Estimates", "stellar", "temperature", "using", "the", "main", "sequence", "relationship", "T", "~", "5800", "*", "M^0", ".", "65", "(", "Cox", "2000", ")", "." ]
train
https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/equations.py#L839-L848
ryanvarley/ExoData
exodata/equations.py
estimateDistance
def estimateDistance(m, M, Av=0.0):
    """ estimate the distance to star based on the absolute magnitude,
    apparent magnitude and the absorption / extinction.

    Uses the distance modulus: d = 10 ** ((m - M + 5 - Av) / 5).

    :param m: apparent magnitude
    :param M: absolute magnitude
    :param Av: absorbtion / extinction
    :return: d (distance to object) in parsecs, or np.nan if the inputs
        cannot be interpreted as numbers
    """
    try:
        m = float(m)  # basic value checking as there is no units
        M = float(M)
        Av = float(Av)
    except (TypeError, ValueError):
        # the original caught only TypeError, so non-numeric strings
        # (e.g. float('abc')) escaped as an unhandled ValueError
        return np.nan

    d = 10 ** ((m - M + 5 - Av) / 5)

    if math.isnan(d):
        return np.nan
    else:
        return d * aq.pc
python
def estimateDistance(m, M, Av=0.0): """ estimate the distance to star based on the absolute magnitude, apparent magnitude and the absorption / extinction. :param m: apparent magnitude :param M: absolute magnitude :param Av: absorbtion / extinction :return: d (distance to object) in parsecs """ try: m = float(m) # basic value checking as there is no units M = float(M) Av = float(Av) except TypeError: return np.nan d = 10 ** ((m - M + 5 - Av) / 5) if math.isnan(d): return np.nan else: return d * aq.pc
[ "def", "estimateDistance", "(", "m", ",", "M", ",", "Av", "=", "0.0", ")", ":", "try", ":", "m", "=", "float", "(", "m", ")", "# basic value checking as there is no units", "M", "=", "float", "(", "M", ")", "Av", "=", "float", "(", "Av", ")", "except...
estimate the distance to star based on the absolute magnitude, apparent magnitude and the absorption / extinction. :param m: apparent magnitude :param M: absolute magnitude :param Av: absorbtion / extinction :return: d (distance to object) in parsecs
[ "estimate", "the", "distance", "to", "star", "based", "on", "the", "absolute", "magnitude", "apparent", "magnitude", "and", "the", "absorption", "/", "extinction", "." ]
train
https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/equations.py#L851-L873
ryanvarley/ExoData
exodata/equations.py
_createAbsMagEstimationDict
def _createAbsMagEstimationDict():
    """ loads magnitude_estimation.dat which is from
    http://xoomer.virgilio.it/hrtrace/Sk.htm on 24/01/2014 and based on
    Schmid-Kaler (1982)

    creates a dict in the form
    [Classletter][ClassNumber][List of values for each L Class]

    Returns the lookup dict together with LClassRef, which maps a
    luminosity-class label to its column index in the table rows.
    """
    magnitude_estimation_filepath = resource_filename(
        __name__, 'data/magnitude_estimation.dat')
    # '|S5' -> load every column as a 5-char byte string; numeric
    # conversion is done per-cell below
    raw_table = np.loadtxt(magnitude_estimation_filepath, '|S5')

    absMagDict = {
        'O': {}, 'B': {}, 'A': {}, 'F': {}, 'G': {}, 'K': {}, 'M': {}}
    for row in raw_table:
        if sys.hexversion >= 0x03000000:
            # otherwise we get byte ints or b' caused by 2to3
            starClass = row[0].decode("utf-8")
            absMagDict[starClass[0]][int(starClass[1])] = [
                float(x) for x in row[1:]]
        else:
            # dict of spectral type = {abs mag for each luminosity class}
            absMagDict[row[0][0]][int(row[0][1])] = [
                float(x) for x in row[1:]]

    # manually typed from table headers - used to match columns with the L
    # class (header)
    LClassRef = {
        'V': 0, 'IV': 1, 'III': 2, 'II': 3, 'Ib': 4, 'Iab': 5,
        'Ia': 6, 'Ia0': 7}

    return absMagDict, LClassRef
python
def _createAbsMagEstimationDict(): """ loads magnitude_estimation.dat which is from http://xoomer.virgilio.it/hrtrace/Sk.htm on 24/01/2014 and based on Schmid-Kaler (1982) creates a dict in the form [Classletter][ClassNumber][List of values for each L Class] """ magnitude_estimation_filepath = resource_filename( __name__, 'data/magnitude_estimation.dat') raw_table = np.loadtxt(magnitude_estimation_filepath, '|S5') absMagDict = { 'O': {}, 'B': {}, 'A': {}, 'F': {}, 'G': {}, 'K': {}, 'M': {}} for row in raw_table: if sys.hexversion >= 0x03000000: # otherwise we get byte ints or b' caused by 2to3 starClass = row[0].decode("utf-8") absMagDict[starClass[0]][int(starClass[1])] = [ float(x) for x in row[1:]] else: # dict of spectral type = {abs mag for each luminosity class} absMagDict[row[0][0]][int(row[0][1])] = [float(x) for x in row[1:]] # manually typed from table headers - used to match columns with the L # class (header) LClassRef = { 'V': 0, 'IV': 1, 'III': 2, 'II': 3, 'Ib': 4, 'Iab': 5, 'Ia': 6, 'Ia0': 7} return absMagDict, LClassRef
[ "def", "_createAbsMagEstimationDict", "(", ")", ":", "magnitude_estimation_filepath", "=", "resource_filename", "(", "__name__", ",", "'data/magnitude_estimation.dat'", ")", "raw_table", "=", "np", ".", "loadtxt", "(", "magnitude_estimation_filepath", ",", "'|S5'", ")", ...
loads magnitude_estimation.dat which is from http://xoomer.virgilio.it/hrtrace/Sk.htm on 24/01/2014 and based on Schmid-Kaler (1982) creates a dict in the form [Classletter][ClassNumber][List of values for each L Class]
[ "loads", "magnitude_estimation", ".", "dat", "which", "is", "from", "http", ":", "//", "xoomer", ".", "virgilio", ".", "it", "/", "hrtrace", "/", "Sk", ".", "htm", "on", "24", "/", "01", "/", "2014", "and", "based", "on", "Schmid", "-", "Kaler", "(",...
train
https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/equations.py#L876-L918
ryanvarley/ExoData
exodata/equations.py
estimateAbsoluteMagnitude
def estimateAbsoluteMagnitude(spectralType): """Uses the spectral type to lookup an approximate absolute magnitude for the star. """ from .astroclasses import SpectralType specType = SpectralType(spectralType) if specType.classLetter == '': return np.nan elif specType.classNumber == '': specType.classNumber = 5 # approximation using mid magnitude value if specType.lumType == '': specType.lumType = 'V' # assume main sequence LNum = LClassRef[specType.lumType] classNum = specType.classNumber classLet = specType.classLetter try: return absMagDict[classLet][classNum][LNum] # value not in table. Assume the number isn't there (Key p2.7, Ind p3+) except (KeyError, IndexError): try: classLookup = absMagDict[classLet] values = np.array(list(classLookup.values()))[ :, LNum] # only select the right L Type return np.interp(classNum, list(classLookup.keys()), values) except (KeyError, ValueError): return np.nan
python
def estimateAbsoluteMagnitude(spectralType): """Uses the spectral type to lookup an approximate absolute magnitude for the star. """ from .astroclasses import SpectralType specType = SpectralType(spectralType) if specType.classLetter == '': return np.nan elif specType.classNumber == '': specType.classNumber = 5 # approximation using mid magnitude value if specType.lumType == '': specType.lumType = 'V' # assume main sequence LNum = LClassRef[specType.lumType] classNum = specType.classNumber classLet = specType.classLetter try: return absMagDict[classLet][classNum][LNum] # value not in table. Assume the number isn't there (Key p2.7, Ind p3+) except (KeyError, IndexError): try: classLookup = absMagDict[classLet] values = np.array(list(classLookup.values()))[ :, LNum] # only select the right L Type return np.interp(classNum, list(classLookup.keys()), values) except (KeyError, ValueError): return np.nan
[ "def", "estimateAbsoluteMagnitude", "(", "spectralType", ")", ":", "from", ".", "astroclasses", "import", "SpectralType", "specType", "=", "SpectralType", "(", "spectralType", ")", "if", "specType", ".", "classLetter", "==", "''", ":", "return", "np", ".", "nan"...
Uses the spectral type to lookup an approximate absolute magnitude for the star.
[ "Uses", "the", "spectral", "type", "to", "lookup", "an", "approximate", "absolute", "magnitude", "for", "the", "star", "." ]
train
https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/equations.py#L923-L954
Locu/chronology
kronos/kronos/utils/decorators.py
is_remote_allowed
def is_remote_allowed(remote): """ Check if `remote` is allowed to make a CORS request. """ if settings.debug: return True if not remote: return False for domain_pattern in settings.node['cors_whitelist_domains']: if domain_pattern.match(remote): return True return False
python
def is_remote_allowed(remote): """ Check if `remote` is allowed to make a CORS request. """ if settings.debug: return True if not remote: return False for domain_pattern in settings.node['cors_whitelist_domains']: if domain_pattern.match(remote): return True return False
[ "def", "is_remote_allowed", "(", "remote", ")", ":", "if", "settings", ".", "debug", ":", "return", "True", "if", "not", "remote", ":", "return", "False", "for", "domain_pattern", "in", "settings", ".", "node", "[", "'cors_whitelist_domains'", "]", ":", "if"...
Check if `remote` is allowed to make a CORS request.
[ "Check", "if", "remote", "is", "allowed", "to", "make", "a", "CORS", "request", "." ]
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/utils/decorators.py#L31-L42
Locu/chronology
kronos/kronos/utils/decorators.py
endpoint
def endpoint(url, methods=['GET']): """ Returns a decorator which when applied a function, causes that function to serve `url` and only allows the HTTP methods in `methods` """ def decorator(function, methods=methods): # Always allow OPTIONS since CORS requests will need it. methods = set(methods) methods.add('OPTIONS') @wraps(function) def wrapper(environment, start_response): try: start_time = time.time() if function.func_name not in (_serving_mode_endpoints [settings.serving_mode]): start_response('403 Forbidden', [('Content-Type', 'application/json')]) return marshal.dumps({ ERRORS_FIELD: ['kronosd is configured to block access to this ' 'endpoint.'], SUCCESS_FIELD: False, TOOK_FIELD: '%fms' % (1000 * (time.time() - start_time)) }) req_method = environment['REQUEST_METHOD'] # If the request method is not allowed, return 405. if req_method not in methods: start_response('405 Method Not Allowed', [('Allow', ', '.join(methods)), ('Content-Type', 'application/json')]) return marshal.dumps({ ERRORS_FIELD: ['%s method not allowed' % req_method], SUCCESS_FIELD: False, TOOK_FIELD: '%fms' % (1000 * (time.time() - start_time)) }) headers = [] remote_origin = environment.get('HTTP_ORIGIN') if req_method == 'OPTIONS': # This is a CORS preflight request so check that the remote domain is # allowed and respond with appropriate CORS headers. # http://www.html5rocks.com/static/images/cors_server_flowchart.png if is_remote_allowed(remote_origin): headers.extend([ ('Access-Control-Allow-Origin', remote_origin), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Headers', ', '.join( ('Accept', 'Content-Type', 'Origin', 'X-Requested-With'))), ('Access-Control-Allow-Methods', ', '.join(methods)) ]) # We just tell the client that CORS is ok. Client will follow up # with another request to get the answer. start_response('200 OK', headers) return '' # All POST bodies must be json, so decode it here. 
if req_method == 'POST': try: environment['json'] = marshal.loads(environment['wsgi.input'] .read()) except ValueError: start_response('400 Bad Request', [('Content-Type', 'application/json')]) return marshal.dumps({ ERRORS_FIELD: ['Request body must be valid JSON.'], SUCCESS_FIELD: False, TOOK_FIELD: '%fms' % (1000 * (time.time() - start_time)) }) # All responses are JSON. headers.append(('Content-Type', 'application/json')) if remote_origin: headers.append(('Access-Control-Allow-Origin', remote_origin)) response = function(environment, start_response, headers) if not isinstance(response, types.GeneratorType): response[TOOK_FIELD] = '%fms' % (1000 * (time.time() - start_time)) response = marshal.dumps(response) return response except Exception, e: log.exception('endpoint: uncaught exception!') start_response('400 Bad Request', [('Content-Type', 'application/json')]) return marshal.dumps({ ERRORS_FIELD: [repr(e)], SUCCESS_FIELD: False, TOOK_FIELD: '%fms' % (1000 * (time.time() - start_time)) }) if settings.profile: wrapper = profile(wrapper) # Map the URL to serve to this function. Only map certain # endpoints if serving_mode is restrictive. global ENDPOINTS ENDPOINTS[url] = wrapper return wrapper return decorator
python
def endpoint(url, methods=['GET']): """ Returns a decorator which when applied a function, causes that function to serve `url` and only allows the HTTP methods in `methods` """ def decorator(function, methods=methods): # Always allow OPTIONS since CORS requests will need it. methods = set(methods) methods.add('OPTIONS') @wraps(function) def wrapper(environment, start_response): try: start_time = time.time() if function.func_name not in (_serving_mode_endpoints [settings.serving_mode]): start_response('403 Forbidden', [('Content-Type', 'application/json')]) return marshal.dumps({ ERRORS_FIELD: ['kronosd is configured to block access to this ' 'endpoint.'], SUCCESS_FIELD: False, TOOK_FIELD: '%fms' % (1000 * (time.time() - start_time)) }) req_method = environment['REQUEST_METHOD'] # If the request method is not allowed, return 405. if req_method not in methods: start_response('405 Method Not Allowed', [('Allow', ', '.join(methods)), ('Content-Type', 'application/json')]) return marshal.dumps({ ERRORS_FIELD: ['%s method not allowed' % req_method], SUCCESS_FIELD: False, TOOK_FIELD: '%fms' % (1000 * (time.time() - start_time)) }) headers = [] remote_origin = environment.get('HTTP_ORIGIN') if req_method == 'OPTIONS': # This is a CORS preflight request so check that the remote domain is # allowed and respond with appropriate CORS headers. # http://www.html5rocks.com/static/images/cors_server_flowchart.png if is_remote_allowed(remote_origin): headers.extend([ ('Access-Control-Allow-Origin', remote_origin), ('Access-Control-Allow-Credentials', 'true'), ('Access-Control-Allow-Headers', ', '.join( ('Accept', 'Content-Type', 'Origin', 'X-Requested-With'))), ('Access-Control-Allow-Methods', ', '.join(methods)) ]) # We just tell the client that CORS is ok. Client will follow up # with another request to get the answer. start_response('200 OK', headers) return '' # All POST bodies must be json, so decode it here. 
if req_method == 'POST': try: environment['json'] = marshal.loads(environment['wsgi.input'] .read()) except ValueError: start_response('400 Bad Request', [('Content-Type', 'application/json')]) return marshal.dumps({ ERRORS_FIELD: ['Request body must be valid JSON.'], SUCCESS_FIELD: False, TOOK_FIELD: '%fms' % (1000 * (time.time() - start_time)) }) # All responses are JSON. headers.append(('Content-Type', 'application/json')) if remote_origin: headers.append(('Access-Control-Allow-Origin', remote_origin)) response = function(environment, start_response, headers) if not isinstance(response, types.GeneratorType): response[TOOK_FIELD] = '%fms' % (1000 * (time.time() - start_time)) response = marshal.dumps(response) return response except Exception, e: log.exception('endpoint: uncaught exception!') start_response('400 Bad Request', [('Content-Type', 'application/json')]) return marshal.dumps({ ERRORS_FIELD: [repr(e)], SUCCESS_FIELD: False, TOOK_FIELD: '%fms' % (1000 * (time.time() - start_time)) }) if settings.profile: wrapper = profile(wrapper) # Map the URL to serve to this function. Only map certain # endpoints if serving_mode is restrictive. global ENDPOINTS ENDPOINTS[url] = wrapper return wrapper return decorator
[ "def", "endpoint", "(", "url", ",", "methods", "=", "[", "'GET'", "]", ")", ":", "def", "decorator", "(", "function", ",", "methods", "=", "methods", ")", ":", "# Always allow OPTIONS since CORS requests will need it.", "methods", "=", "set", "(", "methods", "...
Returns a decorator which when applied a function, causes that function to serve `url` and only allows the HTTP methods in `methods`
[ "Returns", "a", "decorator", "which", "when", "applied", "a", "function", "causes", "that", "function", "to", "serve", "url", "and", "only", "allows", "the", "HTTP", "methods", "in", "methods" ]
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/utils/decorators.py#L45-L148
StorjOld/heartbeat
heartbeat/OneHash/OneHash.py
OneHash.generate_challenges
def generate_challenges(self, num, root_seed): """ Generate the specified number of hash challenges. :param num: The number of hash challenges we want to generate. :param root_seed: Some value that we use to generate our seeds from. """ # Generate a series of seeds seeds = self.generate_seeds(num, root_seed, self.secret) blocks = self.pick_blocks(num, root_seed) # List of 2-tuples (seed, hash_response) self.challenges = [] # Generate the corresponding hash for each seed for i in range(num): self.challenges.append(Challenge(blocks[i], seeds[i])) response = self.meet_challenge(self.challenges[i]) self.challenges[i].response = response
python
def generate_challenges(self, num, root_seed): """ Generate the specified number of hash challenges. :param num: The number of hash challenges we want to generate. :param root_seed: Some value that we use to generate our seeds from. """ # Generate a series of seeds seeds = self.generate_seeds(num, root_seed, self.secret) blocks = self.pick_blocks(num, root_seed) # List of 2-tuples (seed, hash_response) self.challenges = [] # Generate the corresponding hash for each seed for i in range(num): self.challenges.append(Challenge(blocks[i], seeds[i])) response = self.meet_challenge(self.challenges[i]) self.challenges[i].response = response
[ "def", "generate_challenges", "(", "self", ",", "num", ",", "root_seed", ")", ":", "# Generate a series of seeds", "seeds", "=", "self", ".", "generate_seeds", "(", "num", ",", "root_seed", ",", "self", ".", "secret", ")", "blocks", "=", "self", ".", "pick_b...
Generate the specified number of hash challenges. :param num: The number of hash challenges we want to generate. :param root_seed: Some value that we use to generate our seeds from.
[ "Generate", "the", "specified", "number", "of", "hash", "challenges", "." ]
train
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/OneHash/OneHash.py#L92-L110
StorjOld/heartbeat
heartbeat/OneHash/OneHash.py
OneHash.meet_challenge
def meet_challenge(self, challenge): """ Get the SHA256 hash of a specific file block plus the provided seed. The default block size is one tenth of the file. If the file is larger than 10KB, 1KB is used as the block size. :param challenge: challenge as a `Challenge <heartbeat.Challenge>` object """ chunk_size = min(1024, self.file_size // 10) seed = challenge.seed h = hashlib.sha256() self.file_object.seek(challenge.block) if challenge.block > (self.file_size - chunk_size): end_slice = ( challenge.block - (self.file_size - chunk_size) ) h.update(self.file_object.read(end_slice)) self.file_object.seek(0) h.update(self.file_object.read(chunk_size - end_slice)) else: h.update(self.file_object.read(chunk_size)) h.update(seed) return h.digest()
python
def meet_challenge(self, challenge): """ Get the SHA256 hash of a specific file block plus the provided seed. The default block size is one tenth of the file. If the file is larger than 10KB, 1KB is used as the block size. :param challenge: challenge as a `Challenge <heartbeat.Challenge>` object """ chunk_size = min(1024, self.file_size // 10) seed = challenge.seed h = hashlib.sha256() self.file_object.seek(challenge.block) if challenge.block > (self.file_size - chunk_size): end_slice = ( challenge.block - (self.file_size - chunk_size) ) h.update(self.file_object.read(end_slice)) self.file_object.seek(0) h.update(self.file_object.read(chunk_size - end_slice)) else: h.update(self.file_object.read(chunk_size)) h.update(seed) return h.digest()
[ "def", "meet_challenge", "(", "self", ",", "challenge", ")", ":", "chunk_size", "=", "min", "(", "1024", ",", "self", ".", "file_size", "//", "10", ")", "seed", "=", "challenge", ".", "seed", "h", "=", "hashlib", ".", "sha256", "(", ")", "self", ".",...
Get the SHA256 hash of a specific file block plus the provided seed. The default block size is one tenth of the file. If the file is larger than 10KB, 1KB is used as the block size. :param challenge: challenge as a `Challenge <heartbeat.Challenge>` object
[ "Get", "the", "SHA256", "hash", "of", "a", "specific", "file", "block", "plus", "the", "provided", "seed", ".", "The", "default", "block", "size", "is", "one", "tenth", "of", "the", "file", ".", "If", "the", "file", "is", "larger", "than", "10KB", "1KB...
train
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/OneHash/OneHash.py#L112-L138
StorjOld/heartbeat
heartbeat/OneHash/OneHash.py
OneHash.generate_seeds
def generate_seeds(num, root_seed, secret): """ Deterministically generate list of seeds from a root seed. :param num: Numbers of seeds to generate as int :param root_seed: Seed to start off with. :return: seed values as a list of length num """ # Generate a starting seed from the root if num < 0: raise HeartbeatError('%s is not greater than 0' % num) if secret is None: raise HeartbeatError('secret can not be of type NoneType') seeds = [] try: tmp_seed = hashlib.sha256(root_seed).digest() except TypeError: tmp_seed = hashlib.sha256(str(root_seed).encode()).digest() # Deterministically generate the rest of the seeds for x in range(num): seeds.append(tmp_seed) h = hashlib.sha256(tmp_seed) h.update(secret) tmp_seed = h.digest() return seeds
python
def generate_seeds(num, root_seed, secret): """ Deterministically generate list of seeds from a root seed. :param num: Numbers of seeds to generate as int :param root_seed: Seed to start off with. :return: seed values as a list of length num """ # Generate a starting seed from the root if num < 0: raise HeartbeatError('%s is not greater than 0' % num) if secret is None: raise HeartbeatError('secret can not be of type NoneType') seeds = [] try: tmp_seed = hashlib.sha256(root_seed).digest() except TypeError: tmp_seed = hashlib.sha256(str(root_seed).encode()).digest() # Deterministically generate the rest of the seeds for x in range(num): seeds.append(tmp_seed) h = hashlib.sha256(tmp_seed) h.update(secret) tmp_seed = h.digest() return seeds
[ "def", "generate_seeds", "(", "num", ",", "root_seed", ",", "secret", ")", ":", "# Generate a starting seed from the root", "if", "num", "<", "0", ":", "raise", "HeartbeatError", "(", "'%s is not greater than 0'", "%", "num", ")", "if", "secret", "is", "None", "...
Deterministically generate list of seeds from a root seed. :param num: Numbers of seeds to generate as int :param root_seed: Seed to start off with. :return: seed values as a list of length num
[ "Deterministically", "generate", "list", "of", "seeds", "from", "a", "root", "seed", "." ]
train
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/OneHash/OneHash.py#L141-L168
StorjOld/heartbeat
heartbeat/OneHash/OneHash.py
OneHash.pick_blocks
def pick_blocks(self, num, root_seed): """ Pick a set of positions to start reading blocks from the file that challenges are created for. This is a deterministic operation. Positions are guaranteed to be within the bounds of the file. :param num: Number of blocks to pick :param root_seed: Seed with which begin picking blocks. :return: block values as a list """ if num < 0: raise HeartbeatError('%s is not greater than 0' % num) blocks = [] random.seed(root_seed) for i in range(num): blocks.append(random.randint(0, self.file_size - 1)) return blocks
python
def pick_blocks(self, num, root_seed): """ Pick a set of positions to start reading blocks from the file that challenges are created for. This is a deterministic operation. Positions are guaranteed to be within the bounds of the file. :param num: Number of blocks to pick :param root_seed: Seed with which begin picking blocks. :return: block values as a list """ if num < 0: raise HeartbeatError('%s is not greater than 0' % num) blocks = [] random.seed(root_seed) for i in range(num): blocks.append(random.randint(0, self.file_size - 1)) return blocks
[ "def", "pick_blocks", "(", "self", ",", "num", ",", "root_seed", ")", ":", "if", "num", "<", "0", ":", "raise", "HeartbeatError", "(", "'%s is not greater than 0'", "%", "num", ")", "blocks", "=", "[", "]", "random", ".", "seed", "(", "root_seed", ")", ...
Pick a set of positions to start reading blocks from the file that challenges are created for. This is a deterministic operation. Positions are guaranteed to be within the bounds of the file. :param num: Number of blocks to pick :param root_seed: Seed with which begin picking blocks. :return: block values as a list
[ "Pick", "a", "set", "of", "positions", "to", "start", "reading", "blocks", "from", "the", "file", "that", "challenges", "are", "created", "for", ".", "This", "is", "a", "deterministic", "operation", ".", "Positions", "are", "guaranteed", "to", "be", "within"...
train
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/OneHash/OneHash.py#L170-L189
StorjOld/heartbeat
heartbeat/OneHash/OneHash.py
OneHash.check_answer
def check_answer(self, hash_answer): """ Check if the returned hash is in our challenges list. :param hash_answer: Hash that we compare to our list of challenges :return: boolean indicating if answer is correct, True, or not, False """ for challenge in self.challenges: if challenge.response == hash_answer: # If we don't discard a used challenge then a node # could fake having the file because it already # knows the proper response self.delete_challenge(hash_answer) return True return False
python
def check_answer(self, hash_answer): """ Check if the returned hash is in our challenges list. :param hash_answer: Hash that we compare to our list of challenges :return: boolean indicating if answer is correct, True, or not, False """ for challenge in self.challenges: if challenge.response == hash_answer: # If we don't discard a used challenge then a node # could fake having the file because it already # knows the proper response self.delete_challenge(hash_answer) return True return False
[ "def", "check_answer", "(", "self", ",", "hash_answer", ")", ":", "for", "challenge", "in", "self", ".", "challenges", ":", "if", "challenge", ".", "response", "==", "hash_answer", ":", "# If we don't discard a used challenge then a node", "# could fake having the file ...
Check if the returned hash is in our challenges list. :param hash_answer: Hash that we compare to our list of challenges :return: boolean indicating if answer is correct, True, or not, False
[ "Check", "if", "the", "returned", "hash", "is", "in", "our", "challenges", "list", "." ]
train
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/OneHash/OneHash.py#L191-L204
LasLabs/python-helpscout
helpscout/__init__.py
HelpScout._load_apis
def _load_apis(self): """Find available APIs and set instances property auth proxies.""" helpscout = __import__('helpscout.apis') for class_name in helpscout.apis.__all__: if not class_name.startswith('_'): cls = getattr(helpscout.apis, class_name) api = AuthProxy(self.session, cls) setattr(self, class_name, api) self.__apis__[class_name] = api
python
def _load_apis(self): """Find available APIs and set instances property auth proxies.""" helpscout = __import__('helpscout.apis') for class_name in helpscout.apis.__all__: if not class_name.startswith('_'): cls = getattr(helpscout.apis, class_name) api = AuthProxy(self.session, cls) setattr(self, class_name, api) self.__apis__[class_name] = api
[ "def", "_load_apis", "(", "self", ")", ":", "helpscout", "=", "__import__", "(", "'helpscout.apis'", ")", "for", "class_name", "in", "helpscout", ".", "apis", ".", "__all__", ":", "if", "not", "class_name", ".", "startswith", "(", "'_'", ")", ":", "cls", ...
Find available APIs and set instances property auth proxies.
[ "Find", "available", "APIs", "and", "set", "instances", "property", "auth", "proxies", "." ]
train
https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/__init__.py#L64-L72
MakersF/LoLScraper
lol_scraper/data_types.py
slice_time
def slice_time(begin, end=None, duration=datetime.timedelta(days=2)): """ :param begin: datetime :param end: datetime :param duration: timedelta :return: a generator for a set of timeslices of the given duration """ duration_ms = int(duration.total_seconds() * 1000) previous = int(unix_time(begin) * 1000) next = previous + duration_ms now_ms = unix_time(datetime.datetime.now())*1000 end_slice = now_ms if not end else min(now_ms, int(unix_time(end) * 1000)) while next < end_slice: yield TimeSlice(previous, next) previous = next next += duration_ms now_ms = unix_time(datetime.datetime.now())*1000 end_slice = now_ms if not end else min(now_ms, int(unix_time(end) * 1000)) yield TimeSlice(previous, end_slice)
python
def slice_time(begin, end=None, duration=datetime.timedelta(days=2)): """ :param begin: datetime :param end: datetime :param duration: timedelta :return: a generator for a set of timeslices of the given duration """ duration_ms = int(duration.total_seconds() * 1000) previous = int(unix_time(begin) * 1000) next = previous + duration_ms now_ms = unix_time(datetime.datetime.now())*1000 end_slice = now_ms if not end else min(now_ms, int(unix_time(end) * 1000)) while next < end_slice: yield TimeSlice(previous, next) previous = next next += duration_ms now_ms = unix_time(datetime.datetime.now())*1000 end_slice = now_ms if not end else min(now_ms, int(unix_time(end) * 1000)) yield TimeSlice(previous, end_slice)
[ "def", "slice_time", "(", "begin", ",", "end", "=", "None", ",", "duration", "=", "datetime", ".", "timedelta", "(", "days", "=", "2", ")", ")", ":", "duration_ms", "=", "int", "(", "duration", ".", "total_seconds", "(", ")", "*", "1000", ")", "previ...
:param begin: datetime :param end: datetime :param duration: timedelta :return: a generator for a set of timeslices of the given duration
[ ":", "param", "begin", ":", "datetime", ":", "param", "end", ":", "datetime", ":", "param", "duration", ":", "timedelta", ":", "return", ":", "a", "generator", "for", "a", "set", "of", "timeslices", "of", "the", "given", "duration" ]
train
https://github.com/MakersF/LoLScraper/blob/71d9f2ef24159f2ba5d21467aac1ab785c2bb7e6/lol_scraper/data_types.py#L275-L294
nitipit/appkit
appkit/api/v0_2_4/app.py
make_response
def make_response(response): """Make response tuple Potential features to be added - Parameters validation """ if isinstance(response, unicode) or \ isinstance(response, str): response = (response, 'text/html') return response
python
def make_response(response): """Make response tuple Potential features to be added - Parameters validation """ if isinstance(response, unicode) or \ isinstance(response, str): response = (response, 'text/html') return response
[ "def", "make_response", "(", "response", ")", ":", "if", "isinstance", "(", "response", ",", "unicode", ")", "or", "isinstance", "(", "response", ",", "str", ")", ":", "response", "=", "(", "response", ",", "'text/html'", ")", "return", "response" ]
Make response tuple Potential features to be added - Parameters validation
[ "Make", "response", "tuple" ]
train
https://github.com/nitipit/appkit/blob/08eeaf45a9ca884bf5fe105d47a81269d44b1412/appkit/api/v0_2_4/app.py#L250-L260
nitipit/appkit
appkit/api/v0_2_4/app.py
App.on_notify_load_status
def on_notify_load_status(self, webkitView, *args, **kwargs): """Callback function when the page was loaded completely FYI, this function will be called after $(document).ready() in jQuery """ status = webkitView.get_load_status() if status == status.FINISHED: if self.debug is True: print 'Load finished'
python
def on_notify_load_status(self, webkitView, *args, **kwargs): """Callback function when the page was loaded completely FYI, this function will be called after $(document).ready() in jQuery """ status = webkitView.get_load_status() if status == status.FINISHED: if self.debug is True: print 'Load finished'
[ "def", "on_notify_load_status", "(", "self", ",", "webkitView", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "status", "=", "webkitView", ".", "get_load_status", "(", ")", "if", "status", "==", "status", ".", "FINISHED", ":", "if", "self", ".", ...
Callback function when the page was loaded completely FYI, this function will be called after $(document).ready() in jQuery
[ "Callback", "function", "when", "the", "page", "was", "loaded", "completely", "FYI", "this", "function", "will", "be", "called", "after", "$", "(", "document", ")", ".", "ready", "()", "in", "jQuery" ]
train
https://github.com/nitipit/appkit/blob/08eeaf45a9ca884bf5fe105d47a81269d44b1412/appkit/api/v0_2_4/app.py#L101-L109
nitipit/appkit
appkit/api/v0_2_4/app.py
App._init_ui
def _init_ui(self): """Initial the first UI page. - load html from '/' endpoint - if <title> is defined, use as windows title """ (content, mimetype) = make_response(self._url_map_to_function('/')) try: beautifulsoup = BeautifulSoup(content) self.window.set_title(beautifulsoup.find('title').string) except: pass if self.debug is True: print self.app_dir # Use load_string instead of load_uri because it shows warning. self.webkit_web_view.load_string( content, mime_type=mimetype, encoding='utf-8', base_uri='/', )
python
def _init_ui(self): """Initial the first UI page. - load html from '/' endpoint - if <title> is defined, use as windows title """ (content, mimetype) = make_response(self._url_map_to_function('/')) try: beautifulsoup = BeautifulSoup(content) self.window.set_title(beautifulsoup.find('title').string) except: pass if self.debug is True: print self.app_dir # Use load_string instead of load_uri because it shows warning. self.webkit_web_view.load_string( content, mime_type=mimetype, encoding='utf-8', base_uri='/', )
[ "def", "_init_ui", "(", "self", ")", ":", "(", "content", ",", "mimetype", ")", "=", "make_response", "(", "self", ".", "_url_map_to_function", "(", "'/'", ")", ")", "try", ":", "beautifulsoup", "=", "BeautifulSoup", "(", "content", ")", "self", ".", "wi...
Initial the first UI page. - load html from '/' endpoint - if <title> is defined, use as windows title
[ "Initial", "the", "first", "UI", "page", ".", "-", "load", "html", "from", "/", "endpoint", "-", "if", "<title", ">", "is", "defined", "use", "as", "windows", "title" ]
train
https://github.com/nitipit/appkit/blob/08eeaf45a9ca884bf5fe105d47a81269d44b1412/appkit/api/v0_2_4/app.py#L222-L243
bovee/Aston
aston/trace/math_traces.py
fft
def fft(ts): """ Perform a fast-fourier transform on a Trace """ t_step = ts.index[1] - ts.index[0] oc = np.abs(np.fft.fftshift(np.fft.fft(ts.values))) / len(ts.values) t = np.fft.fftshift(np.fft.fftfreq(len(oc), d=t_step)) return Trace(oc, t)
python
def fft(ts): """ Perform a fast-fourier transform on a Trace """ t_step = ts.index[1] - ts.index[0] oc = np.abs(np.fft.fftshift(np.fft.fft(ts.values))) / len(ts.values) t = np.fft.fftshift(np.fft.fftfreq(len(oc), d=t_step)) return Trace(oc, t)
[ "def", "fft", "(", "ts", ")", ":", "t_step", "=", "ts", ".", "index", "[", "1", "]", "-", "ts", ".", "index", "[", "0", "]", "oc", "=", "np", ".", "abs", "(", "np", ".", "fft", ".", "fftshift", "(", "np", ".", "fft", ".", "fft", "(", "ts"...
Perform a fast-fourier transform on a Trace
[ "Perform", "a", "fast", "-", "fourier", "transform", "on", "a", "Trace" ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/math_traces.py#L44-L51
bovee/Aston
aston/trace/math_traces.py
movingaverage
def movingaverage(arr, window): """ Calculates the moving average ("rolling mean") of an array of a certain window size. """ m = np.ones(int(window)) / int(window) return scipy.ndimage.convolve1d(arr, m, axis=0, mode='reflect')
python
def movingaverage(arr, window): """ Calculates the moving average ("rolling mean") of an array of a certain window size. """ m = np.ones(int(window)) / int(window) return scipy.ndimage.convolve1d(arr, m, axis=0, mode='reflect')
[ "def", "movingaverage", "(", "arr", ",", "window", ")", ":", "m", "=", "np", ".", "ones", "(", "int", "(", "window", ")", ")", "/", "int", "(", "window", ")", "return", "scipy", ".", "ndimage", ".", "convolve1d", "(", "arr", ",", "m", ",", "axis"...
Calculates the moving average ("rolling mean") of an array of a certain window size.
[ "Calculates", "the", "moving", "average", "(", "rolling", "mean", ")", "of", "an", "array", "of", "a", "certain", "window", "size", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/math_traces.py#L73-L79
bovee/Aston
aston/trace/math_traces.py
loads
def loads(ast_str): """ Create a Trace from a suitably compressed string. """ data = zlib.decompress(ast_str) li = struct.unpack('<L', data[0:4])[0] lt = struct.unpack('<L', data[4:8])[0] n = data[8:8 + li].decode('utf-8') t = np.fromstring(data[8 + li:8 + li + lt]) d = np.fromstring(data[8 + li + lt:]) return Trace(d, t, name=n)
python
def loads(ast_str): """ Create a Trace from a suitably compressed string. """ data = zlib.decompress(ast_str) li = struct.unpack('<L', data[0:4])[0] lt = struct.unpack('<L', data[4:8])[0] n = data[8:8 + li].decode('utf-8') t = np.fromstring(data[8 + li:8 + li + lt]) d = np.fromstring(data[8 + li + lt:]) return Trace(d, t, name=n)
[ "def", "loads", "(", "ast_str", ")", ":", "data", "=", "zlib", ".", "decompress", "(", "ast_str", ")", "li", "=", "struct", ".", "unpack", "(", "'<L'", ",", "data", "[", "0", ":", "4", "]", ")", "[", "0", "]", "lt", "=", "struct", ".", "unpack"...
Create a Trace from a suitably compressed string.
[ "Create", "a", "Trace", "from", "a", "suitably", "compressed", "string", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/math_traces.py#L95-L106
bovee/Aston
aston/trace/math_traces.py
dumps
def dumps(asts): """ Create a compressed string from an Trace. """ d = asts.values.tostring() t = asts.index.values.astype(float).tostring() lt = struct.pack('<L', len(t)) i = asts.name.encode('utf-8') li = struct.pack('<L', len(i)) try: # python 2 return buffer(zlib.compress(li + lt + i + t + d)) except NameError: # python 3 return zlib.compress(li + lt + i + t + d)
python
def dumps(asts): """ Create a compressed string from an Trace. """ d = asts.values.tostring() t = asts.index.values.astype(float).tostring() lt = struct.pack('<L', len(t)) i = asts.name.encode('utf-8') li = struct.pack('<L', len(i)) try: # python 2 return buffer(zlib.compress(li + lt + i + t + d)) except NameError: # python 3 return zlib.compress(li + lt + i + t + d)
[ "def", "dumps", "(", "asts", ")", ":", "d", "=", "asts", ".", "values", ".", "tostring", "(", ")", "t", "=", "asts", ".", "index", ".", "values", ".", "astype", "(", "float", ")", ".", "tostring", "(", ")", "lt", "=", "struct", ".", "pack", "("...
Create a compressed string from an Trace.
[ "Create", "a", "compressed", "string", "from", "an", "Trace", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/math_traces.py#L109-L121
bovee/Aston
aston/trace/math_traces.py
ts_func
def ts_func(f): """ This wraps a function that would normally only accept an array and allows it to operate on a DataFrame. Useful for applying numpy functions to DataFrames. """ def wrap_func(df, *args): # TODO: should vectorize to apply over all columns? return Chromatogram(f(df.values, *args), df.index, df.columns) return wrap_func
python
def ts_func(f): """ This wraps a function that would normally only accept an array and allows it to operate on a DataFrame. Useful for applying numpy functions to DataFrames. """ def wrap_func(df, *args): # TODO: should vectorize to apply over all columns? return Chromatogram(f(df.values, *args), df.index, df.columns) return wrap_func
[ "def", "ts_func", "(", "f", ")", ":", "def", "wrap_func", "(", "df", ",", "*", "args", ")", ":", "# TODO: should vectorize to apply over all columns?", "return", "Chromatogram", "(", "f", "(", "df", ".", "values", ",", "*", "args", ")", ",", "df", ".", "...
This wraps a function that would normally only accept an array and allows it to operate on a DataFrame. Useful for applying numpy functions to DataFrames.
[ "This", "wraps", "a", "function", "that", "would", "normally", "only", "accept", "an", "array", "and", "allows", "it", "to", "operate", "on", "a", "DataFrame", ".", "Useful", "for", "applying", "numpy", "functions", "to", "DataFrames", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/math_traces.py#L124-L133
bovee/Aston
aston/trace/events.py
desaturate
def desaturate(c, k=0): """ Utility function to desaturate a color c by an amount k. """ from matplotlib.colors import ColorConverter c = ColorConverter().to_rgb(c) intensity = 0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2] return [intensity * k + i * (1 - k) for i in c]
python
def desaturate(c, k=0): """ Utility function to desaturate a color c by an amount k. """ from matplotlib.colors import ColorConverter c = ColorConverter().to_rgb(c) intensity = 0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2] return [intensity * k + i * (1 - k) for i in c]
[ "def", "desaturate", "(", "c", ",", "k", "=", "0", ")", ":", "from", "matplotlib", ".", "colors", "import", "ColorConverter", "c", "=", "ColorConverter", "(", ")", ".", "to_rgb", "(", "c", ")", "intensity", "=", "0.299", "*", "c", "[", "0", "]", "+...
Utility function to desaturate a color c by an amount k.
[ "Utility", "function", "to", "desaturate", "a", "color", "c", "by", "an", "amount", "k", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/events.py#L2-L9
bovee/Aston
aston/spectra/math.py
find_spectrum_match
def find_spectrum_match(spec, spec_lib, method='euclidian'): """ Find spectrum in spec_lib most similar to spec. """ # filter out any points with abundance below 1 % # spec[spec / np.sum(spec) < 0.01] = 0 # normalize everything to sum to 1 spec = spec / np.max(spec) if method == 'dot': d1 = (spec_lib * lil_matrix(spec).T).sum(axis=1).A ** 2 d2 = np.sum(spec ** 2) * spec_lib.multiply(spec_lib).sum(axis=1).A dist = d1 / d2 elif method == 'euclidian': # st_spc = spectrum[np.newaxis, :].repeat(spec_lib.shape[0], axis=0) st_spc = dia_matrix((spec, [0]), shape=(len(spec), len(spec))) # calculate the residual sum of squares from spectrum to library dist_sp = spec_lib.multiply(spec_lib) - 2 * spec_lib.dot(st_spc) dist = dist_sp.sum(axis=1).A + np.sum(spec ** 2) return (dist.argmin(), dist.min())
python
def find_spectrum_match(spec, spec_lib, method='euclidian'): """ Find spectrum in spec_lib most similar to spec. """ # filter out any points with abundance below 1 % # spec[spec / np.sum(spec) < 0.01] = 0 # normalize everything to sum to 1 spec = spec / np.max(spec) if method == 'dot': d1 = (spec_lib * lil_matrix(spec).T).sum(axis=1).A ** 2 d2 = np.sum(spec ** 2) * spec_lib.multiply(spec_lib).sum(axis=1).A dist = d1 / d2 elif method == 'euclidian': # st_spc = spectrum[np.newaxis, :].repeat(spec_lib.shape[0], axis=0) st_spc = dia_matrix((spec, [0]), shape=(len(spec), len(spec))) # calculate the residual sum of squares from spectrum to library dist_sp = spec_lib.multiply(spec_lib) - 2 * spec_lib.dot(st_spc) dist = dist_sp.sum(axis=1).A + np.sum(spec ** 2) return (dist.argmin(), dist.min())
[ "def", "find_spectrum_match", "(", "spec", ",", "spec_lib", ",", "method", "=", "'euclidian'", ")", ":", "# filter out any points with abundance below 1 %", "# spec[spec / np.sum(spec) < 0.01] = 0", "# normalize everything to sum to 1", "spec", "=", "spec", "/", "np", ".", ...
Find spectrum in spec_lib most similar to spec.
[ "Find", "spectrum", "in", "spec_lib", "most", "similar", "to", "spec", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/spectra/math.py#L8-L27
jnrbsn/daemonocle
daemonocle/cli.py
DaemonCLI.get_command
def get_command(self, ctx, name): """Get a callable command object.""" if name not in self.daemon_class.list_actions(): return None # The context object is a Daemon object daemon = ctx.obj def subcommand(debug=False): """Call a daemonocle action.""" if daemon.detach and debug: daemon.detach = False daemon.do_action(name) # Override the docstring for the function so that it shows up # correctly in the help output subcommand.__doc__ = daemon.get_action(name).__doc__ if name == 'start': # Add a --debug option for start subcommand = click.option( '--debug', is_flag=True, help='Do NOT detach and run in the background.' )(subcommand) # Make it into a click command subcommand = click.command( name, options_metavar=self.options_metavar)(subcommand) return subcommand
python
def get_command(self, ctx, name): """Get a callable command object.""" if name not in self.daemon_class.list_actions(): return None # The context object is a Daemon object daemon = ctx.obj def subcommand(debug=False): """Call a daemonocle action.""" if daemon.detach and debug: daemon.detach = False daemon.do_action(name) # Override the docstring for the function so that it shows up # correctly in the help output subcommand.__doc__ = daemon.get_action(name).__doc__ if name == 'start': # Add a --debug option for start subcommand = click.option( '--debug', is_flag=True, help='Do NOT detach and run in the background.' )(subcommand) # Make it into a click command subcommand = click.command( name, options_metavar=self.options_metavar)(subcommand) return subcommand
[ "def", "get_command", "(", "self", ",", "ctx", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "daemon_class", ".", "list_actions", "(", ")", ":", "return", "None", "# The context object is a Daemon object", "daemon", "=", "ctx", ".", "obj", ...
Get a callable command object.
[ "Get", "a", "callable", "command", "object", "." ]
train
https://github.com/jnrbsn/daemonocle/blob/a1e09bc99608eab8dfe024c6741b7ecb7143f717/daemonocle/cli.py#L44-L74
timdiels/pytil
pytil/configuration.py
ConfigurationLoader.load
def load(self, path=None): ''' Load configuration (from configuration files). Parameters ---------- path : ~pathlib.Path or None Path to configuration file, which must exist; or path to directory containing a configuration file; or None. Returns ------- ~typing.Dict[str, ~typing.Dict[str, str]] The configuration as a dict of sections mapping section name to options. Each options dict maps from option name to option value. The ``default`` section is not included. However, all options from the ``default`` section are included in each returned section. Raises ------ ValueError If ``path`` is a missing file; or if it is a directory which does not contain the configuration file. Examples -------- >>> loader.load() { 'section1': { 'option1': 'value', 'option2': 'value2', } } ''' # Add path paths = self._paths.copy() if path: if path.is_dir(): path /= '{}.conf'.format(self._configuration_name) paths.append(path) # Prepend file sys root to abs paths paths = [(path_._root / str(x)[1:] if x.is_absolute() else x) for x in paths] if path: path = paths[-1] # Passed path must exist if not path.exists(): raise ValueError('Expected configuration file at {}'.format(path)) # Configure parser config_parser = ConfigParser( inline_comment_prefixes=('#', ';'), empty_lines_in_values=False, default_section='default', interpolation=ExtendedInterpolation() ) def option_transform(name): return name.replace('-', '_').replace(' ', '_').lower() config_parser.optionxform = option_transform # Parse defaults and configs with suppress(FileNotFoundError): defaults_contents = resource_string(self._package_name, 'data/{}.defaults.conf'.format(self._configuration_name)) config_parser.read_string(defaults_contents.decode('UTF-8')) config_parser.read([str(x) for x in paths]) # reads in given order config = {k : dict(v) for k,v in config_parser.items()} del config['default'] return config
python
def load(self, path=None): ''' Load configuration (from configuration files). Parameters ---------- path : ~pathlib.Path or None Path to configuration file, which must exist; or path to directory containing a configuration file; or None. Returns ------- ~typing.Dict[str, ~typing.Dict[str, str]] The configuration as a dict of sections mapping section name to options. Each options dict maps from option name to option value. The ``default`` section is not included. However, all options from the ``default`` section are included in each returned section. Raises ------ ValueError If ``path`` is a missing file; or if it is a directory which does not contain the configuration file. Examples -------- >>> loader.load() { 'section1': { 'option1': 'value', 'option2': 'value2', } } ''' # Add path paths = self._paths.copy() if path: if path.is_dir(): path /= '{}.conf'.format(self._configuration_name) paths.append(path) # Prepend file sys root to abs paths paths = [(path_._root / str(x)[1:] if x.is_absolute() else x) for x in paths] if path: path = paths[-1] # Passed path must exist if not path.exists(): raise ValueError('Expected configuration file at {}'.format(path)) # Configure parser config_parser = ConfigParser( inline_comment_prefixes=('#', ';'), empty_lines_in_values=False, default_section='default', interpolation=ExtendedInterpolation() ) def option_transform(name): return name.replace('-', '_').replace(' ', '_').lower() config_parser.optionxform = option_transform # Parse defaults and configs with suppress(FileNotFoundError): defaults_contents = resource_string(self._package_name, 'data/{}.defaults.conf'.format(self._configuration_name)) config_parser.read_string(defaults_contents.decode('UTF-8')) config_parser.read([str(x) for x in paths]) # reads in given order config = {k : dict(v) for k,v in config_parser.items()} del config['default'] return config
[ "def", "load", "(", "self", ",", "path", "=", "None", ")", ":", "# Add path", "paths", "=", "self", ".", "_paths", ".", "copy", "(", ")", "if", "path", ":", "if", "path", ".", "is_dir", "(", ")", ":", "path", "/=", "'{}.conf'", ".", "format", "("...
Load configuration (from configuration files). Parameters ---------- path : ~pathlib.Path or None Path to configuration file, which must exist; or path to directory containing a configuration file; or None. Returns ------- ~typing.Dict[str, ~typing.Dict[str, str]] The configuration as a dict of sections mapping section name to options. Each options dict maps from option name to option value. The ``default`` section is not included. However, all options from the ``default`` section are included in each returned section. Raises ------ ValueError If ``path`` is a missing file; or if it is a directory which does not contain the configuration file. Examples -------- >>> loader.load() { 'section1': { 'option1': 'value', 'option2': 'value2', } }
[ "Load", "configuration", "(", "from", "configuration", "files", ")", "." ]
train
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/configuration.py#L80-L151
timdiels/pytil
pytil/configuration.py
ConfigurationLoader.cli_help_message
def cli_help_message(self, description): ''' Get a user friendly help message that can be dropped in a `click.Command`\ 's epilog. Parameters ---------- description : str Description of the configuration file to include in the message. Returns ------- str A help message that uses :py:mod:`click`\ 's help formatting constructs (e.g. ``\b``). ''' config_files_listing = '\n'.join(' {}. {!s}'.format(i, path) for i, path in enumerate(self._paths, 1)) text = dedent('''\ {config_file}: {description} {config_file} files are read from the following locations: \b {config_files_listing} Any configuration file can override options set by previous configuration files. Some configuration file locations can be changed using the XDG standard (http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html). ''').format( config_file='{}.conf'.format(self._configuration_name), description=description, config_files_listing=config_files_listing ) return text
python
def cli_help_message(self, description): ''' Get a user friendly help message that can be dropped in a `click.Command`\ 's epilog. Parameters ---------- description : str Description of the configuration file to include in the message. Returns ------- str A help message that uses :py:mod:`click`\ 's help formatting constructs (e.g. ``\b``). ''' config_files_listing = '\n'.join(' {}. {!s}'.format(i, path) for i, path in enumerate(self._paths, 1)) text = dedent('''\ {config_file}: {description} {config_file} files are read from the following locations: \b {config_files_listing} Any configuration file can override options set by previous configuration files. Some configuration file locations can be changed using the XDG standard (http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html). ''').format( config_file='{}.conf'.format(self._configuration_name), description=description, config_files_listing=config_files_listing ) return text
[ "def", "cli_help_message", "(", "self", ",", "description", ")", ":", "config_files_listing", "=", "'\\n'", ".", "join", "(", "' {}. {!s}'", ".", "format", "(", "i", ",", "path", ")", "for", "i", ",", "path", "in", "enumerate", "(", "self", ".", "_pat...
Get a user friendly help message that can be dropped in a `click.Command`\ 's epilog. Parameters ---------- description : str Description of the configuration file to include in the message. Returns ------- str A help message that uses :py:mod:`click`\ 's help formatting constructs (e.g. ``\b``).
[ "Get", "a", "user", "friendly", "help", "message", "that", "can", "be", "dropped", "in", "a", "click", ".", "Command", "\\", "s", "epilog", "." ]
train
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/configuration.py#L153-L187
PedalPi/Application
application/application.py
Application.start
def start(self): """ Start the application, initializing your components. """ current_pedalboard = self.controller(CurrentController).pedalboard if current_pedalboard is None: self.log('Not exists any current pedalboard.') self.log('Use CurrentController to set the current pedalboard') else: self.log('Load current pedalboard - "{}"', current_pedalboard.name) self.mod_host.pedalboard = current_pedalboard for component in self.components: component.init() self.log('Load component - {}', component.__class__.__name__) self.log('Components loaded') atexit.register(self.stop)
python
def start(self): """ Start the application, initializing your components. """ current_pedalboard = self.controller(CurrentController).pedalboard if current_pedalboard is None: self.log('Not exists any current pedalboard.') self.log('Use CurrentController to set the current pedalboard') else: self.log('Load current pedalboard - "{}"', current_pedalboard.name) self.mod_host.pedalboard = current_pedalboard for component in self.components: component.init() self.log('Load component - {}', component.__class__.__name__) self.log('Components loaded') atexit.register(self.stop)
[ "def", "start", "(", "self", ")", ":", "current_pedalboard", "=", "self", ".", "controller", "(", "CurrentController", ")", ".", "pedalboard", "if", "current_pedalboard", "is", "None", ":", "self", ".", "log", "(", "'Not exists any current pedalboard.'", ")", "s...
Start the application, initializing your components.
[ "Start", "the", "application", "initializing", "your", "components", "." ]
train
https://github.com/PedalPi/Application/blob/3fdf6f97cfef97a7f1d90a5881dd04324c229f9d/application/application.py#L159-L177
PedalPi/Application
application/application.py
Application.stop
def stop(self): """ Stop the application, closing your components. """ for component in self.components: component.close() self.log('Stopping component - {}', component.__class__.__name__) for controller in self.controllers.values(): controller.close() self.log('Stopping controller - {}', controller.__class__.__name__) atexit.unregister(self.stop)
python
def stop(self): """ Stop the application, closing your components. """ for component in self.components: component.close() self.log('Stopping component - {}', component.__class__.__name__) for controller in self.controllers.values(): controller.close() self.log('Stopping controller - {}', controller.__class__.__name__) atexit.unregister(self.stop)
[ "def", "stop", "(", "self", ")", ":", "for", "component", "in", "self", ".", "components", ":", "component", ".", "close", "(", ")", "self", ".", "log", "(", "'Stopping component - {}'", ",", "component", ".", "__class__", ".", "__name__", ")", "for", "c...
Stop the application, closing your components.
[ "Stop", "the", "application", "closing", "your", "components", "." ]
train
https://github.com/PedalPi/Application/blob/3fdf6f97cfef97a7f1d90a5881dd04324c229f9d/application/application.py#L179-L191
Locu/chronology
jia/jia/utils.py
get_seconds
def get_seconds(value, scale): """Convert time scale dict to seconds Given a dictionary with keys for scale and value, convert value into seconds based on scale. """ scales = { 'seconds': lambda x: x, 'minutes': lambda x: x * 60, 'hours': lambda x: x * 60 * 60, 'days': lambda x: x * 60 * 60 * 24, 'weeks': lambda x: x * 60 * 60 * 24 * 7, 'months': lambda x: x * 60 * 60 * 24 * 30, 'years': lambda x: x * 60 * 60 * 24 * 365, } return scales[scale](value)
python
def get_seconds(value, scale): """Convert time scale dict to seconds Given a dictionary with keys for scale and value, convert value into seconds based on scale. """ scales = { 'seconds': lambda x: x, 'minutes': lambda x: x * 60, 'hours': lambda x: x * 60 * 60, 'days': lambda x: x * 60 * 60 * 24, 'weeks': lambda x: x * 60 * 60 * 24 * 7, 'months': lambda x: x * 60 * 60 * 24 * 30, 'years': lambda x: x * 60 * 60 * 24 * 365, } return scales[scale](value)
[ "def", "get_seconds", "(", "value", ",", "scale", ")", ":", "scales", "=", "{", "'seconds'", ":", "lambda", "x", ":", "x", ",", "'minutes'", ":", "lambda", "x", ":", "x", "*", "60", ",", "'hours'", ":", "lambda", "x", ":", "x", "*", "60", "*", ...
Convert time scale dict to seconds Given a dictionary with keys for scale and value, convert value into seconds based on scale.
[ "Convert", "time", "scale", "dict", "to", "seconds" ]
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/jia/utils.py#L6-L21
elemoine/papyrus
papyrus/protocol.py
_get_col_epsg
def _get_col_epsg(mapped_class, geom_attr): """Get the EPSG code associated with a geometry attribute. Arguments: geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class. """ col = class_mapper(mapped_class).get_property(geom_attr).columns[0] return col.type.srid
python
def _get_col_epsg(mapped_class, geom_attr): """Get the EPSG code associated with a geometry attribute. Arguments: geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class. """ col = class_mapper(mapped_class).get_property(geom_attr).columns[0] return col.type.srid
[ "def", "_get_col_epsg", "(", "mapped_class", ",", "geom_attr", ")", ":", "col", "=", "class_mapper", "(", "mapped_class", ")", ".", "get_property", "(", "geom_attr", ")", ".", "columns", "[", "0", "]", "return", "col", ".", "type", ".", "srid" ]
Get the EPSG code associated with a geometry attribute. Arguments: geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class.
[ "Get", "the", "EPSG", "code", "associated", "with", "a", "geometry", "attribute", "." ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L44-L56
elemoine/papyrus
papyrus/protocol.py
create_geom_filter
def create_geom_filter(request, mapped_class, geom_attr): """Create MapFish geometry filter based on the request params. Either a box or within or geometry filter, depending on the request params. Additional named arguments are passed to the spatial filter. Arguments: request the request. mapped_class the SQLAlchemy mapped class. geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class. """ tolerance = float(request.params.get('tolerance', 0.0)) epsg = None if 'epsg' in request.params: epsg = int(request.params['epsg']) box = request.params.get('bbox') shape = None if box is not None: box = [float(x) for x in box.split(',')] shape = Polygon(((box[0], box[1]), (box[0], box[3]), (box[2], box[3]), (box[2], box[1]), (box[0], box[1]))) elif 'lon' in request.params and 'lat' in request.params: shape = Point(float(request.params['lon']), float(request.params['lat'])) elif 'geometry' in request.params: shape = loads(request.params['geometry'], object_hook=GeoJSON.to_instance) shape = asShape(shape) if shape is None: return None column_epsg = _get_col_epsg(mapped_class, geom_attr) geom_attr = getattr(mapped_class, geom_attr) epsg = column_epsg if epsg is None else epsg if epsg != column_epsg: geom_attr = func.ST_Transform(geom_attr, epsg) geometry = from_shape(shape, srid=epsg) return func.ST_DWITHIN(geom_attr, geometry, tolerance)
python
def create_geom_filter(request, mapped_class, geom_attr): """Create MapFish geometry filter based on the request params. Either a box or within or geometry filter, depending on the request params. Additional named arguments are passed to the spatial filter. Arguments: request the request. mapped_class the SQLAlchemy mapped class. geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class. """ tolerance = float(request.params.get('tolerance', 0.0)) epsg = None if 'epsg' in request.params: epsg = int(request.params['epsg']) box = request.params.get('bbox') shape = None if box is not None: box = [float(x) for x in box.split(',')] shape = Polygon(((box[0], box[1]), (box[0], box[3]), (box[2], box[3]), (box[2], box[1]), (box[0], box[1]))) elif 'lon' in request.params and 'lat' in request.params: shape = Point(float(request.params['lon']), float(request.params['lat'])) elif 'geometry' in request.params: shape = loads(request.params['geometry'], object_hook=GeoJSON.to_instance) shape = asShape(shape) if shape is None: return None column_epsg = _get_col_epsg(mapped_class, geom_attr) geom_attr = getattr(mapped_class, geom_attr) epsg = column_epsg if epsg is None else epsg if epsg != column_epsg: geom_attr = func.ST_Transform(geom_attr, epsg) geometry = from_shape(shape, srid=epsg) return func.ST_DWITHIN(geom_attr, geometry, tolerance)
[ "def", "create_geom_filter", "(", "request", ",", "mapped_class", ",", "geom_attr", ")", ":", "tolerance", "=", "float", "(", "request", ".", "params", ".", "get", "(", "'tolerance'", ",", "0.0", ")", ")", "epsg", "=", "None", "if", "'epsg'", "in", "requ...
Create MapFish geometry filter based on the request params. Either a box or within or geometry filter, depending on the request params. Additional named arguments are passed to the spatial filter. Arguments: request the request. mapped_class the SQLAlchemy mapped class. geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class.
[ "Create", "MapFish", "geometry", "filter", "based", "on", "the", "request", "params", ".", "Either", "a", "box", "or", "within", "or", "geometry", "filter", "depending", "on", "the", "request", "params", ".", "Additional", "named", "arguments", "are", "passed"...
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L59-L103
elemoine/papyrus
papyrus/protocol.py
create_attr_filter
def create_attr_filter(request, mapped_class): """Create an ``and_`` SQLAlchemy filter (a ClauseList object) based on the request params (``queryable``, ``eq``, ``ne``, ...). Arguments: request the request. mapped_class the SQLAlchemy mapped class. """ mapping = { 'eq': '__eq__', 'ne': '__ne__', 'lt': '__lt__', 'lte': '__le__', 'gt': '__gt__', 'gte': '__ge__', 'like': 'like', 'ilike': 'ilike' } filters = [] if 'queryable' in request.params: queryable = request.params['queryable'].split(',') for k in request.params: if len(request.params[k]) <= 0 or '__' not in k: continue col, op = k.split("__") if col not in queryable or op not in mapping: continue column = getattr(mapped_class, col) f = getattr(column, mapping[op])(request.params[k]) filters.append(f) return and_(*filters) if len(filters) > 0 else None
python
def create_attr_filter(request, mapped_class): """Create an ``and_`` SQLAlchemy filter (a ClauseList object) based on the request params (``queryable``, ``eq``, ``ne``, ...). Arguments: request the request. mapped_class the SQLAlchemy mapped class. """ mapping = { 'eq': '__eq__', 'ne': '__ne__', 'lt': '__lt__', 'lte': '__le__', 'gt': '__gt__', 'gte': '__ge__', 'like': 'like', 'ilike': 'ilike' } filters = [] if 'queryable' in request.params: queryable = request.params['queryable'].split(',') for k in request.params: if len(request.params[k]) <= 0 or '__' not in k: continue col, op = k.split("__") if col not in queryable or op not in mapping: continue column = getattr(mapped_class, col) f = getattr(column, mapping[op])(request.params[k]) filters.append(f) return and_(*filters) if len(filters) > 0 else None
[ "def", "create_attr_filter", "(", "request", ",", "mapped_class", ")", ":", "mapping", "=", "{", "'eq'", ":", "'__eq__'", ",", "'ne'", ":", "'__ne__'", ",", "'lt'", ":", "'__lt__'", ",", "'lte'", ":", "'__le__'", ",", "'gt'", ":", "'__gt__'", ",", "'gte'...
Create an ``and_`` SQLAlchemy filter (a ClauseList object) based on the request params (``queryable``, ``eq``, ``ne``, ...). Arguments: request the request. mapped_class the SQLAlchemy mapped class.
[ "Create", "an", "and_", "SQLAlchemy", "filter", "(", "a", "ClauseList", "object", ")", "based", "on", "the", "request", "params", "(", "queryable", "eq", "ne", "...", ")", "." ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L106-L141
elemoine/papyrus
papyrus/protocol.py
create_filter
def create_filter(request, mapped_class, geom_attr, **kwargs): """ Create MapFish default filter based on the request params. Arguments: request the request. mapped_class the SQLAlchemy mapped class. geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class. \\**kwargs additional arguments passed to ``create_geom_filter()``. """ attr_filter = create_attr_filter(request, mapped_class) geom_filter = create_geom_filter(request, mapped_class, geom_attr, **kwargs) if geom_filter is None and attr_filter is None: return None if geom_filter is None: return attr_filter if attr_filter is None: return geom_filter return and_(geom_filter, attr_filter)
python
def create_filter(request, mapped_class, geom_attr, **kwargs): """ Create MapFish default filter based on the request params. Arguments: request the request. mapped_class the SQLAlchemy mapped class. geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class. \\**kwargs additional arguments passed to ``create_geom_filter()``. """ attr_filter = create_attr_filter(request, mapped_class) geom_filter = create_geom_filter(request, mapped_class, geom_attr, **kwargs) if geom_filter is None and attr_filter is None: return None if geom_filter is None: return attr_filter if attr_filter is None: return geom_filter return and_(geom_filter, attr_filter)
[ "def", "create_filter", "(", "request", ",", "mapped_class", ",", "geom_attr", ",", "*", "*", "kwargs", ")", ":", "attr_filter", "=", "create_attr_filter", "(", "request", ",", "mapped_class", ")", "geom_filter", "=", "create_geom_filter", "(", "request", ",", ...
Create MapFish default filter based on the request params. Arguments: request the request. mapped_class the SQLAlchemy mapped class. geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class. \\**kwargs additional arguments passed to ``create_geom_filter()``.
[ "Create", "MapFish", "default", "filter", "based", "on", "the", "request", "params", "." ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L144-L172
elemoine/papyrus
papyrus/protocol.py
Protocol._filter_attrs
def _filter_attrs(self, feature, request): """ Remove some attributes from the feature and set the geometry to None in the feature based ``attrs`` and the ``no_geom`` parameters. """ if 'attrs' in request.params: attrs = request.params['attrs'].split(',') props = feature.properties new_props = {} for name in attrs: if name in props: new_props[name] = props[name] feature.properties = new_props if asbool(request.params.get('no_geom', False)): feature.geometry = None return feature
python
def _filter_attrs(self, feature, request): """ Remove some attributes from the feature and set the geometry to None in the feature based ``attrs`` and the ``no_geom`` parameters. """ if 'attrs' in request.params: attrs = request.params['attrs'].split(',') props = feature.properties new_props = {} for name in attrs: if name in props: new_props[name] = props[name] feature.properties = new_props if asbool(request.params.get('no_geom', False)): feature.geometry = None return feature
[ "def", "_filter_attrs", "(", "self", ",", "feature", ",", "request", ")", ":", "if", "'attrs'", "in", "request", ".", "params", ":", "attrs", "=", "request", ".", "params", "[", "'attrs'", "]", ".", "split", "(", "','", ")", "props", "=", "feature", ...
Remove some attributes from the feature and set the geometry to None in the feature based ``attrs`` and the ``no_geom`` parameters.
[ "Remove", "some", "attributes", "from", "the", "feature", "and", "set", "the", "geometry", "to", "None", "in", "the", "feature", "based", "attrs", "and", "the", "no_geom", "parameters", "." ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L233-L246
elemoine/papyrus
papyrus/protocol.py
Protocol._get_order_by
def _get_order_by(self, request): """ Return an SA order_by """ attr = request.params.get('sort', request.params.get('order_by')) if attr is None or not hasattr(self.mapped_class, attr): return None if request.params.get('dir', '').upper() == 'DESC': return desc(getattr(self.mapped_class, attr)) else: return asc(getattr(self.mapped_class, attr))
python
def _get_order_by(self, request): """ Return an SA order_by """ attr = request.params.get('sort', request.params.get('order_by')) if attr is None or not hasattr(self.mapped_class, attr): return None if request.params.get('dir', '').upper() == 'DESC': return desc(getattr(self.mapped_class, attr)) else: return asc(getattr(self.mapped_class, attr))
[ "def", "_get_order_by", "(", "self", ",", "request", ")", ":", "attr", "=", "request", ".", "params", ".", "get", "(", "'sort'", ",", "request", ".", "params", ".", "get", "(", "'order_by'", ")", ")", "if", "attr", "is", "None", "or", "not", "hasattr...
Return an SA order_by
[ "Return", "an", "SA", "order_by" ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L248-L256
elemoine/papyrus
papyrus/protocol.py
Protocol._query
def _query(self, request, filter=None): """ Build a query based on the filter and the request params, and send the query to the database. """ limit = None offset = None if 'maxfeatures' in request.params: limit = int(request.params['maxfeatures']) if 'limit' in request.params: limit = int(request.params['limit']) if 'offset' in request.params: offset = int(request.params['offset']) if filter is None: filter = create_filter(request, self.mapped_class, self.geom_attr) query = self.Session().query(self.mapped_class) if filter is not None: query = query.filter(filter) order_by = self._get_order_by(request) if order_by is not None: query = query.order_by(order_by) query = query.limit(limit).offset(offset) return query.all()
python
def _query(self, request, filter=None): """ Build a query based on the filter and the request params, and send the query to the database. """ limit = None offset = None if 'maxfeatures' in request.params: limit = int(request.params['maxfeatures']) if 'limit' in request.params: limit = int(request.params['limit']) if 'offset' in request.params: offset = int(request.params['offset']) if filter is None: filter = create_filter(request, self.mapped_class, self.geom_attr) query = self.Session().query(self.mapped_class) if filter is not None: query = query.filter(filter) order_by = self._get_order_by(request) if order_by is not None: query = query.order_by(order_by) query = query.limit(limit).offset(offset) return query.all()
[ "def", "_query", "(", "self", ",", "request", ",", "filter", "=", "None", ")", ":", "limit", "=", "None", "offset", "=", "None", "if", "'maxfeatures'", "in", "request", ".", "params", ":", "limit", "=", "int", "(", "request", ".", "params", "[", "'ma...
Build a query based on the filter and the request params, and send the query to the database.
[ "Build", "a", "query", "based", "on", "the", "filter", "and", "the", "request", "params", "and", "send", "the", "query", "to", "the", "database", "." ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L258-L278
elemoine/papyrus
papyrus/protocol.py
Protocol.count
def count(self, request, filter=None): """ Return the number of records matching the given filter. """ if filter is None: filter = create_filter(request, self.mapped_class, self.geom_attr) query = self.Session().query(self.mapped_class) if filter is not None: query = query.filter(filter) return query.count()
python
def count(self, request, filter=None): """ Return the number of records matching the given filter. """ if filter is None: filter = create_filter(request, self.mapped_class, self.geom_attr) query = self.Session().query(self.mapped_class) if filter is not None: query = query.filter(filter) return query.count()
[ "def", "count", "(", "self", ",", "request", ",", "filter", "=", "None", ")", ":", "if", "filter", "is", "None", ":", "filter", "=", "create_filter", "(", "request", ",", "self", ".", "mapped_class", ",", "self", ".", "geom_attr", ")", "query", "=", ...
Return the number of records matching the given filter.
[ "Return", "the", "number", "of", "records", "matching", "the", "given", "filter", "." ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L280-L287
elemoine/papyrus
papyrus/protocol.py
Protocol.read
def read(self, request, filter=None, id=None): """ Build a query based on the filter or the idenfier, send the query to the database, and return a Feature or a FeatureCollection. """ ret = None if id is not None: o = self.Session().query(self.mapped_class).get(id) if o is None: return HTTPNotFound() # FIXME: we return a Feature here, not a mapped object, do # we really want that? ret = self._filter_attrs(o.__geo_interface__, request) else: objs = self._query(request, filter) ret = FeatureCollection( [self._filter_attrs(o.__geo_interface__, request) for o in objs if o is not None]) return ret
python
def read(self, request, filter=None, id=None): """ Build a query based on the filter or the idenfier, send the query to the database, and return a Feature or a FeatureCollection. """ ret = None if id is not None: o = self.Session().query(self.mapped_class).get(id) if o is None: return HTTPNotFound() # FIXME: we return a Feature here, not a mapped object, do # we really want that? ret = self._filter_attrs(o.__geo_interface__, request) else: objs = self._query(request, filter) ret = FeatureCollection( [self._filter_attrs(o.__geo_interface__, request) for o in objs if o is not None]) return ret
[ "def", "read", "(", "self", ",", "request", ",", "filter", "=", "None", ",", "id", "=", "None", ")", ":", "ret", "=", "None", "if", "id", "is", "not", "None", ":", "o", "=", "self", ".", "Session", "(", ")", ".", "query", "(", "self", ".", "m...
Build a query based on the filter or the idenfier, send the query to the database, and return a Feature or a FeatureCollection.
[ "Build", "a", "query", "based", "on", "the", "filter", "or", "the", "idenfier", "send", "the", "query", "to", "the", "database", "and", "return", "a", "Feature", "or", "a", "FeatureCollection", "." ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L289-L305
elemoine/papyrus
papyrus/protocol.py
Protocol.create
def create(self, request): """ Read the GeoJSON feature collection from the request body and create new objects in the database. """ if self.readonly: return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'}) collection = loads(request.body, object_hook=GeoJSON.to_instance) if not isinstance(collection, FeatureCollection): return HTTPBadRequest() session = self.Session() objects = [] for feature in collection.features: create = False obj = None if hasattr(feature, 'id') and feature.id is not None: obj = session.query(self.mapped_class).get(feature.id) if self.before_create is not None: self.before_create(request, feature, obj) if obj is None: obj = self.mapped_class(feature) create = True else: obj.__update__(feature) if create: session.add(obj) objects.append(obj) session.flush() collection = FeatureCollection(objects) if len(objects) > 0 else None request.response.status_int = 201 return collection
python
def create(self, request): """ Read the GeoJSON feature collection from the request body and create new objects in the database. """ if self.readonly: return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'}) collection = loads(request.body, object_hook=GeoJSON.to_instance) if not isinstance(collection, FeatureCollection): return HTTPBadRequest() session = self.Session() objects = [] for feature in collection.features: create = False obj = None if hasattr(feature, 'id') and feature.id is not None: obj = session.query(self.mapped_class).get(feature.id) if self.before_create is not None: self.before_create(request, feature, obj) if obj is None: obj = self.mapped_class(feature) create = True else: obj.__update__(feature) if create: session.add(obj) objects.append(obj) session.flush() collection = FeatureCollection(objects) if len(objects) > 0 else None request.response.status_int = 201 return collection
[ "def", "create", "(", "self", ",", "request", ")", ":", "if", "self", ".", "readonly", ":", "return", "HTTPMethodNotAllowed", "(", "headers", "=", "{", "'Allow'", ":", "'GET, HEAD'", "}", ")", "collection", "=", "loads", "(", "request", ".", "body", ",",...
Read the GeoJSON feature collection from the request body and create new objects in the database.
[ "Read", "the", "GeoJSON", "feature", "collection", "from", "the", "request", "body", "and", "create", "new", "objects", "in", "the", "database", "." ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L307-L335
elemoine/papyrus
papyrus/protocol.py
Protocol.update
def update(self, request, id): """ Read the GeoJSON feature from the request body and update the corresponding object in the database. """ if self.readonly: return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'}) session = self.Session() obj = session.query(self.mapped_class).get(id) if obj is None: return HTTPNotFound() feature = loads(request.body, object_hook=GeoJSON.to_instance) if not isinstance(feature, Feature): return HTTPBadRequest() if self.before_update is not None: self.before_update(request, feature, obj) obj.__update__(feature) session.flush() request.response.status_int = 200 return obj
python
def update(self, request, id): """ Read the GeoJSON feature from the request body and update the corresponding object in the database. """ if self.readonly: return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'}) session = self.Session() obj = session.query(self.mapped_class).get(id) if obj is None: return HTTPNotFound() feature = loads(request.body, object_hook=GeoJSON.to_instance) if not isinstance(feature, Feature): return HTTPBadRequest() if self.before_update is not None: self.before_update(request, feature, obj) obj.__update__(feature) session.flush() request.response.status_int = 200 return obj
[ "def", "update", "(", "self", ",", "request", ",", "id", ")", ":", "if", "self", ".", "readonly", ":", "return", "HTTPMethodNotAllowed", "(", "headers", "=", "{", "'Allow'", ":", "'GET, HEAD'", "}", ")", "session", "=", "self", ".", "Session", "(", ")"...
Read the GeoJSON feature from the request body and update the corresponding object in the database.
[ "Read", "the", "GeoJSON", "feature", "from", "the", "request", "body", "and", "update", "the", "corresponding", "object", "in", "the", "database", "." ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L337-L354
elemoine/papyrus
papyrus/protocol.py
Protocol.delete
def delete(self, request, id): """ Remove the targeted feature from the database """ if self.readonly: return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'}) session = self.Session() obj = session.query(self.mapped_class).get(id) if obj is None: return HTTPNotFound() if self.before_delete is not None: self.before_delete(request, obj) session.delete(obj) return Response(status_int=204)
python
def delete(self, request, id): """ Remove the targeted feature from the database """ if self.readonly: return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'}) session = self.Session() obj = session.query(self.mapped_class).get(id) if obj is None: return HTTPNotFound() if self.before_delete is not None: self.before_delete(request, obj) session.delete(obj) return Response(status_int=204)
[ "def", "delete", "(", "self", ",", "request", ",", "id", ")", ":", "if", "self", ".", "readonly", ":", "return", "HTTPMethodNotAllowed", "(", "headers", "=", "{", "'Allow'", ":", "'GET, HEAD'", "}", ")", "session", "=", "self", ".", "Session", "(", ")"...
Remove the targeted feature from the database
[ "Remove", "the", "targeted", "feature", "from", "the", "database" ]
train
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L356-L367
MakersF/LoLScraper
lol_scraper/persist.py
TierStore.store
def store(self, text, tier): """ Writes text to the underlying Store mapped at tier. If the store doesn't exists, yet, it creates it :param text: the text to write :param tier: the tier used to identify the store :return: """ store = self._stores.get(tier, None) if not store: store = AutoSplittingFile(self._dir, self._lines_per_store, self._file_name, tier) self._stores[tier] = store store.write(text)
python
def store(self, text, tier): """ Writes text to the underlying Store mapped at tier. If the store doesn't exists, yet, it creates it :param text: the text to write :param tier: the tier used to identify the store :return: """ store = self._stores.get(tier, None) if not store: store = AutoSplittingFile(self._dir, self._lines_per_store, self._file_name, tier) self._stores[tier] = store store.write(text)
[ "def", "store", "(", "self", ",", "text", ",", "tier", ")", ":", "store", "=", "self", ".", "_stores", ".", "get", "(", "tier", ",", "None", ")", "if", "not", "store", ":", "store", "=", "AutoSplittingFile", "(", "self", ".", "_dir", ",", "self", ...
Writes text to the underlying Store mapped at tier. If the store doesn't exists, yet, it creates it :param text: the text to write :param tier: the tier used to identify the store :return:
[ "Writes", "text", "to", "the", "underlying", "Store", "mapped", "at", "tier", ".", "If", "the", "store", "doesn", "t", "exists", "yet", "it", "creates", "it", ":", "param", "text", ":", "the", "text", "to", "write", ":", "param", "tier", ":", "the", ...
train
https://github.com/MakersF/LoLScraper/blob/71d9f2ef24159f2ba5d21467aac1ab785c2bb7e6/lol_scraper/persist.py#L86-L97
Locu/chronology
kronos/kronos/utils/uuid.py
uuid_from_kronos_time
def uuid_from_kronos_time(time, _type=UUIDType.RANDOM): """ Generate a UUID with the specified time. If `lowest` is true, return the lexicographically first UUID for the specified time. """ return timeuuid_from_time(int(time) + UUID_TIME_OFFSET, type=_type)
python
def uuid_from_kronos_time(time, _type=UUIDType.RANDOM): """ Generate a UUID with the specified time. If `lowest` is true, return the lexicographically first UUID for the specified time. """ return timeuuid_from_time(int(time) + UUID_TIME_OFFSET, type=_type)
[ "def", "uuid_from_kronos_time", "(", "time", ",", "_type", "=", "UUIDType", ".", "RANDOM", ")", ":", "return", "timeuuid_from_time", "(", "int", "(", "time", ")", "+", "UUID_TIME_OFFSET", ",", "type", "=", "_type", ")" ]
Generate a UUID with the specified time. If `lowest` is true, return the lexicographically first UUID for the specified time.
[ "Generate", "a", "UUID", "with", "the", "specified", "time", ".", "If", "lowest", "is", "true", "return", "the", "lexicographically", "first", "UUID", "for", "the", "specified", "time", "." ]
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/utils/uuid.py#L14-L20
PedalPi/Application
application/controller/plugins_controller.py
PluginsController.by
def by(self, technology): """ Get the plugins registered in PedalPi by technology :param PluginTechnology technology: PluginTechnology identifier """ if technology == PluginTechnology.LV2 \ or str(technology).upper() == PluginTechnology.LV2.value.upper(): return self.lv2_builder.all else: return []
python
def by(self, technology): """ Get the plugins registered in PedalPi by technology :param PluginTechnology technology: PluginTechnology identifier """ if technology == PluginTechnology.LV2 \ or str(technology).upper() == PluginTechnology.LV2.value.upper(): return self.lv2_builder.all else: return []
[ "def", "by", "(", "self", ",", "technology", ")", ":", "if", "technology", "==", "PluginTechnology", ".", "LV2", "or", "str", "(", "technology", ")", ".", "upper", "(", ")", "==", "PluginTechnology", ".", "LV2", ".", "value", ".", "upper", "(", ")", ...
Get the plugins registered in PedalPi by technology :param PluginTechnology technology: PluginTechnology identifier
[ "Get", "the", "plugins", "registered", "in", "PedalPi", "by", "technology" ]
train
https://github.com/PedalPi/Application/blob/3fdf6f97cfef97a7f1d90a5881dd04324c229f9d/application/controller/plugins_controller.py#L67-L77
PedalPi/Application
application/controller/plugins_controller.py
PluginsController.reload_lv2_plugins_data
def reload_lv2_plugins_data(self): """ Search for LV2 audio plugins in the system and extract the metadata needed by pluginsmanager to generate audio plugins. """ plugins_data = self.lv2_builder.lv2_plugins_data() self._dao.save(plugins_data)
python
def reload_lv2_plugins_data(self): """ Search for LV2 audio plugins in the system and extract the metadata needed by pluginsmanager to generate audio plugins. """ plugins_data = self.lv2_builder.lv2_plugins_data() self._dao.save(plugins_data)
[ "def", "reload_lv2_plugins_data", "(", "self", ")", ":", "plugins_data", "=", "self", ".", "lv2_builder", ".", "lv2_plugins_data", "(", ")", "self", ".", "_dao", ".", "save", "(", "plugins_data", ")" ]
Search for LV2 audio plugins in the system and extract the metadata needed by pluginsmanager to generate audio plugins.
[ "Search", "for", "LV2", "audio", "plugins", "in", "the", "system", "and", "extract", "the", "metadata", "needed", "by", "pluginsmanager", "to", "generate", "audio", "plugins", "." ]
train
https://github.com/PedalPi/Application/blob/3fdf6f97cfef97a7f1d90a5881dd04324c229f9d/application/controller/plugins_controller.py#L112-L118
timdiels/pytil
pytil/multi_dict.py
MultiDict.invert
def invert(self): ''' Invert by swapping each value with its key. Returns ------- MultiDict Inverted multi-dict. Examples -------- >>> MultiDict({1: {1}, 2: {1,2,3}}, 4: {}).invert() MultiDict({1: {1,2}, 2: {2}, 3: {2}}) ''' result = defaultdict(set) for k, val in self.items(): result[val].add(k) return MultiDict(dict(result))
python
def invert(self): ''' Invert by swapping each value with its key. Returns ------- MultiDict Inverted multi-dict. Examples -------- >>> MultiDict({1: {1}, 2: {1,2,3}}, 4: {}).invert() MultiDict({1: {1,2}, 2: {2}, 3: {2}}) ''' result = defaultdict(set) for k, val in self.items(): result[val].add(k) return MultiDict(dict(result))
[ "def", "invert", "(", "self", ")", ":", "result", "=", "defaultdict", "(", "set", ")", "for", "k", ",", "val", "in", "self", ".", "items", "(", ")", ":", "result", "[", "val", "]", ".", "add", "(", "k", ")", "return", "MultiDict", "(", "dict", ...
Invert by swapping each value with its key. Returns ------- MultiDict Inverted multi-dict. Examples -------- >>> MultiDict({1: {1}, 2: {1,2,3}}, 4: {}).invert() MultiDict({1: {1,2}, 2: {2}, 3: {2}})
[ "Invert", "by", "swapping", "each", "value", "with", "its", "key", "." ]
train
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/multi_dict.py#L74-L91
Locu/chronology
jia/scheduler/client.py
_send_with_auth
def _send_with_auth(values, secret_key, url): """Send dictionary of JSON serializable `values` as a POST body to `url` along with `auth_token` that's generated from `secret_key` and `values` scheduler.auth.create_token expects a JSON serializable payload, so we send a dictionary. On the receiving end of the POST request, the Flask view will have access to a werkzeug.datastructures.ImmutableMultiDict. The easiest and most surefire way to ensure that the payload sent to create_token will be consistent on both ends is to generate an ImmutableMultiDict using the werkzeug.Request. """ data = urllib.urlencode(values) # Simulate a Flask request because that is what will be unpacked when the # request is received on the other side request = Request.from_values( content_length=len(data), input_stream=StringIO(data), content_type='application/x-www-form-urlencoded', method='POST') # Add the auth_token, re-encode, and send values['auth_token'] = create_token(secret_key, dict(request.form)) data = urllib.urlencode(values) req = urllib2.Request(url, data) response = urllib2.urlopen(req) return json.loads(response.read())
python
def _send_with_auth(values, secret_key, url): """Send dictionary of JSON serializable `values` as a POST body to `url` along with `auth_token` that's generated from `secret_key` and `values` scheduler.auth.create_token expects a JSON serializable payload, so we send a dictionary. On the receiving end of the POST request, the Flask view will have access to a werkzeug.datastructures.ImmutableMultiDict. The easiest and most surefire way to ensure that the payload sent to create_token will be consistent on both ends is to generate an ImmutableMultiDict using the werkzeug.Request. """ data = urllib.urlencode(values) # Simulate a Flask request because that is what will be unpacked when the # request is received on the other side request = Request.from_values( content_length=len(data), input_stream=StringIO(data), content_type='application/x-www-form-urlencoded', method='POST') # Add the auth_token, re-encode, and send values['auth_token'] = create_token(secret_key, dict(request.form)) data = urllib.urlencode(values) req = urllib2.Request(url, data) response = urllib2.urlopen(req) return json.loads(response.read())
[ "def", "_send_with_auth", "(", "values", ",", "secret_key", ",", "url", ")", ":", "data", "=", "urllib", ".", "urlencode", "(", "values", ")", "# Simulate a Flask request because that is what will be unpacked when the", "# request is received on the other side", "request", ...
Send dictionary of JSON serializable `values` as a POST body to `url` along with `auth_token` that's generated from `secret_key` and `values` scheduler.auth.create_token expects a JSON serializable payload, so we send a dictionary. On the receiving end of the POST request, the Flask view will have access to a werkzeug.datastructures.ImmutableMultiDict. The easiest and most surefire way to ensure that the payload sent to create_token will be consistent on both ends is to generate an ImmutableMultiDict using the werkzeug.Request.
[ "Send", "dictionary", "of", "JSON", "serializable", "values", "as", "a", "POST", "body", "to", "url", "along", "with", "auth_token", "that", "s", "generated", "from", "secret_key", "and", "values" ]
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/scheduler/client.py#L21-L48
Locu/chronology
jia/scheduler/client.py
schedule
def schedule(code, interval, secret_key=None, url=None): """Schedule a string of `code` to be executed every `interval` Specificying an `interval` of 0 indicates the event should only be run one time and will not be rescheduled. """ if not secret_key: secret_key = default_key() if not url: url = default_url() url = '%s/schedule' % url values = { 'interval': interval, 'code': code, } return _send_with_auth(values, secret_key, url)
python
def schedule(code, interval, secret_key=None, url=None): """Schedule a string of `code` to be executed every `interval` Specificying an `interval` of 0 indicates the event should only be run one time and will not be rescheduled. """ if not secret_key: secret_key = default_key() if not url: url = default_url() url = '%s/schedule' % url values = { 'interval': interval, 'code': code, } return _send_with_auth(values, secret_key, url)
[ "def", "schedule", "(", "code", ",", "interval", ",", "secret_key", "=", "None", ",", "url", "=", "None", ")", ":", "if", "not", "secret_key", ":", "secret_key", "=", "default_key", "(", ")", "if", "not", "url", ":", "url", "=", "default_url", "(", "...
Schedule a string of `code` to be executed every `interval` Specificying an `interval` of 0 indicates the event should only be run one time and will not be rescheduled.
[ "Schedule", "a", "string", "of", "code", "to", "be", "executed", "every", "interval" ]
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/scheduler/client.py#L51-L67
Locu/chronology
jia/scheduler/client.py
cancel
def cancel(task_id, secret_key=None, url=None): """Cancel scheduled task with `task_id`""" if not secret_key: secret_key = default_key() if not url: url = default_url() url = '%s/cancel' % url values = { 'id': task_id, } return _send_with_auth(values, secret_key, url)
python
def cancel(task_id, secret_key=None, url=None): """Cancel scheduled task with `task_id`""" if not secret_key: secret_key = default_key() if not url: url = default_url() url = '%s/cancel' % url values = { 'id': task_id, } return _send_with_auth(values, secret_key, url)
[ "def", "cancel", "(", "task_id", ",", "secret_key", "=", "None", ",", "url", "=", "None", ")", ":", "if", "not", "secret_key", ":", "secret_key", "=", "default_key", "(", ")", "if", "not", "url", ":", "url", "=", "default_url", "(", ")", "url", "=", ...
Cancel scheduled task with `task_id`
[ "Cancel", "scheduled", "task", "with", "task_id" ]
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/scheduler/client.py#L70-L81
bovee/Aston
aston/tracefile/agilent_extra_cs.py
read_multireg_file
def read_multireg_file(f, title=None): """ Some REG files have multiple "sections" with different data. This parses each chunk out of such a file (e.g. LCDIAG.REG) """ f.seek(0x26) nparts = struct.unpack('<H', f.read(2))[0] foff = 0x2D if title is None: data = [] for _ in range(nparts): d = read_reg_file(f, foff) data.append(d) foff = f.tell() + 1 else: for _ in range(nparts): d = read_reg_file(f, foff) if d.get('Title') == title: data = d break foff = f.tell() + 1 else: data = {} return data
python
def read_multireg_file(f, title=None): """ Some REG files have multiple "sections" with different data. This parses each chunk out of such a file (e.g. LCDIAG.REG) """ f.seek(0x26) nparts = struct.unpack('<H', f.read(2))[0] foff = 0x2D if title is None: data = [] for _ in range(nparts): d = read_reg_file(f, foff) data.append(d) foff = f.tell() + 1 else: for _ in range(nparts): d = read_reg_file(f, foff) if d.get('Title') == title: data = d break foff = f.tell() + 1 else: data = {} return data
[ "def", "read_multireg_file", "(", "f", ",", "title", "=", "None", ")", ":", "f", ".", "seek", "(", "0x26", ")", "nparts", "=", "struct", ".", "unpack", "(", "'<H'", ",", "f", ".", "read", "(", "2", ")", ")", "[", "0", "]", "foff", "=", "0x2D", ...
Some REG files have multiple "sections" with different data. This parses each chunk out of such a file (e.g. LCDIAG.REG)
[ "Some", "REG", "files", "have", "multiple", "sections", "with", "different", "data", ".", "This", "parses", "each", "chunk", "out", "of", "such", "a", "file", "(", "e", ".", "g", ".", "LCDIAG", ".", "REG", ")" ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/tracefile/agilent_extra_cs.py#L133-L156
bovee/Aston
aston/tracefile/agilent_extra_cs.py
read_reg_file
def read_reg_file(f, foff=0x2D): """ Given a file handle for an old-style Agilent *.REG file, this will parse that file into a dictonary of key/value pairs (including any tables that are in the *.REG file, which will be parsed into lists of lists). """ # convenience function for reading in data def rd(st): return struct.unpack(st, f.read(struct.calcsize(st))) f.seek(0x19) if f.read(1) != b'A': # raise TypeError("Version of REG file is too new.") return {} f.seek(foff) nrecs = rd('<I')[0] # TODO: should be '<H' rec_tab = [rd('<HHIII') for n in range(nrecs)] names = {} f.seek(foff + 20 * nrecs + 4) for r in rec_tab: d = f.read(r[2]) if r[1] == 1539: # '0306' # this is part of the linked list too, but contains a # reference to a table cd = struct.unpack('<HIII21sI', d) names[cd[5]] = cd[4].decode('iso8859').strip('\x00') # except: # pass elif r[1] == 32769 or r[1] == 32771: # b'0180' or b'0380' names[r[4]] = d[:-1].decode('iso8859') elif r[1] == 32774: # b'0680' # this is a string that is referenced elsewhere (in a table) names[r[4]] = d[2:-1].decode('iso8859') elif r[1] == 32770: # b'0280' # this is just a flattened numeric array names[r[4]] = np.frombuffer(d, dtype=np.uint32, offset=4) data = {} f.seek(foff + 20 * nrecs + 4) for r in rec_tab: d = f.read(r[2]) if r[1] == 1538: # '0206' # this is part of a linked list if len(d) == 43: cd = struct.unpack('<HIII21sd', d) data[cd[4].decode('iso8859').strip('\x00')] = cd[5] else: pass elif r[1] == 1537: # b'0106' # name of property n = d[14:30].split(b'\x00')[0].decode('iso8859') # with value from names data[n] = names.get(struct.unpack('<I', d[35:39])[0], '') elif r[1] == 1793: # b'0107' # this is a table of values nrow = struct.unpack('<H', d[4:6])[0] ncol = struct.unpack('<H', d[16:18])[0] if ncol != 0: cols = [struct.unpack('<16sHHHHHI', d[20 + 30 * i:50 + 30 * i]) for i in range(ncol)] colnames = [c[0].split(b'\x00')[0].decode('iso8859') for c in cols] # TODO: type 2 is not a constant size? 
31, 17 rty2sty = {1: 'H', 3: 'I', 4: 'f', 5: 'H', 7: 'H', 8: 'd', 11: 'H', 12: 'H', 13: 'I', 14: 'I', 16: 'H'} coltype = '<' + ''.join([rty2sty.get(c[3], str(c[2]) + 's') for c in cols]) lencol = struct.calcsize(coltype) tab = [] for i in reversed(range(2, nrow + 2)): rawrow = struct.unpack(coltype, d[-i * lencol: (1 - i) * lencol]) row = [] for j, p in enumerate(rawrow): if cols[j][3] == 3: row.append(names.get(p, str(p))) else: row.append(p) tab.append(row) data[names[r[4]]] = [colnames, tab] elif r[1] == 1281 or r[1] == 1283: # b'0105' or b'0305' fm = '<HHBIIhIdII12shIddQQB8sII12shIddQQB8s' m = struct.unpack(fm, d) nrecs = m[4] # number of points in table # x_units = names.get(m[8], '') x_arr = m[14] * names.get(m[9], np.arange(nrecs - 1)) y_arr = m[25] * names.get(m[20]) y_units = names.get(m[19], '') if y_units == 'bar': y_arr *= 0.1 # convert to MPa # TODO: what to call this? data['Trace'] = Trace(y_arr, x_arr, name='') # elif r[1] == 1025: # b'0104' # # lots of zeros? maybe one or two numbers? # # only found in REG entries that have long 0280 records # fm = '<HQQQIHHHHIIHB' # m = struct.unpack(fm, d) # print(m) # #print(r[1], len(d), binascii.hexlify(d)) # pass # elif r[1] == 512: # b'0002' # # either points to two null pointers or two other pointers # # (indicates start of linked list?) # print(r[1], len(d), binascii.hexlify(d)) # elif r[1] == 769 or r[1] == 772: # b'0103' or b'0403' # # points to 2nd, 3rd & 4th records (two 0002 records and a 0180) # b = binascii.hexlify # print(b(d[10:14]), b(d[14:18]), b(d[18:22])) return data
python
def read_reg_file(f, foff=0x2D): """ Given a file handle for an old-style Agilent *.REG file, this will parse that file into a dictonary of key/value pairs (including any tables that are in the *.REG file, which will be parsed into lists of lists). """ # convenience function for reading in data def rd(st): return struct.unpack(st, f.read(struct.calcsize(st))) f.seek(0x19) if f.read(1) != b'A': # raise TypeError("Version of REG file is too new.") return {} f.seek(foff) nrecs = rd('<I')[0] # TODO: should be '<H' rec_tab = [rd('<HHIII') for n in range(nrecs)] names = {} f.seek(foff + 20 * nrecs + 4) for r in rec_tab: d = f.read(r[2]) if r[1] == 1539: # '0306' # this is part of the linked list too, but contains a # reference to a table cd = struct.unpack('<HIII21sI', d) names[cd[5]] = cd[4].decode('iso8859').strip('\x00') # except: # pass elif r[1] == 32769 or r[1] == 32771: # b'0180' or b'0380' names[r[4]] = d[:-1].decode('iso8859') elif r[1] == 32774: # b'0680' # this is a string that is referenced elsewhere (in a table) names[r[4]] = d[2:-1].decode('iso8859') elif r[1] == 32770: # b'0280' # this is just a flattened numeric array names[r[4]] = np.frombuffer(d, dtype=np.uint32, offset=4) data = {} f.seek(foff + 20 * nrecs + 4) for r in rec_tab: d = f.read(r[2]) if r[1] == 1538: # '0206' # this is part of a linked list if len(d) == 43: cd = struct.unpack('<HIII21sd', d) data[cd[4].decode('iso8859').strip('\x00')] = cd[5] else: pass elif r[1] == 1537: # b'0106' # name of property n = d[14:30].split(b'\x00')[0].decode('iso8859') # with value from names data[n] = names.get(struct.unpack('<I', d[35:39])[0], '') elif r[1] == 1793: # b'0107' # this is a table of values nrow = struct.unpack('<H', d[4:6])[0] ncol = struct.unpack('<H', d[16:18])[0] if ncol != 0: cols = [struct.unpack('<16sHHHHHI', d[20 + 30 * i:50 + 30 * i]) for i in range(ncol)] colnames = [c[0].split(b'\x00')[0].decode('iso8859') for c in cols] # TODO: type 2 is not a constant size? 
31, 17 rty2sty = {1: 'H', 3: 'I', 4: 'f', 5: 'H', 7: 'H', 8: 'd', 11: 'H', 12: 'H', 13: 'I', 14: 'I', 16: 'H'} coltype = '<' + ''.join([rty2sty.get(c[3], str(c[2]) + 's') for c in cols]) lencol = struct.calcsize(coltype) tab = [] for i in reversed(range(2, nrow + 2)): rawrow = struct.unpack(coltype, d[-i * lencol: (1 - i) * lencol]) row = [] for j, p in enumerate(rawrow): if cols[j][3] == 3: row.append(names.get(p, str(p))) else: row.append(p) tab.append(row) data[names[r[4]]] = [colnames, tab] elif r[1] == 1281 or r[1] == 1283: # b'0105' or b'0305' fm = '<HHBIIhIdII12shIddQQB8sII12shIddQQB8s' m = struct.unpack(fm, d) nrecs = m[4] # number of points in table # x_units = names.get(m[8], '') x_arr = m[14] * names.get(m[9], np.arange(nrecs - 1)) y_arr = m[25] * names.get(m[20]) y_units = names.get(m[19], '') if y_units == 'bar': y_arr *= 0.1 # convert to MPa # TODO: what to call this? data['Trace'] = Trace(y_arr, x_arr, name='') # elif r[1] == 1025: # b'0104' # # lots of zeros? maybe one or two numbers? # # only found in REG entries that have long 0280 records # fm = '<HQQQIHHHHIIHB' # m = struct.unpack(fm, d) # print(m) # #print(r[1], len(d), binascii.hexlify(d)) # pass # elif r[1] == 512: # b'0002' # # either points to two null pointers or two other pointers # # (indicates start of linked list?) # print(r[1], len(d), binascii.hexlify(d)) # elif r[1] == 769 or r[1] == 772: # b'0103' or b'0403' # # points to 2nd, 3rd & 4th records (two 0002 records and a 0180) # b = binascii.hexlify # print(b(d[10:14]), b(d[14:18]), b(d[18:22])) return data
[ "def", "read_reg_file", "(", "f", ",", "foff", "=", "0x2D", ")", ":", "# convenience function for reading in data", "def", "rd", "(", "st", ")", ":", "return", "struct", ".", "unpack", "(", "st", ",", "f", ".", "read", "(", "struct", ".", "calcsize", "("...
Given a file handle for an old-style Agilent *.REG file, this will parse that file into a dictonary of key/value pairs (including any tables that are in the *.REG file, which will be parsed into lists of lists).
[ "Given", "a", "file", "handle", "for", "an", "old", "-", "style", "Agilent", "*", ".", "REG", "file", "this", "will", "parse", "that", "file", "into", "a", "dictonary", "of", "key", "/", "value", "pairs", "(", "including", "any", "tables", "that", "are...
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/tracefile/agilent_extra_cs.py#L159-L273
LasLabs/python-helpscout
helpscout/apis/teams.py
Teams.get
def get(cls, session, team_id): """Return a specific team. Args: session (requests.sessions.Session): Authenticated session. team_id (int): The ID of the team to get. Returns: helpscout.models.Person: A person singleton representing the team, if existing. Otherwise ``None``. """ return cls( '/teams/%d.json' % team_id, singleton=True, session=session, )
python
def get(cls, session, team_id): """Return a specific team. Args: session (requests.sessions.Session): Authenticated session. team_id (int): The ID of the team to get. Returns: helpscout.models.Person: A person singleton representing the team, if existing. Otherwise ``None``. """ return cls( '/teams/%d.json' % team_id, singleton=True, session=session, )
[ "def", "get", "(", "cls", ",", "session", ",", "team_id", ")", ":", "return", "cls", "(", "'/teams/%d.json'", "%", "team_id", ",", "singleton", "=", "True", ",", "session", "=", "session", ",", ")" ]
Return a specific team. Args: session (requests.sessions.Session): Authenticated session. team_id (int): The ID of the team to get. Returns: helpscout.models.Person: A person singleton representing the team, if existing. Otherwise ``None``.
[ "Return", "a", "specific", "team", "." ]
train
https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/teams.py#L31-L46
LasLabs/python-helpscout
helpscout/apis/teams.py
Teams.get_members
def get_members(cls, session, team_or_id): """List the members for the team. Args: team_or_id (helpscout.models.Person or int): Team or the ID of the team to get the folders for. Returns: RequestPaginator(output_type=helpscout.models.Users): Users iterator. """ if isinstance(team_or_id, Person): team_or_id = team_or_id.id return cls( '/teams/%d/members.json' % team_or_id, session=session, out_type=User, )
python
def get_members(cls, session, team_or_id): """List the members for the team. Args: team_or_id (helpscout.models.Person or int): Team or the ID of the team to get the folders for. Returns: RequestPaginator(output_type=helpscout.models.Users): Users iterator. """ if isinstance(team_or_id, Person): team_or_id = team_or_id.id return cls( '/teams/%d/members.json' % team_or_id, session=session, out_type=User, )
[ "def", "get_members", "(", "cls", ",", "session", ",", "team_or_id", ")", ":", "if", "isinstance", "(", "team_or_id", ",", "Person", ")", ":", "team_or_id", "=", "team_or_id", ".", "id", "return", "cls", "(", "'/teams/%d/members.json'", "%", "team_or_id", ",...
List the members for the team. Args: team_or_id (helpscout.models.Person or int): Team or the ID of the team to get the folders for. Returns: RequestPaginator(output_type=helpscout.models.Users): Users iterator.
[ "List", "the", "members", "for", "the", "team", "." ]
train
https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/teams.py#L62-L79
bovee/Aston
aston/tracefile/mzml.py
t_to_min
def t_to_min(x): """ Convert XML 'xs: duration type' to decimal minutes, e.g.: t_to_min('PT1H2M30S') == 62.5 """ g = re.match('PT(?:(.*)H)?(?:(.*)M)?(?:(.*)S)?', x).groups() return sum(0 if g[i] is None else float(g[i]) * 60. ** (1 - i) for i in range(3))
python
def t_to_min(x): """ Convert XML 'xs: duration type' to decimal minutes, e.g.: t_to_min('PT1H2M30S') == 62.5 """ g = re.match('PT(?:(.*)H)?(?:(.*)M)?(?:(.*)S)?', x).groups() return sum(0 if g[i] is None else float(g[i]) * 60. ** (1 - i) for i in range(3))
[ "def", "t_to_min", "(", "x", ")", ":", "g", "=", "re", ".", "match", "(", "'PT(?:(.*)H)?(?:(.*)M)?(?:(.*)S)?'", ",", "x", ")", ".", "groups", "(", ")", "return", "sum", "(", "0", "if", "g", "[", "i", "]", "is", "None", "else", "float", "(", "g", ...
Convert XML 'xs: duration type' to decimal minutes, e.g.: t_to_min('PT1H2M30S') == 62.5
[ "Convert", "XML", "xs", ":", "duration", "type", "to", "decimal", "minutes", "e", ".", "g", ".", ":", "t_to_min", "(", "PT1H2M30S", ")", "==", "62", ".", "5" ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/tracefile/mzml.py#L12-L19
bovee/Aston
aston/tracefile/mzml.py
write_mzxml
def write_mzxml(filename, df, info=None, precision='f'): """ Precision is either f or d. """ for r in df.values: df.columns pass
python
def write_mzxml(filename, df, info=None, precision='f'): """ Precision is either f or d. """ for r in df.values: df.columns pass
[ "def", "write_mzxml", "(", "filename", ",", "df", ",", "info", "=", "None", ",", "precision", "=", "'f'", ")", ":", "for", "r", "in", "df", ".", "values", ":", "df", ".", "columns", "pass" ]
Precision is either f or d.
[ "Precision", "is", "either", "f", "or", "d", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/tracefile/mzml.py#L194-L200
bovee/Aston
aston/tracefile/mzml.py
MzML.read_binary
def read_binary(self, ba, param_groups=None): """ ba - binaryDataArray XML node """ if ba is None: return [] pgr = ba.find('m:referenceableParamGroupRef', namespaces=self.ns) if pgr is not None and param_groups is not None: q = 'm:referenceableParamGroup[@id="' + pgr.get('ref') + '"]' pg = param_groups.find(q, namespaces=self.ns) else: pg = ba if pg.find('m:cvParam[@accession="MS:1000574"]', namespaces=self.ns) is not None: compress = True elif pg.find('m:cvParam[@accession="MS:1000576"]', namespaces=self.ns) is not None: compress = False else: # TODO: no info? should check the other record? pass if pg.find('m:cvParam[@accession="MS:1000521"]', namespaces=self.ns) is not None: dtype = 'f' elif pg.find('m:cvParam[@accession="MS:1000523"]', namespaces=self.ns) is not None: dtype = 'd' else: # TODO: no info? should check the other record? pass datatext = ba.find('m:binary', namespaces=self.ns).text if compress: rawdata = zlib.decompress(base64.b64decode(datatext)) else: rawdata = base64.b64decode(datatext) return np.fromstring(rawdata, dtype=dtype)
python
def read_binary(self, ba, param_groups=None): """ ba - binaryDataArray XML node """ if ba is None: return [] pgr = ba.find('m:referenceableParamGroupRef', namespaces=self.ns) if pgr is not None and param_groups is not None: q = 'm:referenceableParamGroup[@id="' + pgr.get('ref') + '"]' pg = param_groups.find(q, namespaces=self.ns) else: pg = ba if pg.find('m:cvParam[@accession="MS:1000574"]', namespaces=self.ns) is not None: compress = True elif pg.find('m:cvParam[@accession="MS:1000576"]', namespaces=self.ns) is not None: compress = False else: # TODO: no info? should check the other record? pass if pg.find('m:cvParam[@accession="MS:1000521"]', namespaces=self.ns) is not None: dtype = 'f' elif pg.find('m:cvParam[@accession="MS:1000523"]', namespaces=self.ns) is not None: dtype = 'd' else: # TODO: no info? should check the other record? pass datatext = ba.find('m:binary', namespaces=self.ns).text if compress: rawdata = zlib.decompress(base64.b64decode(datatext)) else: rawdata = base64.b64decode(datatext) return np.fromstring(rawdata, dtype=dtype)
[ "def", "read_binary", "(", "self", ",", "ba", ",", "param_groups", "=", "None", ")", ":", "if", "ba", "is", "None", ":", "return", "[", "]", "pgr", "=", "ba", ".", "find", "(", "'m:referenceableParamGroupRef'", ",", "namespaces", "=", "self", ".", "ns"...
ba - binaryDataArray XML node
[ "ba", "-", "binaryDataArray", "XML", "node" ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/tracefile/mzml.py#L128-L167
timdiels/pytil
pytil/debug.py
pretty_memory_info
def pretty_memory_info(): ''' Pretty format memory info. Returns ------- str Memory info. Examples -------- >>> pretty_memory_info() '5MB memory usage' ''' process = psutil.Process(os.getpid()) return '{}MB memory usage'.format(int(process.memory_info().rss / 2**20))
python
def pretty_memory_info(): ''' Pretty format memory info. Returns ------- str Memory info. Examples -------- >>> pretty_memory_info() '5MB memory usage' ''' process = psutil.Process(os.getpid()) return '{}MB memory usage'.format(int(process.memory_info().rss / 2**20))
[ "def", "pretty_memory_info", "(", ")", ":", "process", "=", "psutil", ".", "Process", "(", "os", ".", "getpid", "(", ")", ")", "return", "'{}MB memory usage'", ".", "format", "(", "int", "(", "process", ".", "memory_info", "(", ")", ".", "rss", "/", "2...
Pretty format memory info. Returns ------- str Memory info. Examples -------- >>> pretty_memory_info() '5MB memory usage'
[ "Pretty", "format", "memory", "info", "." ]
train
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/debug.py#L25-L40
biosustain/venom
venom/rpc/inspection.py
dynamic
def dynamic(name: str, expression: Union[type, Callable[[Type[Any]], type]]) \ -> Callable[[Callable[..., Any]], Callable[..., Any]]: # TODO type annotations for pass-through decorator """ :param name: :param expression: a subclass of ``type`` or a callable in the format ``(owner: Type[Any]) -> type``. :return: """ def decorator(func): if not hasattr(func, '__dynamic__'): func.__dynamic__ = {name: expression} else: func.__dynamic__[name] = expression return func return decorator
python
def dynamic(name: str, expression: Union[type, Callable[[Type[Any]], type]]) \ -> Callable[[Callable[..., Any]], Callable[..., Any]]: # TODO type annotations for pass-through decorator """ :param name: :param expression: a subclass of ``type`` or a callable in the format ``(owner: Type[Any]) -> type``. :return: """ def decorator(func): if not hasattr(func, '__dynamic__'): func.__dynamic__ = {name: expression} else: func.__dynamic__[name] = expression return func return decorator
[ "def", "dynamic", "(", "name", ":", "str", ",", "expression", ":", "Union", "[", "type", ",", "Callable", "[", "[", "Type", "[", "Any", "]", "]", ",", "type", "]", "]", ")", "->", "Callable", "[", "[", "Callable", "[", "...", ",", "Any", "]", "...
:param name: :param expression: a subclass of ``type`` or a callable in the format ``(owner: Type[Any]) -> type``. :return:
[ ":", "param", "name", ":", ":", "param", "expression", ":", "a", "subclass", "of", "type", "or", "a", "callable", "in", "the", "format", "(", "owner", ":", "Type", "[", "Any", "]", ")", "-", ">", "type", ".", ":", "return", ":" ]
train
https://github.com/biosustain/venom/blob/167967e7a8078a7227dc50dbc40df38e5dd6f520/venom/rpc/inspection.py#L24-L40
timdiels/pytil
pytil/series.py
invert
def invert(series): ''' Swap index with values of series. Parameters ---------- series : ~pandas.Series Series to swap on, must have a name. Returns ------- ~pandas.Series Series after swap. See also -------- pandas.Series.map Joins series ``a -> b`` and ``b -> c`` into ``a -> c``. ''' df = series.reset_index() #TODO alt is to to_frame and then use som dataframe methods df.set_index(series.name, inplace=True) return df[df.columns[0]]
python
def invert(series): ''' Swap index with values of series. Parameters ---------- series : ~pandas.Series Series to swap on, must have a name. Returns ------- ~pandas.Series Series after swap. See also -------- pandas.Series.map Joins series ``a -> b`` and ``b -> c`` into ``a -> c``. ''' df = series.reset_index() #TODO alt is to to_frame and then use som dataframe methods df.set_index(series.name, inplace=True) return df[df.columns[0]]
[ "def", "invert", "(", "series", ")", ":", "df", "=", "series", ".", "reset_index", "(", ")", "#TODO alt is to to_frame and then use som dataframe methods", "df", ".", "set_index", "(", "series", ".", "name", ",", "inplace", "=", "True", ")", "return", "df", "[...
Swap index with values of series. Parameters ---------- series : ~pandas.Series Series to swap on, must have a name. Returns ------- ~pandas.Series Series after swap. See also -------- pandas.Series.map Joins series ``a -> b`` and ``b -> c`` into ``a -> c``.
[ "Swap", "index", "with", "values", "of", "series", "." ]
train
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/series.py#L24-L45
timdiels/pytil
pytil/series.py
split
def split(series): ''' Split values. The index is dropped, but this may change in the future. Parameters ---------- series : ~pandas.Series[~pytil.numpy.ArrayLike] Series with array-like values. Returns ------- ~pandas.Series Series with values split across rows. Examples -------- >>> series = pd.Series([[1,2],[1,2],[3,4,5]]) >>> series 0 [1, 2] 1 [1, 2] 2 [3, 4, 5] dtype: object >>> split(series) 0 1 1 2 2 1 3 2 4 3 5 4 6 5 dtype: object ''' s = df_.split_array_like(series.apply(list).to_frame('column'), 'column')['column'] s.name = series.name return s
python
def split(series): ''' Split values. The index is dropped, but this may change in the future. Parameters ---------- series : ~pandas.Series[~pytil.numpy.ArrayLike] Series with array-like values. Returns ------- ~pandas.Series Series with values split across rows. Examples -------- >>> series = pd.Series([[1,2],[1,2],[3,4,5]]) >>> series 0 [1, 2] 1 [1, 2] 2 [3, 4, 5] dtype: object >>> split(series) 0 1 1 2 2 1 3 2 4 3 5 4 6 5 dtype: object ''' s = df_.split_array_like(series.apply(list).to_frame('column'), 'column')['column'] s.name = series.name return s
[ "def", "split", "(", "series", ")", ":", "s", "=", "df_", ".", "split_array_like", "(", "series", ".", "apply", "(", "list", ")", ".", "to_frame", "(", "'column'", ")", ",", "'column'", ")", "[", "'column'", "]", "s", ".", "name", "=", "series", "....
Split values. The index is dropped, but this may change in the future. Parameters ---------- series : ~pandas.Series[~pytil.numpy.ArrayLike] Series with array-like values. Returns ------- ~pandas.Series Series with values split across rows. Examples -------- >>> series = pd.Series([[1,2],[1,2],[3,4,5]]) >>> series 0 [1, 2] 1 [1, 2] 2 [3, 4, 5] dtype: object >>> split(series) 0 1 1 2 2 1 3 2 4 3 5 4 6 5 dtype: object
[ "Split", "values", "." ]
train
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/series.py#L47-L83
timdiels/pytil
pytil/series.py
equals
def equals(series1, series2, ignore_order=False, ignore_index=False, all_close=False, _return_reason=False): ''' Get whether 2 series are equal. ``NaN`` is considered equal to ``NaN`` and `None`. Parameters ---------- series1 : pandas.Series Series to compare. series2 : pandas.Series Series to compare. ignore_order : bool Ignore order of values (and index). ignore_index : bool Ignore index values and name. all_close : bool If `False`, values must match exactly, if `True`, floats are compared as if compared with `numpy.isclose`. _return_reason : bool Internal. If `True`, `equals` returns a tuple containing the reason, else `equals` only returns a bool indicating equality (or equivalence rather). Returns ------- bool Whether they are equal (after ignoring according to the parameters). Internal note: if ``_return_reason``, ``Tuple[bool, str or None]`` is returned. The former is whether they're equal, the latter is `None` if equal or a short explanation of why the series aren't equal, otherwise. Notes ----- All values (including those of indices) must be copyable and ``__eq__`` must be such that a copy must equal its original. A value must equal itself unless it's ``NaN``. Values needn't be orderable or hashable (however pandas requires index values to be orderable and hashable). By consequence, this is not an efficient function, but it is flexible. ''' result = _equals(series1, series2, ignore_order, ignore_index, all_close) if _return_reason: return result else: return result[0]
python
def equals(series1, series2, ignore_order=False, ignore_index=False, all_close=False, _return_reason=False): ''' Get whether 2 series are equal. ``NaN`` is considered equal to ``NaN`` and `None`. Parameters ---------- series1 : pandas.Series Series to compare. series2 : pandas.Series Series to compare. ignore_order : bool Ignore order of values (and index). ignore_index : bool Ignore index values and name. all_close : bool If `False`, values must match exactly, if `True`, floats are compared as if compared with `numpy.isclose`. _return_reason : bool Internal. If `True`, `equals` returns a tuple containing the reason, else `equals` only returns a bool indicating equality (or equivalence rather). Returns ------- bool Whether they are equal (after ignoring according to the parameters). Internal note: if ``_return_reason``, ``Tuple[bool, str or None]`` is returned. The former is whether they're equal, the latter is `None` if equal or a short explanation of why the series aren't equal, otherwise. Notes ----- All values (including those of indices) must be copyable and ``__eq__`` must be such that a copy must equal its original. A value must equal itself unless it's ``NaN``. Values needn't be orderable or hashable (however pandas requires index values to be orderable and hashable). By consequence, this is not an efficient function, but it is flexible. ''' result = _equals(series1, series2, ignore_order, ignore_index, all_close) if _return_reason: return result else: return result[0]
[ "def", "equals", "(", "series1", ",", "series2", ",", "ignore_order", "=", "False", ",", "ignore_index", "=", "False", ",", "all_close", "=", "False", ",", "_return_reason", "=", "False", ")", ":", "result", "=", "_equals", "(", "series1", ",", "series2", ...
Get whether 2 series are equal. ``NaN`` is considered equal to ``NaN`` and `None`. Parameters ---------- series1 : pandas.Series Series to compare. series2 : pandas.Series Series to compare. ignore_order : bool Ignore order of values (and index). ignore_index : bool Ignore index values and name. all_close : bool If `False`, values must match exactly, if `True`, floats are compared as if compared with `numpy.isclose`. _return_reason : bool Internal. If `True`, `equals` returns a tuple containing the reason, else `equals` only returns a bool indicating equality (or equivalence rather). Returns ------- bool Whether they are equal (after ignoring according to the parameters). Internal note: if ``_return_reason``, ``Tuple[bool, str or None]`` is returned. The former is whether they're equal, the latter is `None` if equal or a short explanation of why the series aren't equal, otherwise. Notes ----- All values (including those of indices) must be copyable and ``__eq__`` must be such that a copy must equal its original. A value must equal itself unless it's ``NaN``. Values needn't be orderable or hashable (however pandas requires index values to be orderable and hashable). By consequence, this is not an efficient function, but it is flexible.
[ "Get", "whether", "2", "series", "are", "equal", "." ]
train
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/series.py#L85-L130
timdiels/pytil
pytil/series.py
assert_equals
def assert_equals(actual, expected, ignore_order=False, ignore_index=False, all_close=False): ''' Assert 2 series are equal. Like ``assert equals(series1, series2, ...)``, but with better hints at where the series differ. See `equals` for detailed parameter doc. Parameters ---------- actual : ~pandas.Series expected : ~pandas.Series ignore_order : bool ignore_index : bool all_close : bool ''' equals_, reason = equals(actual, expected, ignore_order, ignore_index, all_close, _return_reason=True) assert equals_, '{}\n\n{}\n\n{}'.format(reason, actual.to_string(), expected.to_string())
python
def assert_equals(actual, expected, ignore_order=False, ignore_index=False, all_close=False): ''' Assert 2 series are equal. Like ``assert equals(series1, series2, ...)``, but with better hints at where the series differ. See `equals` for detailed parameter doc. Parameters ---------- actual : ~pandas.Series expected : ~pandas.Series ignore_order : bool ignore_index : bool all_close : bool ''' equals_, reason = equals(actual, expected, ignore_order, ignore_index, all_close, _return_reason=True) assert equals_, '{}\n\n{}\n\n{}'.format(reason, actual.to_string(), expected.to_string())
[ "def", "assert_equals", "(", "actual", ",", "expected", ",", "ignore_order", "=", "False", ",", "ignore_index", "=", "False", ",", "all_close", "=", "False", ")", ":", "equals_", ",", "reason", "=", "equals", "(", "actual", ",", "expected", ",", "ignore_or...
Assert 2 series are equal. Like ``assert equals(series1, series2, ...)``, but with better hints at where the series differ. See `equals` for detailed parameter doc. Parameters ---------- actual : ~pandas.Series expected : ~pandas.Series ignore_order : bool ignore_index : bool all_close : bool
[ "Assert", "2", "series", "are", "equal", "." ]
train
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/series.py#L145-L162
nitipit/appkit
appkit/app.py
App.do_startup
def do_startup(self): """Gtk.Application.run() will call this function()""" Gtk.Application.do_startup(self) gtk_window = Gtk.ApplicationWindow(application=self) gtk_window.set_title('AppKit') webkit_web_view = WebKit.WebView() webkit_web_view.load_uri('http://localhost:' + str(self.port)) screen = Gdk.Screen.get_default() monitor_geometry = screen.get_primary_monitor() monitor_geometry = screen.get_monitor_geometry(monitor_geometry) settings = webkit_web_view.get_settings() settings.set_property('enable-universal-access-from-file-uris', True) settings.set_property('enable-file-access-from-file-uris', True) settings.set_property('default-encoding', 'utf-8') gtk_window.set_default_size( monitor_geometry.width * 1.0 / 2.0, monitor_geometry.height * 3.0 / 5.0, ) scrollWindow = Gtk.ScrolledWindow() scrollWindow.add(webkit_web_view) gtk_window.add(scrollWindow) gtk_window.connect('delete-event', self._on_gtk_window_destroy) webkit_web_view.connect('notify::title', self._on_notify_title) self.gtk_window = gtk_window self.webkit_web_view = webkit_web_view gtk_window.show_all()
python
def do_startup(self): """Gtk.Application.run() will call this function()""" Gtk.Application.do_startup(self) gtk_window = Gtk.ApplicationWindow(application=self) gtk_window.set_title('AppKit') webkit_web_view = WebKit.WebView() webkit_web_view.load_uri('http://localhost:' + str(self.port)) screen = Gdk.Screen.get_default() monitor_geometry = screen.get_primary_monitor() monitor_geometry = screen.get_monitor_geometry(monitor_geometry) settings = webkit_web_view.get_settings() settings.set_property('enable-universal-access-from-file-uris', True) settings.set_property('enable-file-access-from-file-uris', True) settings.set_property('default-encoding', 'utf-8') gtk_window.set_default_size( monitor_geometry.width * 1.0 / 2.0, monitor_geometry.height * 3.0 / 5.0, ) scrollWindow = Gtk.ScrolledWindow() scrollWindow.add(webkit_web_view) gtk_window.add(scrollWindow) gtk_window.connect('delete-event', self._on_gtk_window_destroy) webkit_web_view.connect('notify::title', self._on_notify_title) self.gtk_window = gtk_window self.webkit_web_view = webkit_web_view gtk_window.show_all()
[ "def", "do_startup", "(", "self", ")", ":", "Gtk", ".", "Application", ".", "do_startup", "(", "self", ")", "gtk_window", "=", "Gtk", ".", "ApplicationWindow", "(", "application", "=", "self", ")", "gtk_window", ".", "set_title", "(", "'AppKit'", ")", "web...
Gtk.Application.run() will call this function()
[ "Gtk", ".", "Application", ".", "run", "()", "will", "call", "this", "function", "()" ]
train
https://github.com/nitipit/appkit/blob/08eeaf45a9ca884bf5fe105d47a81269d44b1412/appkit/app.py#L40-L68
jessamynsmith/twitterbot
twitter_bot/twitter_bot.py
TwitterBot.tokenize
def tokenize(self, message, max_length, mentions=None): """ Tokenize a message into a list of messages of no more than max_length, including mentions in each message :param message: Message to be sent :param max_length: Maximum allowed length for each resulting message :param mentions: List of usernames to mention in each message :return: """ mention_text = '' mention_length = 0 if mentions: formatted_mentions = ['@{0}'.format(mention) for mention in mentions] mention_text = " ".join(formatted_mentions) message = '{0} {1}'.format(mention_text, message) mention_length = len(mention_text) + 1 if len(message) <= max_length: return [message] tokens = message.split(' ') indices = [] index = 1 length = len(tokens[0]) while index < len(tokens): # 1 for leading space, 4 for trailing " ..." if length + 1 + len(tokens[index]) + 4 > max_length: indices.append(index) # 4 for leading "... " length = 4 + mention_length + len(tokens[index]) else: # 1 for leading space length += 1 + len(tokens[index]) index += 1 indices.append(index) messages = [" ".join(tokens[0:indices[0]])] for i in range(1, len(indices)): messages[i - 1] += ' ...' parts = [] if mention_text: parts.append(mention_text) parts.append("...") parts.extend(tokens[indices[i - 1]:indices[i]]) messages.append(" ".join(parts)) return messages
python
def tokenize(self, message, max_length, mentions=None): """ Tokenize a message into a list of messages of no more than max_length, including mentions in each message :param message: Message to be sent :param max_length: Maximum allowed length for each resulting message :param mentions: List of usernames to mention in each message :return: """ mention_text = '' mention_length = 0 if mentions: formatted_mentions = ['@{0}'.format(mention) for mention in mentions] mention_text = " ".join(formatted_mentions) message = '{0} {1}'.format(mention_text, message) mention_length = len(mention_text) + 1 if len(message) <= max_length: return [message] tokens = message.split(' ') indices = [] index = 1 length = len(tokens[0]) while index < len(tokens): # 1 for leading space, 4 for trailing " ..." if length + 1 + len(tokens[index]) + 4 > max_length: indices.append(index) # 4 for leading "... " length = 4 + mention_length + len(tokens[index]) else: # 1 for leading space length += 1 + len(tokens[index]) index += 1 indices.append(index) messages = [" ".join(tokens[0:indices[0]])] for i in range(1, len(indices)): messages[i - 1] += ' ...' parts = [] if mention_text: parts.append(mention_text) parts.append("...") parts.extend(tokens[indices[i - 1]:indices[i]]) messages.append(" ".join(parts)) return messages
[ "def", "tokenize", "(", "self", ",", "message", ",", "max_length", ",", "mentions", "=", "None", ")", ":", "mention_text", "=", "''", "mention_length", "=", "0", "if", "mentions", ":", "formatted_mentions", "=", "[", "'@{0}'", ".", "format", "(", "mention"...
Tokenize a message into a list of messages of no more than max_length, including mentions in each message :param message: Message to be sent :param max_length: Maximum allowed length for each resulting message :param mentions: List of usernames to mention in each message :return:
[ "Tokenize", "a", "message", "into", "a", "list", "of", "messages", "of", "no", "more", "than", "max_length", "including", "mentions", "in", "each", "message", ":", "param", "message", ":", "Message", "to", "be", "sent", ":", "param", "max_length", ":", "Ma...
train
https://github.com/jessamynsmith/twitterbot/blob/10a5d07b9eda659057ef86241823b0672f0f219f/twitter_bot/twitter_bot.py#L76-L121
jessamynsmith/twitterbot
twitter_bot/twitter_bot.py
TwitterBot.send_message
def send_message(self, message, mention_id=None, mentions=[]): """ Send the specified message to twitter, with appropriate mentions, tokenized as necessary :param message: Message to be sent :param mention_id: In-reply-to mention_id (to link messages to a previous message) :param mentions: List of usernames to mention in reply :return: """ messages = self.tokenize(message, self.MESSAGE_LENGTH, mentions) code = 0 for message in messages: if self.dry_run: mention_message = '' if mention_id: mention_message = " to mention_id '{0}'".format(mention_id) logging.info("Not posting to Twitter because DRY_RUN is set. Would have posted " "the following message{0}:\n{1}".format(mention_message, message)) else: try: self.twitter.statuses.update(status=message, in_reply_to_status_id=mention_id) except TwitterHTTPError as e: logging.error('Unable to post to twitter: {0}'.format(e)) code = e.response_data['errors'][0]['code'] return code
python
def send_message(self, message, mention_id=None, mentions=[]): """ Send the specified message to twitter, with appropriate mentions, tokenized as necessary :param message: Message to be sent :param mention_id: In-reply-to mention_id (to link messages to a previous message) :param mentions: List of usernames to mention in reply :return: """ messages = self.tokenize(message, self.MESSAGE_LENGTH, mentions) code = 0 for message in messages: if self.dry_run: mention_message = '' if mention_id: mention_message = " to mention_id '{0}'".format(mention_id) logging.info("Not posting to Twitter because DRY_RUN is set. Would have posted " "the following message{0}:\n{1}".format(mention_message, message)) else: try: self.twitter.statuses.update(status=message, in_reply_to_status_id=mention_id) except TwitterHTTPError as e: logging.error('Unable to post to twitter: {0}'.format(e)) code = e.response_data['errors'][0]['code'] return code
[ "def", "send_message", "(", "self", ",", "message", ",", "mention_id", "=", "None", ",", "mentions", "=", "[", "]", ")", ":", "messages", "=", "self", ".", "tokenize", "(", "message", ",", "self", ".", "MESSAGE_LENGTH", ",", "mentions", ")", "code", "=...
Send the specified message to twitter, with appropriate mentions, tokenized as necessary :param message: Message to be sent :param mention_id: In-reply-to mention_id (to link messages to a previous message) :param mentions: List of usernames to mention in reply :return:
[ "Send", "the", "specified", "message", "to", "twitter", "with", "appropriate", "mentions", "tokenized", "as", "necessary", ":", "param", "message", ":", "Message", "to", "be", "sent", ":", "param", "mention_id", ":", "In", "-", "reply", "-", "to", "mention_i...
train
https://github.com/jessamynsmith/twitterbot/blob/10a5d07b9eda659057ef86241823b0672f0f219f/twitter_bot/twitter_bot.py#L123-L147
jessamynsmith/twitterbot
twitter_bot/twitter_bot.py
TwitterBot.get_reply_to_names
def get_reply_to_names(self, mention): """ Get a sorted list of unique usernames mentioned in the message, excluding the bot's own name :param mention: JSON mention object from twitter :return: list of usernames """ mention_list = [user['screen_name'] for user in mention['entities']['user_mentions']] mention_list.append(mention['user']['screen_name']) reply_to_names = set(mention_list) # Do not include bot's own name reply_to_names.discard(self.screen_name) return sorted(list(reply_to_names))
python
def get_reply_to_names(self, mention): """ Get a sorted list of unique usernames mentioned in the message, excluding the bot's own name :param mention: JSON mention object from twitter :return: list of usernames """ mention_list = [user['screen_name'] for user in mention['entities']['user_mentions']] mention_list.append(mention['user']['screen_name']) reply_to_names = set(mention_list) # Do not include bot's own name reply_to_names.discard(self.screen_name) return sorted(list(reply_to_names))
[ "def", "get_reply_to_names", "(", "self", ",", "mention", ")", ":", "mention_list", "=", "[", "user", "[", "'screen_name'", "]", "for", "user", "in", "mention", "[", "'entities'", "]", "[", "'user_mentions'", "]", "]", "mention_list", ".", "append", "(", "...
Get a sorted list of unique usernames mentioned in the message, excluding the bot's own name :param mention: JSON mention object from twitter :return: list of usernames
[ "Get", "a", "sorted", "list", "of", "unique", "usernames", "mentioned", "in", "the", "message", "excluding", "the", "bot", "s", "own", "name", ":", "param", "mention", ":", "JSON", "mention", "object", "from", "twitter", ":", "return", ":", "list", "of", ...
train
https://github.com/jessamynsmith/twitterbot/blob/10a5d07b9eda659057ef86241823b0672f0f219f/twitter_bot/twitter_bot.py#L149-L160
jessamynsmith/twitterbot
twitter_bot/twitter_bot.py
TwitterBot.reply_to_mentions
def reply_to_mentions(self): """ For every mention since since_id, create a message with the provider and use it to reply to the mention :return: Number of mentions processed """ since_id = self.since_id.get() kwargs = {'count': 200} if since_id: kwargs['since_id'] = since_id mentions_list = [] try: mentions_list = self.twitter.statuses.mentions_timeline(**kwargs) except TwitterHTTPError as e: logging.error('Unable to retrieve mentions from twitter: {0}'.format(e)) logging.info("Retrieved {0} mentions".format(len(mentions_list))) mentions_processed = 0 # We want to process least recent to most recent, so that since_id is set properly for mention in reversed(mentions_list): mention_id = mention['id'] reply_to_names = self.get_reply_to_names(mention) error_code = self.DUPLICATE_CODE tries = 0 message = '' while error_code == self.DUPLICATE_CODE: if tries > 10: logging.error('Unable to post duplicate message to {0}: {1}'.format( reply_to_names, message)) break elif tries == 10: # Tried 10 times to post a message, but all were duplicates message = 'No unique messages found.' else: message = self.messages.create(mention, self.MESSAGE_LENGTH) error_code = self.send_message(message, mention_id, reply_to_names) tries += 1 mentions_processed += 1 self.since_id.set('{0}'.format(mention_id)) return mentions_processed
python
def reply_to_mentions(self): """ For every mention since since_id, create a message with the provider and use it to reply to the mention :return: Number of mentions processed """ since_id = self.since_id.get() kwargs = {'count': 200} if since_id: kwargs['since_id'] = since_id mentions_list = [] try: mentions_list = self.twitter.statuses.mentions_timeline(**kwargs) except TwitterHTTPError as e: logging.error('Unable to retrieve mentions from twitter: {0}'.format(e)) logging.info("Retrieved {0} mentions".format(len(mentions_list))) mentions_processed = 0 # We want to process least recent to most recent, so that since_id is set properly for mention in reversed(mentions_list): mention_id = mention['id'] reply_to_names = self.get_reply_to_names(mention) error_code = self.DUPLICATE_CODE tries = 0 message = '' while error_code == self.DUPLICATE_CODE: if tries > 10: logging.error('Unable to post duplicate message to {0}: {1}'.format( reply_to_names, message)) break elif tries == 10: # Tried 10 times to post a message, but all were duplicates message = 'No unique messages found.' else: message = self.messages.create(mention, self.MESSAGE_LENGTH) error_code = self.send_message(message, mention_id, reply_to_names) tries += 1 mentions_processed += 1 self.since_id.set('{0}'.format(mention_id)) return mentions_processed
[ "def", "reply_to_mentions", "(", "self", ")", ":", "since_id", "=", "self", ".", "since_id", ".", "get", "(", ")", "kwargs", "=", "{", "'count'", ":", "200", "}", "if", "since_id", ":", "kwargs", "[", "'since_id'", "]", "=", "since_id", "mentions_list", ...
For every mention since since_id, create a message with the provider and use it to reply to the mention :return: Number of mentions processed
[ "For", "every", "mention", "since", "since_id", "create", "a", "message", "with", "the", "provider", "and", "use", "it", "to", "reply", "to", "the", "mention", ":", "return", ":", "Number", "of", "mentions", "processed" ]
train
https://github.com/jessamynsmith/twitterbot/blob/10a5d07b9eda659057ef86241823b0672f0f219f/twitter_bot/twitter_bot.py#L162-L207
jessamynsmith/twitterbot
twitter_bot/twitter_bot.py
BotRunner.go
def go(self, settings, command): """ Run the specified command using a TwitterBot created with the provided settings :param settings: Settings class :param command: Command to run, either 'post_message' or 'reply_to_mentions' :return: Result of running the command """ bot = TwitterBot(settings) result = 1 if command == 'post_message': result = bot.post_message() elif command == 'reply_to_mentions': result = bot.reply_to_mentions() else: print("Command must be either 'post_message' or 'reply_to_mentions'") return result
python
def go(self, settings, command): """ Run the specified command using a TwitterBot created with the provided settings :param settings: Settings class :param command: Command to run, either 'post_message' or 'reply_to_mentions' :return: Result of running the command """ bot = TwitterBot(settings) result = 1 if command == 'post_message': result = bot.post_message() elif command == 'reply_to_mentions': result = bot.reply_to_mentions() else: print("Command must be either 'post_message' or 'reply_to_mentions'") return result
[ "def", "go", "(", "self", ",", "settings", ",", "command", ")", ":", "bot", "=", "TwitterBot", "(", "settings", ")", "result", "=", "1", "if", "command", "==", "'post_message'", ":", "result", "=", "bot", ".", "post_message", "(", ")", "elif", "command...
Run the specified command using a TwitterBot created with the provided settings :param settings: Settings class :param command: Command to run, either 'post_message' or 'reply_to_mentions' :return: Result of running the command
[ "Run", "the", "specified", "command", "using", "a", "TwitterBot", "created", "with", "the", "provided", "settings", ":", "param", "settings", ":", "Settings", "class", ":", "param", "command", ":", "Command", "to", "run", "either", "post_message", "or", "reply...
train
https://github.com/jessamynsmith/twitterbot/blob/10a5d07b9eda659057ef86241823b0672f0f219f/twitter_bot/twitter_bot.py#L220-L237
bovee/Aston
aston/trace/trace.py
decompress
def decompress(zdata): """ Unserializes an AstonFrame. Parameters ---------- zdata : bytes Returns ------- Trace or Chromatogram """ data = zlib.decompress(zdata) lc = struct.unpack('<L', data[0:4])[0] li = struct.unpack('<L', data[4:8])[0] c = json.loads(data[8:8 + lc].decode('utf-8')) i = np.fromstring(data[8 + lc:8 + lc + li], dtype=np.float32) v = np.fromstring(data[8 + lc + li:], dtype=np.float64) if len(c) == 1: return Trace(v, i, name=c[0]) else: return Chromatogram(v.reshape(len(i), len(c)), i, c)
python
def decompress(zdata): """ Unserializes an AstonFrame. Parameters ---------- zdata : bytes Returns ------- Trace or Chromatogram """ data = zlib.decompress(zdata) lc = struct.unpack('<L', data[0:4])[0] li = struct.unpack('<L', data[4:8])[0] c = json.loads(data[8:8 + lc].decode('utf-8')) i = np.fromstring(data[8 + lc:8 + lc + li], dtype=np.float32) v = np.fromstring(data[8 + lc + li:], dtype=np.float64) if len(c) == 1: return Trace(v, i, name=c[0]) else: return Chromatogram(v.reshape(len(i), len(c)), i, c)
[ "def", "decompress", "(", "zdata", ")", ":", "data", "=", "zlib", ".", "decompress", "(", "zdata", ")", "lc", "=", "struct", ".", "unpack", "(", "'<L'", ",", "data", "[", "0", ":", "4", "]", ")", "[", "0", "]", "li", "=", "struct", ".", "unpack...
Unserializes an AstonFrame. Parameters ---------- zdata : bytes Returns ------- Trace or Chromatogram
[ "Unserializes", "an", "AstonFrame", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/trace.py#L456-L479
bovee/Aston
aston/trace/trace.py
_slice_idxs
def _slice_idxs(df, twin=None): """ Returns a slice of the incoming array filtered between the two times specified. Assumes the array is the same length as self.data. Acts in the time() and trace() functions. """ if twin is None: return 0, df.shape[0] tme = df.index if twin[0] is None: st_idx = 0 else: st_idx = (np.abs(tme - twin[0])).argmin() if twin[1] is None: en_idx = df.shape[0] else: en_idx = (np.abs(tme - twin[1])).argmin() + 1 return st_idx, en_idx
python
def _slice_idxs(df, twin=None): """ Returns a slice of the incoming array filtered between the two times specified. Assumes the array is the same length as self.data. Acts in the time() and trace() functions. """ if twin is None: return 0, df.shape[0] tme = df.index if twin[0] is None: st_idx = 0 else: st_idx = (np.abs(tme - twin[0])).argmin() if twin[1] is None: en_idx = df.shape[0] else: en_idx = (np.abs(tme - twin[1])).argmin() + 1 return st_idx, en_idx
[ "def", "_slice_idxs", "(", "df", ",", "twin", "=", "None", ")", ":", "if", "twin", "is", "None", ":", "return", "0", ",", "df", ".", "shape", "[", "0", "]", "tme", "=", "df", ".", "index", "if", "twin", "[", "0", "]", "is", "None", ":", "st_i...
Returns a slice of the incoming array filtered between the two times specified. Assumes the array is the same length as self.data. Acts in the time() and trace() functions.
[ "Returns", "a", "slice", "of", "the", "incoming", "array", "filtered", "between", "the", "two", "times", "specified", ".", "Assumes", "the", "array", "is", "the", "same", "length", "as", "self", ".", "data", ".", "Acts", "in", "the", "time", "()", "and",...
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/trace.py#L482-L501
bovee/Aston
aston/trace/trace.py
Trace._apply_data
def _apply_data(self, f, ts, reverse=False): """ Convenience function for all of the math stuff. """ # TODO: needs to catch np numeric types? if isinstance(ts, (int, float)): d = ts * np.ones(self.shape[0]) elif ts is None: d = None elif np.array_equal(ts.index, self.index): d = ts.values else: d = ts._retime(self.index) if not reverse: new_data = np.apply_along_axis(f, 0, self.values, d) else: new_data = np.apply_along_axis(f, 0, d, self.values) return Trace(new_data, self.index, name=self.name)
python
def _apply_data(self, f, ts, reverse=False): """ Convenience function for all of the math stuff. """ # TODO: needs to catch np numeric types? if isinstance(ts, (int, float)): d = ts * np.ones(self.shape[0]) elif ts is None: d = None elif np.array_equal(ts.index, self.index): d = ts.values else: d = ts._retime(self.index) if not reverse: new_data = np.apply_along_axis(f, 0, self.values, d) else: new_data = np.apply_along_axis(f, 0, d, self.values) return Trace(new_data, self.index, name=self.name)
[ "def", "_apply_data", "(", "self", ",", "f", ",", "ts", ",", "reverse", "=", "False", ")", ":", "# TODO: needs to catch np numeric types?", "if", "isinstance", "(", "ts", ",", "(", "int", ",", "float", ")", ")", ":", "d", "=", "ts", "*", "np", ".", "...
Convenience function for all of the math stuff.
[ "Convenience", "function", "for", "all", "of", "the", "math", "stuff", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/trace.py#L95-L113
bovee/Aston
aston/trace/trace.py
Chromatogram.traces
def traces(self): """ Decomposes the Chromatogram into a collection of Traces. Returns ------- list """ traces = [] for v, c in zip(self.values.T, self.columns): traces.append(Trace(v, self.index, name=c)) return traces
python
def traces(self): """ Decomposes the Chromatogram into a collection of Traces. Returns ------- list """ traces = [] for v, c in zip(self.values.T, self.columns): traces.append(Trace(v, self.index, name=c)) return traces
[ "def", "traces", "(", "self", ")", ":", "traces", "=", "[", "]", "for", "v", ",", "c", "in", "zip", "(", "self", ".", "values", ".", "T", ",", "self", ".", "columns", ")", ":", "traces", ".", "append", "(", "Trace", "(", "v", ",", "self", "."...
Decomposes the Chromatogram into a collection of Traces. Returns ------- list
[ "Decomposes", "the", "Chromatogram", "into", "a", "collection", "of", "Traces", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/trace.py#L235-L246
bovee/Aston
aston/trace/trace.py
Chromatogram.plot
def plot(self, style='heatmap', legend=False, cmap=None, ax=None): """ Presents the AstonFrame using matplotlib. Parameters ---------- style : {'heatmap', 'colors', ''} legend : bool, optional cmap: matplotlib.colors.Colormap, optional ax : matplotlib.axes.Axes, optional """ # styles: 2d, colors, otherwise interpret as trace? if ax is None: import matplotlib.pyplot as plt ax = plt.gca() if style == 'heatmap': ions = self.columns ext = (self.index[0], self.index[-1], min(ions), max(ions)) grid = self.values[:, np.argsort(self.columns)].transpose() if isinstance(self.values, scipy.sparse.spmatrix): grid = grid.toarray() img = ax.imshow(grid, origin='lower', aspect='auto', extent=ext, cmap=cmap) if legend: ax.figure.colorbar(img) elif style == 'colors': # TODO: importing gaussian at the top leads to a whole # mess of dependency issues => fix somehow? from aston.peak.peak_models import gaussian from matplotlib.colors import ListedColormap wvs = np.genfromtxt(np.array(self.columns).astype(bytes)) # wvs = self.columns.astype(float) # http://www.ppsloan.org/publications/XYZJCGT.pdf vis_filt = np.zeros((3, len(wvs))) vis_filt[0] = 1.065 * gaussian(wvs, x=595.8, w=33.33) + \ 0.366 * gaussian(wvs, x=446.8, w=19.44) vis_filt[1] = 1.014 * gaussian(np.log(wvs), x=np.log(556.3), w=0.075) vis_filt[2] = 1.839 * gaussian(np.log(wvs), x=np.log(449.8), w=0.051) if isinstance(self.values, scipy.sparse.spmatrix): xyz = np.dot(self.values.toarray(), vis_filt.T) else: xyz = np.dot(self.values.copy(), vis_filt.T) # http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html xyz_rgb = [[3.2404542, -1.5371385, -0.4985314], [-0.9692660, 1.8760108, 0.0415560], [0.0556434, -0.2040259, 1.0572252]] xyz_rgb = np.array(xyz_rgb) rgb = np.dot(xyz_rgb, xyz.T).T # normalize rgb[rgb < 0] = 0 rgb /= np.max(rgb) rgb = 1 - np.abs(rgb) # plot cmask = np.meshgrid(np.arange(rgb.shape[0]), 0)[0] ax.imshow(cmask, cmap=ListedColormap(rgb), aspect='auto', extent=(self.index[0], self.index[-1], 0, 1)) 
ax.yaxis.set_ticks([]) else: if cmap is not None: color = cmap(0, 1) else: color = 'k' self.trace().plot(color=color, ax=ax)
python
def plot(self, style='heatmap', legend=False, cmap=None, ax=None): """ Presents the AstonFrame using matplotlib. Parameters ---------- style : {'heatmap', 'colors', ''} legend : bool, optional cmap: matplotlib.colors.Colormap, optional ax : matplotlib.axes.Axes, optional """ # styles: 2d, colors, otherwise interpret as trace? if ax is None: import matplotlib.pyplot as plt ax = plt.gca() if style == 'heatmap': ions = self.columns ext = (self.index[0], self.index[-1], min(ions), max(ions)) grid = self.values[:, np.argsort(self.columns)].transpose() if isinstance(self.values, scipy.sparse.spmatrix): grid = grid.toarray() img = ax.imshow(grid, origin='lower', aspect='auto', extent=ext, cmap=cmap) if legend: ax.figure.colorbar(img) elif style == 'colors': # TODO: importing gaussian at the top leads to a whole # mess of dependency issues => fix somehow? from aston.peak.peak_models import gaussian from matplotlib.colors import ListedColormap wvs = np.genfromtxt(np.array(self.columns).astype(bytes)) # wvs = self.columns.astype(float) # http://www.ppsloan.org/publications/XYZJCGT.pdf vis_filt = np.zeros((3, len(wvs))) vis_filt[0] = 1.065 * gaussian(wvs, x=595.8, w=33.33) + \ 0.366 * gaussian(wvs, x=446.8, w=19.44) vis_filt[1] = 1.014 * gaussian(np.log(wvs), x=np.log(556.3), w=0.075) vis_filt[2] = 1.839 * gaussian(np.log(wvs), x=np.log(449.8), w=0.051) if isinstance(self.values, scipy.sparse.spmatrix): xyz = np.dot(self.values.toarray(), vis_filt.T) else: xyz = np.dot(self.values.copy(), vis_filt.T) # http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html xyz_rgb = [[3.2404542, -1.5371385, -0.4985314], [-0.9692660, 1.8760108, 0.0415560], [0.0556434, -0.2040259, 1.0572252]] xyz_rgb = np.array(xyz_rgb) rgb = np.dot(xyz_rgb, xyz.T).T # normalize rgb[rgb < 0] = 0 rgb /= np.max(rgb) rgb = 1 - np.abs(rgb) # plot cmask = np.meshgrid(np.arange(rgb.shape[0]), 0)[0] ax.imshow(cmask, cmap=ListedColormap(rgb), aspect='auto', extent=(self.index[0], self.index[-1], 0, 1)) 
ax.yaxis.set_ticks([]) else: if cmap is not None: color = cmap(0, 1) else: color = 'k' self.trace().plot(color=color, ax=ax)
[ "def", "plot", "(", "self", ",", "style", "=", "'heatmap'", ",", "legend", "=", "False", ",", "cmap", "=", "None", ",", "ax", "=", "None", ")", ":", "# styles: 2d, colors, otherwise interpret as trace?", "if", "ax", "is", "None", ":", "import", "matplotlib",...
Presents the AstonFrame using matplotlib. Parameters ---------- style : {'heatmap', 'colors', ''} legend : bool, optional cmap: matplotlib.colors.Colormap, optional ax : matplotlib.axes.Axes, optional
[ "Presents", "the", "AstonFrame", "using", "matplotlib", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/trace.py#L278-L349
bovee/Aston
aston/trace/trace.py
Chromatogram.as_sound
def as_sound(self, filename, speed=60, cutoff=50): """ Convert AstonFrame into a WAV file. Parameters ---------- filename : str Name of wavfile to create. speed : float, optional How much to speed up for sound recording, e.g. a value of 60 will turn an hour-long AstonFrame into a minute-long sound clip. cutoff : float, optional m/z's under this value will be clipped out. """ # make a 1d array for the sound def to_t(t): return (t - self.index[0]) / speed wav_len = int(to_t(self.index[-1]) * 60 * 44100) wav = np.zeros(wav_len) # create an artificial array to interpolate times out of tmask = np.linspace(0, 1, self.shape[0]) # come up with a mapping from mz to tone min_hz, max_hz = 50, 1000 min_mz, max_mz = min(self.columns), max(self.columns) def mz_to_wv(mz): """ Maps a wavelength/mz to a tone. """ try: mz = float(mz) except: return 100 wv = (mz * (max_hz - min_hz) - max_hz * min_mz + min_hz * max_mz) / (max_mz - min_mz) return int(44100 / wv) # go through each trace and map it into the sound array for i, mz in enumerate(self.columns): if float(mz) < cutoff: # clip out mz/wv below a certain threshold # handy if data has low level noise continue print(str(i) + '/' + str(self.shape[1])) inter_x = np.linspace(0, 1, wav[::mz_to_wv(mz)].shape[0]) wav[::mz_to_wv(mz)] += np.interp(inter_x, tmask, self.values[:, i]) # scale the new array and write it out scaled = wav / np.max(np.abs(wav)) scaled = scipy.signal.fftconvolve(scaled, np.ones(5) / 5, mode='same') scaled = np.int16(scaled * 32767) scipy.io.wavfile.write(filename, 44100, scaled)
python
def as_sound(self, filename, speed=60, cutoff=50): """ Convert AstonFrame into a WAV file. Parameters ---------- filename : str Name of wavfile to create. speed : float, optional How much to speed up for sound recording, e.g. a value of 60 will turn an hour-long AstonFrame into a minute-long sound clip. cutoff : float, optional m/z's under this value will be clipped out. """ # make a 1d array for the sound def to_t(t): return (t - self.index[0]) / speed wav_len = int(to_t(self.index[-1]) * 60 * 44100) wav = np.zeros(wav_len) # create an artificial array to interpolate times out of tmask = np.linspace(0, 1, self.shape[0]) # come up with a mapping from mz to tone min_hz, max_hz = 50, 1000 min_mz, max_mz = min(self.columns), max(self.columns) def mz_to_wv(mz): """ Maps a wavelength/mz to a tone. """ try: mz = float(mz) except: return 100 wv = (mz * (max_hz - min_hz) - max_hz * min_mz + min_hz * max_mz) / (max_mz - min_mz) return int(44100 / wv) # go through each trace and map it into the sound array for i, mz in enumerate(self.columns): if float(mz) < cutoff: # clip out mz/wv below a certain threshold # handy if data has low level noise continue print(str(i) + '/' + str(self.shape[1])) inter_x = np.linspace(0, 1, wav[::mz_to_wv(mz)].shape[0]) wav[::mz_to_wv(mz)] += np.interp(inter_x, tmask, self.values[:, i]) # scale the new array and write it out scaled = wav / np.max(np.abs(wav)) scaled = scipy.signal.fftconvolve(scaled, np.ones(5) / 5, mode='same') scaled = np.int16(scaled * 32767) scipy.io.wavfile.write(filename, 44100, scaled)
[ "def", "as_sound", "(", "self", ",", "filename", ",", "speed", "=", "60", ",", "cutoff", "=", "50", ")", ":", "# make a 1d array for the sound", "def", "to_t", "(", "t", ")", ":", "return", "(", "t", "-", "self", ".", "index", "[", "0", "]", ")", "...
Convert AstonFrame into a WAV file. Parameters ---------- filename : str Name of wavfile to create. speed : float, optional How much to speed up for sound recording, e.g. a value of 60 will turn an hour-long AstonFrame into a minute-long sound clip. cutoff : float, optional m/z's under this value will be clipped out.
[ "Convert", "AstonFrame", "into", "a", "WAV", "file", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/trace.py#L351-L405
bovee/Aston
aston/trace/trace.py
Chromatogram.scan
def scan(self, t, dt=None, aggfunc=None): """ Returns the spectrum from a specific time. Parameters ---------- t : float dt : float """ idx = (np.abs(self.index - t)).argmin() if dt is None: # only take the spectra at the nearest time mz_abn = self.values[idx, :].copy() else: # sum up all the spectra over a range en_idx = (np.abs(self.index - t - dt)).argmin() idx, en_idx = min(idx, en_idx), max(idx, en_idx) if aggfunc is None: mz_abn = self.values[idx:en_idx + 1, :].copy().sum(axis=0) else: mz_abn = aggfunc(self.values[idx:en_idx + 1, :].copy()) if isinstance(mz_abn, scipy.sparse.spmatrix): mz_abn = mz_abn.toarray()[0] return Scan(self.columns, mz_abn)
python
def scan(self, t, dt=None, aggfunc=None): """ Returns the spectrum from a specific time. Parameters ---------- t : float dt : float """ idx = (np.abs(self.index - t)).argmin() if dt is None: # only take the spectra at the nearest time mz_abn = self.values[idx, :].copy() else: # sum up all the spectra over a range en_idx = (np.abs(self.index - t - dt)).argmin() idx, en_idx = min(idx, en_idx), max(idx, en_idx) if aggfunc is None: mz_abn = self.values[idx:en_idx + 1, :].copy().sum(axis=0) else: mz_abn = aggfunc(self.values[idx:en_idx + 1, :].copy()) if isinstance(mz_abn, scipy.sparse.spmatrix): mz_abn = mz_abn.toarray()[0] return Scan(self.columns, mz_abn)
[ "def", "scan", "(", "self", ",", "t", ",", "dt", "=", "None", ",", "aggfunc", "=", "None", ")", ":", "idx", "=", "(", "np", ".", "abs", "(", "self", ".", "index", "-", "t", ")", ")", ".", "argmin", "(", ")", "if", "dt", "is", "None", ":", ...
Returns the spectrum from a specific time. Parameters ---------- t : float dt : float
[ "Returns", "the", "spectrum", "from", "a", "specific", "time", "." ]
train
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/trace.py#L411-L435
Locu/chronology
kronos/kronos/storage/cassandra/client.py
CassandraStorage.setup_cassandra
def setup_cassandra(self, namespaces): """ Set up a connection to the specified Cassandra cluster and create the specified keyspaces if they dont exist. """ connections_to_shutdown = [] self.cluster = Cluster(self.hosts) for namespace_name in namespaces: keyspace = '%s_%s' % (self.keyspace_prefix, namespace_name) namespace = Namespace(self.cluster, keyspace, self.replication_factor, self.read_size) connections_to_shutdown.append(namespace.session) self.namespaces[namespace_name] = namespace # Shutdown Cluster instance after shutting down all Sessions. connections_to_shutdown.append(self.cluster) # Shutdown all connections to Cassandra before exiting Python interpretter. atexit.register(lambda: map(lambda c: c.shutdown(), connections_to_shutdown))
python
def setup_cassandra(self, namespaces): """ Set up a connection to the specified Cassandra cluster and create the specified keyspaces if they dont exist. """ connections_to_shutdown = [] self.cluster = Cluster(self.hosts) for namespace_name in namespaces: keyspace = '%s_%s' % (self.keyspace_prefix, namespace_name) namespace = Namespace(self.cluster, keyspace, self.replication_factor, self.read_size) connections_to_shutdown.append(namespace.session) self.namespaces[namespace_name] = namespace # Shutdown Cluster instance after shutting down all Sessions. connections_to_shutdown.append(self.cluster) # Shutdown all connections to Cassandra before exiting Python interpretter. atexit.register(lambda: map(lambda c: c.shutdown(), connections_to_shutdown))
[ "def", "setup_cassandra", "(", "self", ",", "namespaces", ")", ":", "connections_to_shutdown", "=", "[", "]", "self", ".", "cluster", "=", "Cluster", "(", "self", ".", "hosts", ")", "for", "namespace_name", "in", "namespaces", ":", "keyspace", "=", "'%s_%s'"...
Set up a connection to the specified Cassandra cluster and create the specified keyspaces if they dont exist.
[ "Set", "up", "a", "connection", "to", "the", "specified", "Cassandra", "cluster", "and", "create", "the", "specified", "keyspaces", "if", "they", "dont", "exist", "." ]
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/storage/cassandra/client.py#L47-L67
Locu/chronology
kronos/kronos/storage/cassandra/client.py
CassandraStorage._delete
def _delete(self, namespace, stream, start_id, end_time, configuration): """ Delete events for `stream` between `start_id` and `end_time`. `stream` : The stream to delete events for. `start_id` : Delete events with id > `start_id`. `end_time` : Delete events ending <= `end_time`. `configuration` : A dictionary of settings to override any default settings, such as number of shards or width of a time interval. """ stream = self.get_stream(namespace, stream, configuration) return stream.delete(start_id, uuid_from_kronos_time(end_time, _type=UUIDType.HIGHEST))
python
def _delete(self, namespace, stream, start_id, end_time, configuration): """ Delete events for `stream` between `start_id` and `end_time`. `stream` : The stream to delete events for. `start_id` : Delete events with id > `start_id`. `end_time` : Delete events ending <= `end_time`. `configuration` : A dictionary of settings to override any default settings, such as number of shards or width of a time interval. """ stream = self.get_stream(namespace, stream, configuration) return stream.delete(start_id, uuid_from_kronos_time(end_time, _type=UUIDType.HIGHEST))
[ "def", "_delete", "(", "self", ",", "namespace", ",", "stream", ",", "start_id", ",", "end_time", ",", "configuration", ")", ":", "stream", "=", "self", ".", "get_stream", "(", "namespace", ",", "stream", ",", "configuration", ")", "return", "stream", ".",...
Delete events for `stream` between `start_id` and `end_time`. `stream` : The stream to delete events for. `start_id` : Delete events with id > `start_id`. `end_time` : Delete events ending <= `end_time`. `configuration` : A dictionary of settings to override any default settings, such as number of shards or width of a time interval.
[ "Delete", "events", "for", "stream", "between", "start_id", "and", "end_time", ".", "stream", ":", "The", "stream", "to", "delete", "events", "for", ".", "start_id", ":", "Delete", "events", "with", "id", ">", "start_id", ".", "end_time", ":", "Delete", "e...
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/storage/cassandra/client.py#L92-L105
Locu/chronology
kronos/kronos/storage/cassandra/client.py
CassandraStorage._retrieve
def _retrieve(self, namespace, stream, start_id, end_time, order, limit, configuration): """ Retrieve events for `stream` between `start_id` and `end_time`. `stream` : The stream to return events for. `start_id` : Return events with id > `start_id`. `end_time` : Return events ending <= `end_time`. `order` : Whether to return the results in ResultOrder.ASCENDING or ResultOrder.DESCENDING time-order. `configuration` : A dictionary of settings to override any default settings, such as number of shards or width of a time interval. """ stream = self.get_stream(namespace, stream, configuration) events = stream.iterator(start_id, uuid_from_kronos_time(end_time, _type=UUIDType.HIGHEST), order == ResultOrder.DESCENDING, limit) events = events.__iter__() event = events.next() # If first event's ID is equal to `start_id`, skip it. if event.id != start_id: yield event.json while True: yield events.next().json
python
def _retrieve(self, namespace, stream, start_id, end_time, order, limit, configuration): """ Retrieve events for `stream` between `start_id` and `end_time`. `stream` : The stream to return events for. `start_id` : Return events with id > `start_id`. `end_time` : Return events ending <= `end_time`. `order` : Whether to return the results in ResultOrder.ASCENDING or ResultOrder.DESCENDING time-order. `configuration` : A dictionary of settings to override any default settings, such as number of shards or width of a time interval. """ stream = self.get_stream(namespace, stream, configuration) events = stream.iterator(start_id, uuid_from_kronos_time(end_time, _type=UUIDType.HIGHEST), order == ResultOrder.DESCENDING, limit) events = events.__iter__() event = events.next() # If first event's ID is equal to `start_id`, skip it. if event.id != start_id: yield event.json while True: yield events.next().json
[ "def", "_retrieve", "(", "self", ",", "namespace", ",", "stream", ",", "start_id", ",", "end_time", ",", "order", ",", "limit", ",", "configuration", ")", ":", "stream", "=", "self", ".", "get_stream", "(", "namespace", ",", "stream", ",", "configuration",...
Retrieve events for `stream` between `start_id` and `end_time`. `stream` : The stream to return events for. `start_id` : Return events with id > `start_id`. `end_time` : Return events ending <= `end_time`. `order` : Whether to return the results in ResultOrder.ASCENDING or ResultOrder.DESCENDING time-order. `configuration` : A dictionary of settings to override any default settings, such as number of shards or width of a time interval.
[ "Retrieve", "events", "for", "stream", "between", "start_id", "and", "end_time", ".", "stream", ":", "The", "stream", "to", "return", "events", "for", ".", "start_id", ":", "Return", "events", "with", "id", ">", "start_id", ".", "end_time", ":", "Return", ...
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/storage/cassandra/client.py#L107-L131
Locu/chronology
kronos/kronos/app.py
index
def index(environment, start_response, headers): """ Return the status of this Kronos instance + its backends> Doesn't expect any URL parameters. """ response = {'service': 'kronosd', 'version': kronos.__version__, 'id': settings.node['id'], 'storage': {}, SUCCESS_FIELD: True} # Check if each backend is alive for name, backend in router.get_backends(): response['storage'][name] = {'alive': backend.is_alive(), 'backend': settings.storage[name]['backend']} start_response('200 OK', headers) return response
python
def index(environment, start_response, headers): """ Return the status of this Kronos instance + its backends> Doesn't expect any URL parameters. """ response = {'service': 'kronosd', 'version': kronos.__version__, 'id': settings.node['id'], 'storage': {}, SUCCESS_FIELD: True} # Check if each backend is alive for name, backend in router.get_backends(): response['storage'][name] = {'alive': backend.is_alive(), 'backend': settings.storage[name]['backend']} start_response('200 OK', headers) return response
[ "def", "index", "(", "environment", ",", "start_response", ",", "headers", ")", ":", "response", "=", "{", "'service'", ":", "'kronosd'", ",", "'version'", ":", "kronos", ".", "__version__", ",", "'id'", ":", "settings", ".", "node", "[", "'id'", "]", ",...
Return the status of this Kronos instance + its backends> Doesn't expect any URL parameters.
[ "Return", "the", "status", "of", "this", "Kronos", "instance", "+", "its", "backends", ">", "Doesn", "t", "expect", "any", "URL", "parameters", "." ]
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/app.py#L52-L69
Locu/chronology
kronos/kronos/app.py
put_events
def put_events(environment, start_response, headers): """ Store events in backends POST body should contain a JSON encoded version of: { namespace: namespace_name (optional), events: { stream_name1 : [event1, event2, ...], stream_name2 : [event1, event2, ...], ... } } Where each event is a dictionary of keys and values. """ errors = [] events_to_insert = defaultdict(list) request_json = environment['json'] namespace = request_json.get('namespace', settings.default_namespace) # Validate streams and events for stream, events in request_json.get('events', {}).iteritems(): try: validate_stream(stream) except Exception, e: log.exception('put_events: stream validation failed for `%s`', stream) errors.append(repr(e)) continue for event in events: try: events_to_insert[stream].append(validate_event_and_assign_id(event)) except Exception, e: log.exception('put_events: event validation failed for `%s`', event) errors.append(repr(e)) results = {} for stream, events in events_to_insert.iteritems(): backends = router.backends_to_mutate(namespace, stream) for backend, configuration in backends.iteritems(): results[(stream, backend.name)] = execute_greenlet_async( backend.insert, namespace, stream, events, configuration) wait(results.values()) # Did any insertion fail? success = True response = defaultdict(dict) for (stream, backend), result in results.iteritems(): try: result.get() response[stream][backend] = { 'num_inserted': len(events_to_insert[stream]) } except Exception, e: log.exception('put_events: insertion to backend `%s` failed.', backend) success = False response[stream][backend] = {'num_inserted': -1, ERRORS_FIELD: [repr(e)]} response[SUCCESS_FIELD] = success and not errors if errors: response[ERRORS_FIELD] = errors start_response('200 OK', headers) return response
python
def put_events(environment, start_response, headers): """ Store events in backends POST body should contain a JSON encoded version of: { namespace: namespace_name (optional), events: { stream_name1 : [event1, event2, ...], stream_name2 : [event1, event2, ...], ... } } Where each event is a dictionary of keys and values. """ errors = [] events_to_insert = defaultdict(list) request_json = environment['json'] namespace = request_json.get('namespace', settings.default_namespace) # Validate streams and events for stream, events in request_json.get('events', {}).iteritems(): try: validate_stream(stream) except Exception, e: log.exception('put_events: stream validation failed for `%s`', stream) errors.append(repr(e)) continue for event in events: try: events_to_insert[stream].append(validate_event_and_assign_id(event)) except Exception, e: log.exception('put_events: event validation failed for `%s`', event) errors.append(repr(e)) results = {} for stream, events in events_to_insert.iteritems(): backends = router.backends_to_mutate(namespace, stream) for backend, configuration in backends.iteritems(): results[(stream, backend.name)] = execute_greenlet_async( backend.insert, namespace, stream, events, configuration) wait(results.values()) # Did any insertion fail? success = True response = defaultdict(dict) for (stream, backend), result in results.iteritems(): try: result.get() response[stream][backend] = { 'num_inserted': len(events_to_insert[stream]) } except Exception, e: log.exception('put_events: insertion to backend `%s` failed.', backend) success = False response[stream][backend] = {'num_inserted': -1, ERRORS_FIELD: [repr(e)]} response[SUCCESS_FIELD] = success and not errors if errors: response[ERRORS_FIELD] = errors start_response('200 OK', headers) return response
[ "def", "put_events", "(", "environment", ",", "start_response", ",", "headers", ")", ":", "errors", "=", "[", "]", "events_to_insert", "=", "defaultdict", "(", "list", ")", "request_json", "=", "environment", "[", "'json'", "]", "namespace", "=", "request_json...
Store events in backends POST body should contain a JSON encoded version of: { namespace: namespace_name (optional), events: { stream_name1 : [event1, event2, ...], stream_name2 : [event1, event2, ...], ... } } Where each event is a dictionary of keys and values.
[ "Store", "events", "in", "backends", "POST", "body", "should", "contain", "a", "JSON", "encoded", "version", "of", ":", "{", "namespace", ":", "namespace_name", "(", "optional", ")", "events", ":", "{", "stream_name1", ":", "[", "event1", "event2", "...", ...
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/app.py#L73-L133
Locu/chronology
kronos/kronos/app.py
get_events
def get_events(environment, start_response, headers): """ Retrieve events POST body should contain a JSON encoded version of: { namespace: namespace_name (optional), stream : stream_name, start_time : starting_time_as_kronos_time, end_time : ending_time_as_kronos_time, start_id : only_return_events_with_id_greater_than_me, limit: optional_maximum_number_of_events, order: ResultOrder.ASCENDING or ResultOrder.DESCENDING (default ResultOrder.ASCENDING) } Either start_time or start_id should be specified. If a retrieval breaks while returning results, you can send another retrieval request and specify start_id as the last id that you saw. Kronos will only return events that occurred after the event with that id. """ request_json = environment['json'] try: stream = request_json['stream'] validate_stream(stream) except Exception, e: log.exception('get_events: stream validation failed for `%s`', request_json.get('stream')) start_response('400 Bad Request', headers) yield marshal.dumps({ERRORS_FIELD: [repr(e)], SUCCESS_FIELD: False}) return namespace = request_json.get('namespace', settings.default_namespace) limit = int(request_json.get('limit', MAX_LIMIT)) if limit <= 0: events = [] else: backend, configuration = router.backend_to_retrieve(namespace, stream) events = backend.retrieve( namespace, stream, long(request_json.get('start_time', 0)), long(request_json['end_time']), request_json.get('start_id'), configuration, order=request_json.get('order', ResultOrder.ASCENDING), limit=limit) start_response('200 OK', headers) string_buffer = StringIO() for event in events: # TODO(usmanm): Once all backends start respecting limit, remove this check. if limit <= 0: break if string_buffer.tell() >= settings.node.flush_size: yield string_buffer.getvalue() string_buffer.close() string_buffer = StringIO() string_buffer.write(event) string_buffer.write('\r\n') limit -= 1 if string_buffer.tell(): yield string_buffer.getvalue() string_buffer.close() yield ''
python
def get_events(environment, start_response, headers): """ Retrieve events POST body should contain a JSON encoded version of: { namespace: namespace_name (optional), stream : stream_name, start_time : starting_time_as_kronos_time, end_time : ending_time_as_kronos_time, start_id : only_return_events_with_id_greater_than_me, limit: optional_maximum_number_of_events, order: ResultOrder.ASCENDING or ResultOrder.DESCENDING (default ResultOrder.ASCENDING) } Either start_time or start_id should be specified. If a retrieval breaks while returning results, you can send another retrieval request and specify start_id as the last id that you saw. Kronos will only return events that occurred after the event with that id. """ request_json = environment['json'] try: stream = request_json['stream'] validate_stream(stream) except Exception, e: log.exception('get_events: stream validation failed for `%s`', request_json.get('stream')) start_response('400 Bad Request', headers) yield marshal.dumps({ERRORS_FIELD: [repr(e)], SUCCESS_FIELD: False}) return namespace = request_json.get('namespace', settings.default_namespace) limit = int(request_json.get('limit', MAX_LIMIT)) if limit <= 0: events = [] else: backend, configuration = router.backend_to_retrieve(namespace, stream) events = backend.retrieve( namespace, stream, long(request_json.get('start_time', 0)), long(request_json['end_time']), request_json.get('start_id'), configuration, order=request_json.get('order', ResultOrder.ASCENDING), limit=limit) start_response('200 OK', headers) string_buffer = StringIO() for event in events: # TODO(usmanm): Once all backends start respecting limit, remove this check. if limit <= 0: break if string_buffer.tell() >= settings.node.flush_size: yield string_buffer.getvalue() string_buffer.close() string_buffer = StringIO() string_buffer.write(event) string_buffer.write('\r\n') limit -= 1 if string_buffer.tell(): yield string_buffer.getvalue() string_buffer.close() yield ''
[ "def", "get_events", "(", "environment", ",", "start_response", ",", "headers", ")", ":", "request_json", "=", "environment", "[", "'json'", "]", "try", ":", "stream", "=", "request_json", "[", "'stream'", "]", "validate_stream", "(", "stream", ")", "except", ...
Retrieve events POST body should contain a JSON encoded version of: { namespace: namespace_name (optional), stream : stream_name, start_time : starting_time_as_kronos_time, end_time : ending_time_as_kronos_time, start_id : only_return_events_with_id_greater_than_me, limit: optional_maximum_number_of_events, order: ResultOrder.ASCENDING or ResultOrder.DESCENDING (default ResultOrder.ASCENDING) } Either start_time or start_id should be specified. If a retrieval breaks while returning results, you can send another retrieval request and specify start_id as the last id that you saw. Kronos will only return events that occurred after the event with that id.
[ "Retrieve", "events", "POST", "body", "should", "contain", "a", "JSON", "encoded", "version", "of", ":", "{", "namespace", ":", "namespace_name", "(", "optional", ")", "stream", ":", "stream_name", "start_time", ":", "starting_time_as_kronos_time", "end_time", ":"...
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/app.py#L138-L201
Locu/chronology
kronos/kronos/app.py
delete_events
def delete_events(environment, start_response, headers): """ Delete events POST body should contain a JSON encoded version of: { namespace: namespace_name (optional), stream : stream_name, start_time : starting_time_as_kronos_time, end_time : ending_time_as_kronos_time, start_id : only_delete_events_with_id_gte_me, } Either start_time or start_id should be specified. """ request_json = environment['json'] try: stream = request_json['stream'] validate_stream(stream) except Exception, e: log.exception('delete_events: stream validation failed for `%s`.', request_json.get('stream')) start_response('400 Bad Request', headers) return {ERRORS_FIELD: [repr(e)]} namespace = request_json.get('namespace', settings.default_namespace) backends = router.backends_to_mutate(namespace, stream) statuses = {} for backend, conf in backends.iteritems(): statuses[backend.name] = execute_greenlet_async( backend.delete, namespace, stream, long(request_json.get('start_time', 0)), long(request_json['end_time']), request_json.get('start_id'), conf) wait(statuses.values()) success = True response = {} for backend, status in statuses.iteritems(): try: num_deleted, errors = status.get() response[backend] = {'num_deleted': num_deleted} if errors: success = False response[ERRORS_FIELD] = errors except Exception, e: log.exception('delete_events: delete from backend `%s` failed.', backend) success = False response[backend] = {'num_deleted': -1, ERRORS_FIELD: [repr(e)]} response = {request_json['stream']: response, SUCCESS_FIELD: success} start_response('200 OK', headers) return response
python
def delete_events(environment, start_response, headers): """ Delete events POST body should contain a JSON encoded version of: { namespace: namespace_name (optional), stream : stream_name, start_time : starting_time_as_kronos_time, end_time : ending_time_as_kronos_time, start_id : only_delete_events_with_id_gte_me, } Either start_time or start_id should be specified. """ request_json = environment['json'] try: stream = request_json['stream'] validate_stream(stream) except Exception, e: log.exception('delete_events: stream validation failed for `%s`.', request_json.get('stream')) start_response('400 Bad Request', headers) return {ERRORS_FIELD: [repr(e)]} namespace = request_json.get('namespace', settings.default_namespace) backends = router.backends_to_mutate(namespace, stream) statuses = {} for backend, conf in backends.iteritems(): statuses[backend.name] = execute_greenlet_async( backend.delete, namespace, stream, long(request_json.get('start_time', 0)), long(request_json['end_time']), request_json.get('start_id'), conf) wait(statuses.values()) success = True response = {} for backend, status in statuses.iteritems(): try: num_deleted, errors = status.get() response[backend] = {'num_deleted': num_deleted} if errors: success = False response[ERRORS_FIELD] = errors except Exception, e: log.exception('delete_events: delete from backend `%s` failed.', backend) success = False response[backend] = {'num_deleted': -1, ERRORS_FIELD: [repr(e)]} response = {request_json['stream']: response, SUCCESS_FIELD: success} start_response('200 OK', headers) return response
[ "def", "delete_events", "(", "environment", ",", "start_response", ",", "headers", ")", ":", "request_json", "=", "environment", "[", "'json'", "]", "try", ":", "stream", "=", "request_json", "[", "'stream'", "]", "validate_stream", "(", "stream", ")", "except...
Delete events POST body should contain a JSON encoded version of: { namespace: namespace_name (optional), stream : stream_name, start_time : starting_time_as_kronos_time, end_time : ending_time_as_kronos_time, start_id : only_delete_events_with_id_gte_me, } Either start_time or start_id should be specified.
[ "Delete", "events", "POST", "body", "should", "contain", "a", "JSON", "encoded", "version", "of", ":", "{", "namespace", ":", "namespace_name", "(", "optional", ")", "stream", ":", "stream_name", "start_time", ":", "starting_time_as_kronos_time", "end_time", ":", ...
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/app.py#L205-L260
Locu/chronology
kronos/kronos/app.py
get_streams
def get_streams(environment, start_response, headers): """ List all streams that can be read from Kronos right now. POST body should contain a JSON encoded version of: { namespace: namespace_name (optional) } """ start_response('200 OK', headers) streams_seen_so_far = set() namespace = environment['json'].get('namespace', settings.default_namespace) for prefix, backend in router.get_read_backends(namespace): for stream in backend.streams(namespace): if stream.startswith(prefix) and stream not in streams_seen_so_far: streams_seen_so_far.add(stream) yield '{0}\r\n'.format(stream) yield ''
python
def get_streams(environment, start_response, headers): """ List all streams that can be read from Kronos right now. POST body should contain a JSON encoded version of: { namespace: namespace_name (optional) } """ start_response('200 OK', headers) streams_seen_so_far = set() namespace = environment['json'].get('namespace', settings.default_namespace) for prefix, backend in router.get_read_backends(namespace): for stream in backend.streams(namespace): if stream.startswith(prefix) and stream not in streams_seen_so_far: streams_seen_so_far.add(stream) yield '{0}\r\n'.format(stream) yield ''
[ "def", "get_streams", "(", "environment", ",", "start_response", ",", "headers", ")", ":", "start_response", "(", "'200 OK'", ",", "headers", ")", "streams_seen_so_far", "=", "set", "(", ")", "namespace", "=", "environment", "[", "'json'", "]", ".", "get", "...
List all streams that can be read from Kronos right now. POST body should contain a JSON encoded version of: { namespace: namespace_name (optional) }
[ "List", "all", "streams", "that", "can", "be", "read", "from", "Kronos", "right", "now", ".", "POST", "body", "should", "contain", "a", "JSON", "encoded", "version", "of", ":", "{", "namespace", ":", "namespace_name", "(", "optional", ")", "}" ]
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/app.py#L264-L279
Locu/chronology
kronos/kronos/app.py
infer_schema
def infer_schema(environment, start_response, headers): """ Return the inferred schema of the requested stream. POST body should contain a JSON encoded version of: { stream: stream_name, namespace: namespace_name (optional) } """ stream = environment['json']['stream'] namespace = environment['json'].get('namespace') or settings.default_namespace start_response('200 OK', headers) schema = _infer_schema(namespace, stream) response = { 'stream': stream, 'namespace': namespace, 'schema': schema, SUCCESS_FIELD: True } return response
python
def infer_schema(environment, start_response, headers): """ Return the inferred schema of the requested stream. POST body should contain a JSON encoded version of: { stream: stream_name, namespace: namespace_name (optional) } """ stream = environment['json']['stream'] namespace = environment['json'].get('namespace') or settings.default_namespace start_response('200 OK', headers) schema = _infer_schema(namespace, stream) response = { 'stream': stream, 'namespace': namespace, 'schema': schema, SUCCESS_FIELD: True } return response
[ "def", "infer_schema", "(", "environment", ",", "start_response", ",", "headers", ")", ":", "stream", "=", "environment", "[", "'json'", "]", "[", "'stream'", "]", "namespace", "=", "environment", "[", "'json'", "]", ".", "get", "(", "'namespace'", ")", "o...
Return the inferred schema of the requested stream. POST body should contain a JSON encoded version of: { stream: stream_name, namespace: namespace_name (optional) }
[ "Return", "the", "inferred", "schema", "of", "the", "requested", "stream", ".", "POST", "body", "should", "contain", "a", "JSON", "encoded", "version", "of", ":", "{", "stream", ":", "stream_name", "namespace", ":", "namespace_name", "(", "optional", ")", "}...
train
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/app.py#L283-L302
noahgoldman/adbpy
adbpy/adb.py
Adb.devices
def devices(self): """ Return a list of connected devices in the form (*serial*, *status*) where status can be any of the following: 1. device 2. offline 3. unauthorized :returns: A list of tuples representing connected devices """ devices = None with self.socket.Connect(): devices = self._command("host:devices") return parse_device_list(devices)
python
def devices(self): """ Return a list of connected devices in the form (*serial*, *status*) where status can be any of the following: 1. device 2. offline 3. unauthorized :returns: A list of tuples representing connected devices """ devices = None with self.socket.Connect(): devices = self._command("host:devices") return parse_device_list(devices)
[ "def", "devices", "(", "self", ")", ":", "devices", "=", "None", "with", "self", ".", "socket", ".", "Connect", "(", ")", ":", "devices", "=", "self", ".", "_command", "(", "\"host:devices\"", ")", "return", "parse_device_list", "(", "devices", ")" ]
Return a list of connected devices in the form (*serial*, *status*) where status can be any of the following: 1. device 2. offline 3. unauthorized :returns: A list of tuples representing connected devices
[ "Return", "a", "list", "of", "connected", "devices", "in", "the", "form", "(", "*", "serial", "*", "*", "status", "*", ")", "where", "status", "can", "be", "any", "of", "the", "following", ":" ]
train
https://github.com/noahgoldman/adbpy/blob/ecbff8a8f151852b5c36847dc812582a8674a503/adbpy/adb.py#L59-L74
StorjOld/heartbeat
heartbeat/Merkle/MerkleTree.py
MerkleTree.add_leaf
def add_leaf(self, leaf_blob): """Adds a leaf to the list of leaves. Does not build the tree so call `build()` to construct the rest of the tree from the added leaves. :param leaf_blob: the leaf payload to add. should be a hashable object """ self.leaves.append(MerkleLeaf(len(self.leaves), leaf_blob))
python
def add_leaf(self, leaf_blob): """Adds a leaf to the list of leaves. Does not build the tree so call `build()` to construct the rest of the tree from the added leaves. :param leaf_blob: the leaf payload to add. should be a hashable object """ self.leaves.append(MerkleLeaf(len(self.leaves), leaf_blob))
[ "def", "add_leaf", "(", "self", ",", "leaf_blob", ")", ":", "self", ".", "leaves", ".", "append", "(", "MerkleLeaf", "(", "len", "(", "self", ".", "leaves", ")", ",", "leaf_blob", ")", ")" ]
Adds a leaf to the list of leaves. Does not build the tree so call `build()` to construct the rest of the tree from the added leaves. :param leaf_blob: the leaf payload to add. should be a hashable object
[ "Adds", "a", "leaf", "to", "the", "list", "of", "leaves", ".", "Does", "not", "build", "the", "tree", "so", "call", "build", "()", "to", "construct", "the", "rest", "of", "the", "tree", "from", "the", "added", "leaves", "." ]
train
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/Merkle/MerkleTree.py#L137-L143
StorjOld/heartbeat
heartbeat/Merkle/MerkleTree.py
MerkleTree.build
def build(self): """Builds the tree from the leaves that have been added. This function populates the tree from the leaves down non-recursively """ self.order = MerkleTree.get_order(len(self.leaves)) n = 2 ** self.order self.nodes = [b''] * 2 * n # populate lowest nodes with leaf hashes for j in range(0, n): if (j < len(self.leaves)): self.nodes[j + n - 1] = self.leaves[j].get_hash() else: break # now populate the entire tree for i in range(1, self.order + 1): p = 2 ** (self.order - i) for j in range(0, p): k = p + j - 1 h = hashlib.sha256() l = self.nodes[MerkleTree.get_left_child(k)] if (len(l) > 0): h.update(l) r = self.nodes[MerkleTree.get_right_child(k)] if (len(r) > 0): h.update(r) self.nodes[k] = h.digest()
python
def build(self): """Builds the tree from the leaves that have been added. This function populates the tree from the leaves down non-recursively """ self.order = MerkleTree.get_order(len(self.leaves)) n = 2 ** self.order self.nodes = [b''] * 2 * n # populate lowest nodes with leaf hashes for j in range(0, n): if (j < len(self.leaves)): self.nodes[j + n - 1] = self.leaves[j].get_hash() else: break # now populate the entire tree for i in range(1, self.order + 1): p = 2 ** (self.order - i) for j in range(0, p): k = p + j - 1 h = hashlib.sha256() l = self.nodes[MerkleTree.get_left_child(k)] if (len(l) > 0): h.update(l) r = self.nodes[MerkleTree.get_right_child(k)] if (len(r) > 0): h.update(r) self.nodes[k] = h.digest()
[ "def", "build", "(", "self", ")", ":", "self", ".", "order", "=", "MerkleTree", ".", "get_order", "(", "len", "(", "self", ".", "leaves", ")", ")", "n", "=", "2", "**", "self", ".", "order", "self", ".", "nodes", "=", "[", "b''", "]", "*", "2",...
Builds the tree from the leaves that have been added. This function populates the tree from the leaves down non-recursively
[ "Builds", "the", "tree", "from", "the", "leaves", "that", "have", "been", "added", "." ]
train
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/Merkle/MerkleTree.py#L145-L173
StorjOld/heartbeat
heartbeat/Merkle/MerkleTree.py
MerkleTree.get_branch
def get_branch(self, i):
    """Return the MerkleBranch (authentication path) for leaf ``i``.

    Starting at the node holding leaf ``i``'s hash, record the
    (left, right) sibling pair at each level while climbing toward the
    root; the resulting rows tie leaf ``i`` to the root hash.

    :param i: the leaf identifying the branch to retrieve
    """
    branch = MerkleBranch(self.order)
    node = i + 2 ** self.order - 1
    for level in range(self.order):
        if not self.is_left(node):
            pair = (self.nodes[node - 1], self.nodes[node])
        else:
            pair = (self.nodes[node], self.nodes[node + 1])
        branch.set_row(level, pair)
        node = MerkleTree.get_parent(node)
    return branch
python
def get_branch(self, i):
    """Build the branch (sibling-pair list) linking leaf ``i`` to the root.

    Traces the tree from leaf ``i`` upward, storing at every level the
    (left, right) node pair that contains the current position.

    :param i: the leaf identifying the branch to retrieve
    """
    branch = MerkleBranch(self.order)
    pos = i + 2 ** self.order - 1
    level = 0
    while level < self.order:
        # Normalize to the left member of the pair, then emit (left, right).
        left = pos if self.is_left(pos) else pos - 1
        branch.set_row(level, (self.nodes[left], self.nodes[left + 1]))
        pos = MerkleTree.get_parent(pos)
        level += 1
    return branch
[ "def", "get_branch", "(", "self", ",", "i", ")", ":", "branch", "=", "MerkleBranch", "(", "self", ".", "order", ")", "j", "=", "i", "+", "2", "**", "self", ".", "order", "-", "1", "for", "k", "in", "range", "(", "0", ",", "self", ".", "order", ...
Gets a branch associated with leaf i. This will trace the tree from the leaves down to the root, constructing a list of tuples that represent the pairs of nodes all the way from leaf i to the root. :param i: the leaf identifying the branch to retrieve
[ "Gets", "a", "branch", "associated", "with", "leaf", "i", ".", "This", "will", "trace", "the", "tree", "from", "the", "leaves", "down", "to", "the", "root", "constructing", "a", "list", "of", "tuples", "that", "represent", "the", "pairs", "of", "nodes", ...
train
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/Merkle/MerkleTree.py#L175-L192
StorjOld/heartbeat
heartbeat/Merkle/MerkleTree.py
MerkleTree.verify_branch
def verify_branch(leaf, branch, root):
    """Check that ``branch`` links ``leaf`` to ``root``.

    Hashes the leaf and verifies the digest appears in the bottom row of
    the branch; each row's SHA-256 (over its non-empty entries,
    left-then-right) must appear in the row above, and the final digest
    must equal ``root``.

    :param leaf: object exposing ``get_hash()``; the leaf to check
    :param branch: rows of (left, right) node pairs, ordered leaf to root
    :param root: the expected root digest
    :return: True if the branch is consistent with leaf and root
    """
    try:
        lh = leaf.get_hash()
    except Exception:
        # Bug fix: the original bare ``except:`` also swallowed
        # SystemExit/KeyboardInterrupt; only ordinary errors should mean
        # "invalid leaf".
        return False
    for i in range(0, branch.get_order()):
        # The running digest must match one side of this row.
        if (branch.get_left(i) != lh and branch.get_right(i) != lh):
            return False
        h = hashlib.sha256()
        if (len(branch.get_left(i)) > 0):
            h.update(branch.get_left(i))
        if (len(branch.get_right(i)) > 0):
            h.update(branch.get_right(i))
        lh = h.digest()
    return root == lh
python
def verify_branch(leaf, branch, root):
    """Verify that ``branch`` connects ``leaf`` to ``root``.

    Walks the branch from the leaf row upward: at every row the running
    hash has to equal either the left or the right entry, after which the
    row's non-empty entries are hashed together (left then right) to form
    the value checked against the next row.  The walk must end with the
    root digest.

    :param leaf: the leaf to check
    :param branch: rows of (left, right) node pairs, ordered leaf to root
    :param root: the root node
    """
    try:
        running = leaf.get_hash()
    except:
        # Any failure to hash the leaf means the proof cannot be valid.
        return False
    for row in range(branch.get_order()):
        left, right = branch.get_left(row), branch.get_right(row)
        if running != left and running != right:
            return False
        h = hashlib.sha256()
        for part in (left, right):
            if len(part) > 0:
                h.update(part)
        running = h.digest()
    return running == root
[ "def", "verify_branch", "(", "leaf", ",", "branch", ",", "root", ")", ":", "# just check the hashes are correct", "try", ":", "lh", "=", "leaf", ".", "get_hash", "(", ")", "except", ":", "return", "False", "for", "i", "in", "range", "(", "0", ",", "branc...
This will verify that the given branch fits the given leaf and root It calculates the hash of the leaf, and then verifies that one of the bottom level nodes in the branch matches the leaf hash. Then it calculates the hash of the two nodes on the next level and checks that one of the nodes on the level above matches. It continues this until it reaches the top level of the tree where it asserts that the root is equal to the hash of the nodes below :param leaf: the leaf to check :param branch: a list of tuples (pairs) of the nodes in the branch, ordered from leaf to root. :param root: the root node
[ "This", "will", "verify", "that", "the", "given", "branch", "fits", "the", "given", "leaf", "and", "root", "It", "calculates", "the", "hash", "of", "the", "leaf", "and", "then", "verifies", "that", "one", "of", "the", "bottom", "level", "nodes", "in", "t...
train
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/Merkle/MerkleTree.py#L255-L285