repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
pytest-dev/pluggy | pluggy/manager.py | PluginManager.set_blocked | def set_blocked(self, name):
""" block registrations of the given name, unregister if already registered. """
self.unregister(name=name)
self._name2plugin[name] = None | python | def set_blocked(self, name):
""" block registrations of the given name, unregister if already registered. """
self.unregister(name=name)
self._name2plugin[name] = None | [
"def",
"set_blocked",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"unregister",
"(",
"name",
"=",
"name",
")",
"self",
".",
"_name2plugin",
"[",
"name",
"]",
"=",
"None"
] | block registrations of the given name, unregister if already registered. | [
"block",
"registrations",
"of",
"the",
"given",
"name",
"unregister",
"if",
"already",
"registered",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L149-L152 | train | 207,900 |
pytest-dev/pluggy | pluggy/manager.py | PluginManager.add_hookspecs | def add_hookspecs(self, module_or_class):
""" add new hook specifications defined in the given module_or_class.
Functions are recognized if they have been decorated accordingly. """
names = []
for name in dir(module_or_class):
spec_opts = self.parse_hookspec_opts(module_or_class, name)
if spec_opts is not None:
hc = getattr(self.hook, name, None)
if hc is None:
hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
setattr(self.hook, name, hc)
else:
# plugins registered this hook without knowing the spec
hc.set_specification(module_or_class, spec_opts)
for hookfunction in hc.get_hookimpls():
self._verify_hook(hc, hookfunction)
names.append(name)
if not names:
raise ValueError(
"did not find any %r hooks in %r" % (self.project_name, module_or_class)
) | python | def add_hookspecs(self, module_or_class):
""" add new hook specifications defined in the given module_or_class.
Functions are recognized if they have been decorated accordingly. """
names = []
for name in dir(module_or_class):
spec_opts = self.parse_hookspec_opts(module_or_class, name)
if spec_opts is not None:
hc = getattr(self.hook, name, None)
if hc is None:
hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
setattr(self.hook, name, hc)
else:
# plugins registered this hook without knowing the spec
hc.set_specification(module_or_class, spec_opts)
for hookfunction in hc.get_hookimpls():
self._verify_hook(hc, hookfunction)
names.append(name)
if not names:
raise ValueError(
"did not find any %r hooks in %r" % (self.project_name, module_or_class)
) | [
"def",
"add_hookspecs",
"(",
"self",
",",
"module_or_class",
")",
":",
"names",
"=",
"[",
"]",
"for",
"name",
"in",
"dir",
"(",
"module_or_class",
")",
":",
"spec_opts",
"=",
"self",
".",
"parse_hookspec_opts",
"(",
"module_or_class",
",",
"name",
")",
"if... | add new hook specifications defined in the given module_or_class.
Functions are recognized if they have been decorated accordingly. | [
"add",
"new",
"hook",
"specifications",
"defined",
"in",
"the",
"given",
"module_or_class",
".",
"Functions",
"are",
"recognized",
"if",
"they",
"have",
"been",
"decorated",
"accordingly",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L158-L179 | train | 207,901 |
pytest-dev/pluggy | pluggy/manager.py | PluginManager.get_name | def get_name(self, plugin):
""" Return name for registered plugin or None if not registered. """
for name, val in self._name2plugin.items():
if plugin == val:
return name | python | def get_name(self, plugin):
""" Return name for registered plugin or None if not registered. """
for name, val in self._name2plugin.items():
if plugin == val:
return name | [
"def",
"get_name",
"(",
"self",
",",
"plugin",
")",
":",
"for",
"name",
",",
"val",
"in",
"self",
".",
"_name2plugin",
".",
"items",
"(",
")",
":",
"if",
"plugin",
"==",
"val",
":",
"return",
"name"
] | Return name for registered plugin or None if not registered. | [
"Return",
"name",
"for",
"registered",
"plugin",
"or",
"None",
"if",
"not",
"registered",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L208-L212 | train | 207,902 |
pytest-dev/pluggy | pluggy/manager.py | PluginManager.check_pending | def check_pending(self):
""" Verify that all hooks which have not been verified against
a hook specification are optional, otherwise raise PluginValidationError"""
for name in self.hook.__dict__:
if name[0] != "_":
hook = getattr(self.hook, name)
if not hook.has_spec():
for hookimpl in hook.get_hookimpls():
if not hookimpl.optionalhook:
raise PluginValidationError(
hookimpl.plugin,
"unknown hook %r in plugin %r"
% (name, hookimpl.plugin),
) | python | def check_pending(self):
""" Verify that all hooks which have not been verified against
a hook specification are optional, otherwise raise PluginValidationError"""
for name in self.hook.__dict__:
if name[0] != "_":
hook = getattr(self.hook, name)
if not hook.has_spec():
for hookimpl in hook.get_hookimpls():
if not hookimpl.optionalhook:
raise PluginValidationError(
hookimpl.plugin,
"unknown hook %r in plugin %r"
% (name, hookimpl.plugin),
) | [
"def",
"check_pending",
"(",
"self",
")",
":",
"for",
"name",
"in",
"self",
".",
"hook",
".",
"__dict__",
":",
"if",
"name",
"[",
"0",
"]",
"!=",
"\"_\"",
":",
"hook",
"=",
"getattr",
"(",
"self",
".",
"hook",
",",
"name",
")",
"if",
"not",
"hook... | Verify that all hooks which have not been verified against
a hook specification are optional, otherwise raise PluginValidationError | [
"Verify",
"that",
"all",
"hooks",
"which",
"have",
"not",
"been",
"verified",
"against",
"a",
"hook",
"specification",
"are",
"optional",
"otherwise",
"raise",
"PluginValidationError"
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L239-L252 | train | 207,903 |
pytest-dev/pluggy | pluggy/manager.py | PluginManager.load_setuptools_entrypoints | def load_setuptools_entrypoints(self, group, name=None):
""" Load modules from querying the specified setuptools ``group``.
:param str group: entry point group to load plugins
:param str name: if given, loads only plugins with the given ``name``.
:rtype: int
:return: return the number of loaded plugins by this call.
"""
from pkg_resources import (
iter_entry_points,
DistributionNotFound,
VersionConflict,
)
count = 0
for ep in iter_entry_points(group, name=name):
# is the plugin registered or blocked?
if self.get_plugin(ep.name) or self.is_blocked(ep.name):
continue
try:
plugin = ep.load()
except DistributionNotFound:
continue
except VersionConflict as e:
raise PluginValidationError(
plugin=None,
message="Plugin %r could not be loaded: %s!" % (ep.name, e),
)
self.register(plugin, name=ep.name)
self._plugin_distinfo.append((plugin, ep.dist))
count += 1
return count | python | def load_setuptools_entrypoints(self, group, name=None):
""" Load modules from querying the specified setuptools ``group``.
:param str group: entry point group to load plugins
:param str name: if given, loads only plugins with the given ``name``.
:rtype: int
:return: return the number of loaded plugins by this call.
"""
from pkg_resources import (
iter_entry_points,
DistributionNotFound,
VersionConflict,
)
count = 0
for ep in iter_entry_points(group, name=name):
# is the plugin registered or blocked?
if self.get_plugin(ep.name) or self.is_blocked(ep.name):
continue
try:
plugin = ep.load()
except DistributionNotFound:
continue
except VersionConflict as e:
raise PluginValidationError(
plugin=None,
message="Plugin %r could not be loaded: %s!" % (ep.name, e),
)
self.register(plugin, name=ep.name)
self._plugin_distinfo.append((plugin, ep.dist))
count += 1
return count | [
"def",
"load_setuptools_entrypoints",
"(",
"self",
",",
"group",
",",
"name",
"=",
"None",
")",
":",
"from",
"pkg_resources",
"import",
"(",
"iter_entry_points",
",",
"DistributionNotFound",
",",
"VersionConflict",
",",
")",
"count",
"=",
"0",
"for",
"ep",
"in... | Load modules from querying the specified setuptools ``group``.
:param str group: entry point group to load plugins
:param str name: if given, loads only plugins with the given ``name``.
:rtype: int
:return: return the number of loaded plugins by this call. | [
"Load",
"modules",
"from",
"querying",
"the",
"specified",
"setuptools",
"group",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L254-L285 | train | 207,904 |
pytest-dev/pluggy | pluggy/manager.py | PluginManager.enable_tracing | def enable_tracing(self):
""" enable tracing of hook calls and return an undo function. """
hooktrace = self.hook._trace
def before(hook_name, methods, kwargs):
hooktrace.root.indent += 1
hooktrace(hook_name, kwargs)
def after(outcome, hook_name, methods, kwargs):
if outcome.excinfo is None:
hooktrace("finish", hook_name, "-->", outcome.get_result())
hooktrace.root.indent -= 1
return self.add_hookcall_monitoring(before, after) | python | def enable_tracing(self):
""" enable tracing of hook calls and return an undo function. """
hooktrace = self.hook._trace
def before(hook_name, methods, kwargs):
hooktrace.root.indent += 1
hooktrace(hook_name, kwargs)
def after(outcome, hook_name, methods, kwargs):
if outcome.excinfo is None:
hooktrace("finish", hook_name, "-->", outcome.get_result())
hooktrace.root.indent -= 1
return self.add_hookcall_monitoring(before, after) | [
"def",
"enable_tracing",
"(",
"self",
")",
":",
"hooktrace",
"=",
"self",
".",
"hook",
".",
"_trace",
"def",
"before",
"(",
"hook_name",
",",
"methods",
",",
"kwargs",
")",
":",
"hooktrace",
".",
"root",
".",
"indent",
"+=",
"1",
"hooktrace",
"(",
"hoo... | enable tracing of hook calls and return an undo function. | [
"enable",
"tracing",
"of",
"hook",
"calls",
"and",
"return",
"an",
"undo",
"function",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L315-L328 | train | 207,905 |
pytest-dev/pluggy | pluggy/manager.py | PluginManager.subset_hook_caller | def subset_hook_caller(self, name, remove_plugins):
""" Return a new _HookCaller instance for the named method
which manages calls to all registered plugins except the
ones from remove_plugins. """
orig = getattr(self.hook, name)
plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)]
if plugins_to_remove:
hc = _HookCaller(
orig.name, orig._hookexec, orig.spec.namespace, orig.spec.opts
)
for hookimpl in orig.get_hookimpls():
plugin = hookimpl.plugin
if plugin not in plugins_to_remove:
hc._add_hookimpl(hookimpl)
# we also keep track of this hook caller so it
# gets properly removed on plugin unregistration
self._plugin2hookcallers.setdefault(plugin, []).append(hc)
return hc
return orig | python | def subset_hook_caller(self, name, remove_plugins):
""" Return a new _HookCaller instance for the named method
which manages calls to all registered plugins except the
ones from remove_plugins. """
orig = getattr(self.hook, name)
plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)]
if plugins_to_remove:
hc = _HookCaller(
orig.name, orig._hookexec, orig.spec.namespace, orig.spec.opts
)
for hookimpl in orig.get_hookimpls():
plugin = hookimpl.plugin
if plugin not in plugins_to_remove:
hc._add_hookimpl(hookimpl)
# we also keep track of this hook caller so it
# gets properly removed on plugin unregistration
self._plugin2hookcallers.setdefault(plugin, []).append(hc)
return hc
return orig | [
"def",
"subset_hook_caller",
"(",
"self",
",",
"name",
",",
"remove_plugins",
")",
":",
"orig",
"=",
"getattr",
"(",
"self",
".",
"hook",
",",
"name",
")",
"plugins_to_remove",
"=",
"[",
"plug",
"for",
"plug",
"in",
"remove_plugins",
"if",
"hasattr",
"(",
... | Return a new _HookCaller instance for the named method
which manages calls to all registered plugins except the
ones from remove_plugins. | [
"Return",
"a",
"new",
"_HookCaller",
"instance",
"for",
"the",
"named",
"method",
"which",
"manages",
"calls",
"to",
"all",
"registered",
"plugins",
"except",
"the",
"ones",
"from",
"remove_plugins",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L330-L348 | train | 207,906 |
pytest-dev/pluggy | pluggy/hooks.py | varnames | def varnames(func):
"""Return tuple of positional and keywrord argument names for a function,
method, class or callable.
In case of a class, its ``__init__`` method is considered.
For methods the ``self`` parameter is not included.
"""
cache = getattr(func, "__dict__", {})
try:
return cache["_varnames"]
except KeyError:
pass
if inspect.isclass(func):
try:
func = func.__init__
except AttributeError:
return (), ()
elif not inspect.isroutine(func): # callable object?
try:
func = getattr(func, "__call__", func)
except Exception:
return ()
try: # func MUST be a function or method here or we won't parse any args
spec = _getargspec(func)
except TypeError:
return (), ()
args, defaults = tuple(spec.args), spec.defaults
if defaults:
index = -len(defaults)
args, defaults = args[:index], tuple(args[index:])
else:
defaults = ()
# strip any implicit instance arg
# pypy3 uses "obj" instead of "self" for default dunder methods
implicit_names = ("self",) if not _PYPY3 else ("self", "obj")
if args:
if inspect.ismethod(func) or (
"." in getattr(func, "__qualname__", ()) and args[0] in implicit_names
):
args = args[1:]
try:
cache["_varnames"] = args, defaults
except TypeError:
pass
return args, defaults | python | def varnames(func):
"""Return tuple of positional and keywrord argument names for a function,
method, class or callable.
In case of a class, its ``__init__`` method is considered.
For methods the ``self`` parameter is not included.
"""
cache = getattr(func, "__dict__", {})
try:
return cache["_varnames"]
except KeyError:
pass
if inspect.isclass(func):
try:
func = func.__init__
except AttributeError:
return (), ()
elif not inspect.isroutine(func): # callable object?
try:
func = getattr(func, "__call__", func)
except Exception:
return ()
try: # func MUST be a function or method here or we won't parse any args
spec = _getargspec(func)
except TypeError:
return (), ()
args, defaults = tuple(spec.args), spec.defaults
if defaults:
index = -len(defaults)
args, defaults = args[:index], tuple(args[index:])
else:
defaults = ()
# strip any implicit instance arg
# pypy3 uses "obj" instead of "self" for default dunder methods
implicit_names = ("self",) if not _PYPY3 else ("self", "obj")
if args:
if inspect.ismethod(func) or (
"." in getattr(func, "__qualname__", ()) and args[0] in implicit_names
):
args = args[1:]
try:
cache["_varnames"] = args, defaults
except TypeError:
pass
return args, defaults | [
"def",
"varnames",
"(",
"func",
")",
":",
"cache",
"=",
"getattr",
"(",
"func",
",",
"\"__dict__\"",
",",
"{",
"}",
")",
"try",
":",
"return",
"cache",
"[",
"\"_varnames\"",
"]",
"except",
"KeyError",
":",
"pass",
"if",
"inspect",
".",
"isclass",
"(",
... | Return tuple of positional and keywrord argument names for a function,
method, class or callable.
In case of a class, its ``__init__`` method is considered.
For methods the ``self`` parameter is not included. | [
"Return",
"tuple",
"of",
"positional",
"and",
"keywrord",
"argument",
"names",
"for",
"a",
"function",
"method",
"class",
"or",
"callable",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/hooks.py#L142-L191 | train | 207,907 |
pytest-dev/pluggy | pluggy/hooks.py | _HookCaller._add_hookimpl | def _add_hookimpl(self, hookimpl):
"""Add an implementation to the callback chain.
"""
if hookimpl.hookwrapper:
methods = self._wrappers
else:
methods = self._nonwrappers
if hookimpl.trylast:
methods.insert(0, hookimpl)
elif hookimpl.tryfirst:
methods.append(hookimpl)
else:
# find last non-tryfirst method
i = len(methods) - 1
while i >= 0 and methods[i].tryfirst:
i -= 1
methods.insert(i + 1, hookimpl)
if "__multicall__" in hookimpl.argnames:
warnings.warn(
"Support for __multicall__ is now deprecated and will be"
"removed in an upcoming release.",
DeprecationWarning,
)
self.multicall = _legacymulticall | python | def _add_hookimpl(self, hookimpl):
"""Add an implementation to the callback chain.
"""
if hookimpl.hookwrapper:
methods = self._wrappers
else:
methods = self._nonwrappers
if hookimpl.trylast:
methods.insert(0, hookimpl)
elif hookimpl.tryfirst:
methods.append(hookimpl)
else:
# find last non-tryfirst method
i = len(methods) - 1
while i >= 0 and methods[i].tryfirst:
i -= 1
methods.insert(i + 1, hookimpl)
if "__multicall__" in hookimpl.argnames:
warnings.warn(
"Support for __multicall__ is now deprecated and will be"
"removed in an upcoming release.",
DeprecationWarning,
)
self.multicall = _legacymulticall | [
"def",
"_add_hookimpl",
"(",
"self",
",",
"hookimpl",
")",
":",
"if",
"hookimpl",
".",
"hookwrapper",
":",
"methods",
"=",
"self",
".",
"_wrappers",
"else",
":",
"methods",
"=",
"self",
".",
"_nonwrappers",
"if",
"hookimpl",
".",
"trylast",
":",
"methods",... | Add an implementation to the callback chain. | [
"Add",
"an",
"implementation",
"to",
"the",
"callback",
"chain",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/hooks.py#L245-L270 | train | 207,908 |
pytest-dev/pluggy | pluggy/hooks.py | _HookCaller.call_historic | def call_historic(self, result_callback=None, kwargs=None, proc=None):
"""Call the hook with given ``kwargs`` for all registered plugins and
for all plugins which will be registered afterwards.
If ``result_callback`` is not ``None`` it will be called for for each
non-None result obtained from a hook implementation.
.. note::
The ``proc`` argument is now deprecated.
"""
if proc is not None:
warnings.warn(
"Support for `proc` argument is now deprecated and will be"
"removed in an upcoming release.",
DeprecationWarning,
)
result_callback = proc
self._call_history.append((kwargs or {}, result_callback))
# historizing hooks don't return results
res = self._hookexec(self, self.get_hookimpls(), kwargs)
if result_callback is None:
return
# XXX: remember firstresult isn't compat with historic
for x in res or []:
result_callback(x) | python | def call_historic(self, result_callback=None, kwargs=None, proc=None):
"""Call the hook with given ``kwargs`` for all registered plugins and
for all plugins which will be registered afterwards.
If ``result_callback`` is not ``None`` it will be called for for each
non-None result obtained from a hook implementation.
.. note::
The ``proc`` argument is now deprecated.
"""
if proc is not None:
warnings.warn(
"Support for `proc` argument is now deprecated and will be"
"removed in an upcoming release.",
DeprecationWarning,
)
result_callback = proc
self._call_history.append((kwargs or {}, result_callback))
# historizing hooks don't return results
res = self._hookexec(self, self.get_hookimpls(), kwargs)
if result_callback is None:
return
# XXX: remember firstresult isn't compat with historic
for x in res or []:
result_callback(x) | [
"def",
"call_historic",
"(",
"self",
",",
"result_callback",
"=",
"None",
",",
"kwargs",
"=",
"None",
",",
"proc",
"=",
"None",
")",
":",
"if",
"proc",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"Support for `proc` argument is now deprecated and... | Call the hook with given ``kwargs`` for all registered plugins and
for all plugins which will be registered afterwards.
If ``result_callback`` is not ``None`` it will be called for for each
non-None result obtained from a hook implementation.
.. note::
The ``proc`` argument is now deprecated. | [
"Call",
"the",
"hook",
"with",
"given",
"kwargs",
"for",
"all",
"registered",
"plugins",
"and",
"for",
"all",
"plugins",
"which",
"will",
"be",
"registered",
"afterwards",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/hooks.py#L291-L316 | train | 207,909 |
pytest-dev/pluggy | pluggy/hooks.py | _HookCaller.call_extra | def call_extra(self, methods, kwargs):
""" Call the hook with some additional temporarily participating
methods using the specified kwargs as call parameters. """
old = list(self._nonwrappers), list(self._wrappers)
for method in methods:
opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
hookimpl = HookImpl(None, "<temp>", method, opts)
self._add_hookimpl(hookimpl)
try:
return self(**kwargs)
finally:
self._nonwrappers, self._wrappers = old | python | def call_extra(self, methods, kwargs):
""" Call the hook with some additional temporarily participating
methods using the specified kwargs as call parameters. """
old = list(self._nonwrappers), list(self._wrappers)
for method in methods:
opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
hookimpl = HookImpl(None, "<temp>", method, opts)
self._add_hookimpl(hookimpl)
try:
return self(**kwargs)
finally:
self._nonwrappers, self._wrappers = old | [
"def",
"call_extra",
"(",
"self",
",",
"methods",
",",
"kwargs",
")",
":",
"old",
"=",
"list",
"(",
"self",
".",
"_nonwrappers",
")",
",",
"list",
"(",
"self",
".",
"_wrappers",
")",
"for",
"method",
"in",
"methods",
":",
"opts",
"=",
"dict",
"(",
... | Call the hook with some additional temporarily participating
methods using the specified kwargs as call parameters. | [
"Call",
"the",
"hook",
"with",
"some",
"additional",
"temporarily",
"participating",
"methods",
"using",
"the",
"specified",
"kwargs",
"as",
"call",
"parameters",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/hooks.py#L318-L329 | train | 207,910 |
pytest-dev/pluggy | pluggy/hooks.py | _HookCaller._maybe_apply_history | def _maybe_apply_history(self, method):
"""Apply call history to a new hookimpl if it is marked as historic.
"""
if self.is_historic():
for kwargs, result_callback in self._call_history:
res = self._hookexec(self, [method], kwargs)
if res and result_callback is not None:
result_callback(res[0]) | python | def _maybe_apply_history(self, method):
"""Apply call history to a new hookimpl if it is marked as historic.
"""
if self.is_historic():
for kwargs, result_callback in self._call_history:
res = self._hookexec(self, [method], kwargs)
if res and result_callback is not None:
result_callback(res[0]) | [
"def",
"_maybe_apply_history",
"(",
"self",
",",
"method",
")",
":",
"if",
"self",
".",
"is_historic",
"(",
")",
":",
"for",
"kwargs",
",",
"result_callback",
"in",
"self",
".",
"_call_history",
":",
"res",
"=",
"self",
".",
"_hookexec",
"(",
"self",
","... | Apply call history to a new hookimpl if it is marked as historic. | [
"Apply",
"call",
"history",
"to",
"a",
"new",
"hookimpl",
"if",
"it",
"is",
"marked",
"as",
"historic",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/hooks.py#L331-L338 | train | 207,911 |
jruere/multiprocessing-logging | multiprocessing_logging.py | install_mp_handler | def install_mp_handler(logger=None):
"""Wraps the handlers in the given Logger with an MultiProcessingHandler.
:param logger: whose handlers to wrap. By default, the root logger.
"""
if logger is None:
logger = logging.getLogger()
for i, orig_handler in enumerate(list(logger.handlers)):
handler = MultiProcessingHandler(
'mp-handler-{0}'.format(i), sub_handler=orig_handler)
logger.removeHandler(orig_handler)
logger.addHandler(handler) | python | def install_mp_handler(logger=None):
"""Wraps the handlers in the given Logger with an MultiProcessingHandler.
:param logger: whose handlers to wrap. By default, the root logger.
"""
if logger is None:
logger = logging.getLogger()
for i, orig_handler in enumerate(list(logger.handlers)):
handler = MultiProcessingHandler(
'mp-handler-{0}'.format(i), sub_handler=orig_handler)
logger.removeHandler(orig_handler)
logger.addHandler(handler) | [
"def",
"install_mp_handler",
"(",
"logger",
"=",
"None",
")",
":",
"if",
"logger",
"is",
"None",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"for",
"i",
",",
"orig_handler",
"in",
"enumerate",
"(",
"list",
"(",
"logger",
".",
"handlers",
... | Wraps the handlers in the given Logger with an MultiProcessingHandler.
:param logger: whose handlers to wrap. By default, the root logger. | [
"Wraps",
"the",
"handlers",
"in",
"the",
"given",
"Logger",
"with",
"an",
"MultiProcessingHandler",
"."
] | ac3d5eb2ed06263632d43f257d994b6b7bc241c0 | https://github.com/jruere/multiprocessing-logging/blob/ac3d5eb2ed06263632d43f257d994b6b7bc241c0/multiprocessing_logging.py#L21-L34 | train | 207,912 |
MrMinimal64/timezonefinder | example.py | get_offset | def get_offset(target):
"""
returns a location's time zone offset from UTC in minutes.
"""
from pytz import timezone
import pytz
from datetime import datetime
utc = pytz.utc
today = datetime.now()
tz_target = timezone(tf.certain_timezone_at(lat=target['lat'], lng=target['lng']))
# ATTENTION: tz_target could be None! handle error case
today_target = tz_target.localize(today)
today_utc = utc.localize(today)
return (today_utc - today_target).total_seconds() / 60 | python | def get_offset(target):
"""
returns a location's time zone offset from UTC in minutes.
"""
from pytz import timezone
import pytz
from datetime import datetime
utc = pytz.utc
today = datetime.now()
tz_target = timezone(tf.certain_timezone_at(lat=target['lat'], lng=target['lng']))
# ATTENTION: tz_target could be None! handle error case
today_target = tz_target.localize(today)
today_utc = utc.localize(today)
return (today_utc - today_target).total_seconds() / 60 | [
"def",
"get_offset",
"(",
"target",
")",
":",
"from",
"pytz",
"import",
"timezone",
"import",
"pytz",
"from",
"datetime",
"import",
"datetime",
"utc",
"=",
"pytz",
".",
"utc",
"today",
"=",
"datetime",
".",
"now",
"(",
")",
"tz_target",
"=",
"timezone",
... | returns a location's time zone offset from UTC in minutes. | [
"returns",
"a",
"location",
"s",
"time",
"zone",
"offset",
"from",
"UTC",
"in",
"minutes",
"."
] | 96cc43afb3bae57ffd002ab4cf104fe15eda2257 | https://github.com/MrMinimal64/timezonefinder/blob/96cc43afb3bae57ffd002ab4cf104fe15eda2257/example.py#L71-L86 | train | 207,913 |
Pr0Ger/PyAPNs2 | apns2/client.py | APNsClient.connect | def connect(self):
"""
Establish a connection to APNs. If already connected, the function does nothing. If the
connection fails, the function retries up to MAX_CONNECTION_RETRIES times.
"""
retries = 0
while retries < MAX_CONNECTION_RETRIES:
try:
self._connection.connect()
logger.info('Connected to APNs')
return
except Exception: # pylint: disable=broad-except
# close the connnection, otherwise next connect() call would do nothing
self._connection.close()
retries += 1
logger.exception('Failed connecting to APNs (attempt %s of %s)', retries, MAX_CONNECTION_RETRIES)
raise ConnectionFailed() | python | def connect(self):
"""
Establish a connection to APNs. If already connected, the function does nothing. If the
connection fails, the function retries up to MAX_CONNECTION_RETRIES times.
"""
retries = 0
while retries < MAX_CONNECTION_RETRIES:
try:
self._connection.connect()
logger.info('Connected to APNs')
return
except Exception: # pylint: disable=broad-except
# close the connnection, otherwise next connect() call would do nothing
self._connection.close()
retries += 1
logger.exception('Failed connecting to APNs (attempt %s of %s)', retries, MAX_CONNECTION_RETRIES)
raise ConnectionFailed() | [
"def",
"connect",
"(",
"self",
")",
":",
"retries",
"=",
"0",
"while",
"retries",
"<",
"MAX_CONNECTION_RETRIES",
":",
"try",
":",
"self",
".",
"_connection",
".",
"connect",
"(",
")",
"logger",
".",
"info",
"(",
"'Connected to APNs'",
")",
"return",
"excep... | Establish a connection to APNs. If already connected, the function does nothing. If the
connection fails, the function retries up to MAX_CONNECTION_RETRIES times. | [
"Establish",
"a",
"connection",
"to",
"APNs",
".",
"If",
"already",
"connected",
"the",
"function",
"does",
"nothing",
".",
"If",
"the",
"connection",
"fails",
"the",
"function",
"retries",
"up",
"to",
"MAX_CONNECTION_RETRIES",
"times",
"."
] | 6f3b2a7456ef22c6aafc1467618f665130b0aeea | https://github.com/Pr0Ger/PyAPNs2/blob/6f3b2a7456ef22c6aafc1467618f665130b0aeea/apns2/client.py#L207-L224 | train | 207,914 |
kipe/enocean | enocean/protocol/eep.py | EEP._get_raw | def _get_raw(source, bitarray):
''' Get raw data as integer, based on offset and size '''
offset = int(source['offset'])
size = int(source['size'])
return int(''.join(['1' if digit else '0' for digit in bitarray[offset:offset + size]]), 2) | python | def _get_raw(source, bitarray):
''' Get raw data as integer, based on offset and size '''
offset = int(source['offset'])
size = int(source['size'])
return int(''.join(['1' if digit else '0' for digit in bitarray[offset:offset + size]]), 2) | [
"def",
"_get_raw",
"(",
"source",
",",
"bitarray",
")",
":",
"offset",
"=",
"int",
"(",
"source",
"[",
"'offset'",
"]",
")",
"size",
"=",
"int",
"(",
"source",
"[",
"'size'",
"]",
")",
"return",
"int",
"(",
"''",
".",
"join",
"(",
"[",
"'1'",
"if... | Get raw data as integer, based on offset and size | [
"Get",
"raw",
"data",
"as",
"integer",
"based",
"on",
"offset",
"and",
"size"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/eep.py#L49-L53 | train | 207,915 |
kipe/enocean | enocean/protocol/eep.py | EEP._set_raw | def _set_raw(target, raw_value, bitarray):
''' put value into bit array '''
offset = int(target['offset'])
size = int(target['size'])
for digit in range(size):
bitarray[offset+digit] = (raw_value >> (size-digit-1)) & 0x01 != 0
return bitarray | python | def _set_raw(target, raw_value, bitarray):
''' put value into bit array '''
offset = int(target['offset'])
size = int(target['size'])
for digit in range(size):
bitarray[offset+digit] = (raw_value >> (size-digit-1)) & 0x01 != 0
return bitarray | [
"def",
"_set_raw",
"(",
"target",
",",
"raw_value",
",",
"bitarray",
")",
":",
"offset",
"=",
"int",
"(",
"target",
"[",
"'offset'",
"]",
")",
"size",
"=",
"int",
"(",
"target",
"[",
"'size'",
"]",
")",
"for",
"digit",
"in",
"range",
"(",
"size",
"... | put value into bit array | [
"put",
"value",
"into",
"bit",
"array"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/eep.py#L56-L62 | train | 207,916 |
kipe/enocean | enocean/protocol/eep.py | EEP._get_value | def _get_value(self, source, bitarray):
''' Get value, based on the data in XML '''
raw_value = self._get_raw(source, bitarray)
rng = source.find('range')
rng_min = float(rng.find('min').text)
rng_max = float(rng.find('max').text)
scl = source.find('scale')
scl_min = float(scl.find('min').text)
scl_max = float(scl.find('max').text)
return {
source['shortcut']: {
'description': source.get('description'),
'unit': source['unit'],
'value': (scl_max - scl_min) / (rng_max - rng_min) * (raw_value - rng_min) + scl_min,
'raw_value': raw_value,
}
} | python | def _get_value(self, source, bitarray):
''' Get value, based on the data in XML '''
raw_value = self._get_raw(source, bitarray)
rng = source.find('range')
rng_min = float(rng.find('min').text)
rng_max = float(rng.find('max').text)
scl = source.find('scale')
scl_min = float(scl.find('min').text)
scl_max = float(scl.find('max').text)
return {
source['shortcut']: {
'description': source.get('description'),
'unit': source['unit'],
'value': (scl_max - scl_min) / (rng_max - rng_min) * (raw_value - rng_min) + scl_min,
'raw_value': raw_value,
}
} | [
"def",
"_get_value",
"(",
"self",
",",
"source",
",",
"bitarray",
")",
":",
"raw_value",
"=",
"self",
".",
"_get_raw",
"(",
"source",
",",
"bitarray",
")",
"rng",
"=",
"source",
".",
"find",
"(",
"'range'",
")",
"rng_min",
"=",
"float",
"(",
"rng",
"... | Get value, based on the data in XML | [
"Get",
"value",
"based",
"on",
"the",
"data",
"in",
"XML"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/eep.py#L70-L89 | train | 207,917 |
kipe/enocean | enocean/protocol/eep.py | EEP._get_enum | def _get_enum(self, source, bitarray):
''' Get enum value, based on the data in XML '''
raw_value = self._get_raw(source, bitarray)
# Find value description.
value_desc = source.find('item', {'value': str(raw_value)}) or self._get_rangeitem(source, raw_value)
return {
source['shortcut']: {
'description': source.get('description'),
'unit': source.get('unit', ''),
'value': value_desc['description'].format(value=raw_value),
'raw_value': raw_value,
}
} | python | def _get_enum(self, source, bitarray):
''' Get enum value, based on the data in XML '''
raw_value = self._get_raw(source, bitarray)
# Find value description.
value_desc = source.find('item', {'value': str(raw_value)}) or self._get_rangeitem(source, raw_value)
return {
source['shortcut']: {
'description': source.get('description'),
'unit': source.get('unit', ''),
'value': value_desc['description'].format(value=raw_value),
'raw_value': raw_value,
}
} | [
"def",
"_get_enum",
"(",
"self",
",",
"source",
",",
"bitarray",
")",
":",
"raw_value",
"=",
"self",
".",
"_get_raw",
"(",
"source",
",",
"bitarray",
")",
"# Find value description.",
"value_desc",
"=",
"source",
".",
"find",
"(",
"'item'",
",",
"{",
"'val... | Get enum value, based on the data in XML | [
"Get",
"enum",
"value",
"based",
"on",
"the",
"data",
"in",
"XML"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/eep.py#L91-L105 | train | 207,918 |
kipe/enocean | enocean/protocol/eep.py | EEP._get_boolean | def _get_boolean(self, source, bitarray):
''' Get boolean value, based on the data in XML '''
raw_value = self._get_raw(source, bitarray)
return {
source['shortcut']: {
'description': source.get('description'),
'unit': source.get('unit', ''),
'value': True if raw_value else False,
'raw_value': raw_value,
}
} | python | def _get_boolean(self, source, bitarray):
''' Get boolean value, based on the data in XML '''
raw_value = self._get_raw(source, bitarray)
return {
source['shortcut']: {
'description': source.get('description'),
'unit': source.get('unit', ''),
'value': True if raw_value else False,
'raw_value': raw_value,
}
} | [
"def",
"_get_boolean",
"(",
"self",
",",
"source",
",",
"bitarray",
")",
":",
"raw_value",
"=",
"self",
".",
"_get_raw",
"(",
"source",
",",
"bitarray",
")",
"return",
"{",
"source",
"[",
"'shortcut'",
"]",
":",
"{",
"'description'",
":",
"source",
".",
... | Get boolean value, based on the data in XML | [
"Get",
"boolean",
"value",
"based",
"on",
"the",
"data",
"in",
"XML"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/eep.py#L107-L117 | train | 207,919 |
kipe/enocean | enocean/protocol/eep.py | EEP._set_value | def _set_value(self, target, value, bitarray):
''' set given numeric value to target field in bitarray '''
# derive raw value
rng = target.find('range')
rng_min = float(rng.find('min').text)
rng_max = float(rng.find('max').text)
scl = target.find('scale')
scl_min = float(scl.find('min').text)
scl_max = float(scl.find('max').text)
raw_value = (value - scl_min) * (rng_max - rng_min) / (scl_max - scl_min) + rng_min
# store value in bitfield
return self._set_raw(target, int(raw_value), bitarray) | python | def _set_value(self, target, value, bitarray):
''' set given numeric value to target field in bitarray '''
# derive raw value
rng = target.find('range')
rng_min = float(rng.find('min').text)
rng_max = float(rng.find('max').text)
scl = target.find('scale')
scl_min = float(scl.find('min').text)
scl_max = float(scl.find('max').text)
raw_value = (value - scl_min) * (rng_max - rng_min) / (scl_max - scl_min) + rng_min
# store value in bitfield
return self._set_raw(target, int(raw_value), bitarray) | [
"def",
"_set_value",
"(",
"self",
",",
"target",
",",
"value",
",",
"bitarray",
")",
":",
"# derive raw value",
"rng",
"=",
"target",
".",
"find",
"(",
"'range'",
")",
"rng_min",
"=",
"float",
"(",
"rng",
".",
"find",
"(",
"'min'",
")",
".",
"text",
... | set given numeric value to target field in bitarray | [
"set",
"given",
"numeric",
"value",
"to",
"target",
"field",
"in",
"bitarray"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/eep.py#L119-L130 | train | 207,920 |
kipe/enocean | enocean/protocol/eep.py | EEP.find_profile | def find_profile(self, bitarray, eep_rorg, rorg_func, rorg_type, direction=None, command=None):
''' Find profile and data description, matching RORG, FUNC and TYPE '''
if not self.init_ok:
self.logger.warn('EEP.xml not loaded!')
return None
if eep_rorg not in self.telegrams.keys():
self.logger.warn('Cannot find rorg in EEP!')
return None
if rorg_func not in self.telegrams[eep_rorg].keys():
self.logger.warn('Cannot find func in EEP!')
return None
if rorg_type not in self.telegrams[eep_rorg][rorg_func].keys():
self.logger.warn('Cannot find type in EEP!')
return None
profile = self.telegrams[eep_rorg][rorg_func][rorg_type]
if command:
# multiple commands can be defined, with the command id always in same location (per RORG-FUNC-TYPE).
eep_command = profile.find('command', recursive=False)
# If commands are not set in EEP, or command is None,
# get the first data as a "best guess".
if not eep_command:
return profile.find('data', recursive=False)
# If eep_command is defined, so should be data.command
return profile.find('data', {'command': str(command)}, recursive=False)
# extract data description
# the direction tag is optional
if direction is None:
return profile.find('data', recursive=False)
return profile.find('data', {'direction': direction}, recursive=False) | python | def find_profile(self, bitarray, eep_rorg, rorg_func, rorg_type, direction=None, command=None):
''' Find profile and data description, matching RORG, FUNC and TYPE '''
if not self.init_ok:
self.logger.warn('EEP.xml not loaded!')
return None
if eep_rorg not in self.telegrams.keys():
self.logger.warn('Cannot find rorg in EEP!')
return None
if rorg_func not in self.telegrams[eep_rorg].keys():
self.logger.warn('Cannot find func in EEP!')
return None
if rorg_type not in self.telegrams[eep_rorg][rorg_func].keys():
self.logger.warn('Cannot find type in EEP!')
return None
profile = self.telegrams[eep_rorg][rorg_func][rorg_type]
if command:
# multiple commands can be defined, with the command id always in same location (per RORG-FUNC-TYPE).
eep_command = profile.find('command', recursive=False)
# If commands are not set in EEP, or command is None,
# get the first data as a "best guess".
if not eep_command:
return profile.find('data', recursive=False)
# If eep_command is defined, so should be data.command
return profile.find('data', {'command': str(command)}, recursive=False)
# extract data description
# the direction tag is optional
if direction is None:
return profile.find('data', recursive=False)
return profile.find('data', {'direction': direction}, recursive=False) | [
"def",
"find_profile",
"(",
"self",
",",
"bitarray",
",",
"eep_rorg",
",",
"rorg_func",
",",
"rorg_type",
",",
"direction",
"=",
"None",
",",
"command",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"init_ok",
":",
"self",
".",
"logger",
".",
"warn"... | Find profile and data description, matching RORG, FUNC and TYPE | [
"Find",
"profile",
"and",
"data",
"description",
"matching",
"RORG",
"FUNC",
"and",
"TYPE"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/eep.py#L155-L190 | train | 207,921 |
kipe/enocean | enocean/protocol/eep.py | EEP.get_values | def get_values(self, profile, bitarray, status):
''' Get keys and values from bitarray '''
if not self.init_ok or profile is None:
return [], {}
output = OrderedDict({})
for source in profile.contents:
if not source.name:
continue
if source.name == 'value':
output.update(self._get_value(source, bitarray))
if source.name == 'enum':
output.update(self._get_enum(source, bitarray))
if source.name == 'status':
output.update(self._get_boolean(source, status))
return output.keys(), output | python | def get_values(self, profile, bitarray, status):
''' Get keys and values from bitarray '''
if not self.init_ok or profile is None:
return [], {}
output = OrderedDict({})
for source in profile.contents:
if not source.name:
continue
if source.name == 'value':
output.update(self._get_value(source, bitarray))
if source.name == 'enum':
output.update(self._get_enum(source, bitarray))
if source.name == 'status':
output.update(self._get_boolean(source, status))
return output.keys(), output | [
"def",
"get_values",
"(",
"self",
",",
"profile",
",",
"bitarray",
",",
"status",
")",
":",
"if",
"not",
"self",
".",
"init_ok",
"or",
"profile",
"is",
"None",
":",
"return",
"[",
"]",
",",
"{",
"}",
"output",
"=",
"OrderedDict",
"(",
"{",
"}",
")"... | Get keys and values from bitarray | [
"Get",
"keys",
"and",
"values",
"from",
"bitarray"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/eep.py#L192-L207 | train | 207,922 |
kipe/enocean | enocean/protocol/eep.py | EEP.set_values | def set_values(self, profile, data, status, properties):
''' Update data based on data contained in properties '''
if not self.init_ok or profile is None:
return data, status
for shortcut, value in properties.items():
# find the given property from EEP
target = profile.find(shortcut=shortcut)
if not target:
# TODO: Should we raise an error?
self.logger.warning('Cannot find data description for shortcut %s', shortcut)
continue
# update bit_data
if target.name == 'value':
data = self._set_value(target, value, data)
if target.name == 'enum':
data = self._set_enum(target, value, data)
if target.name == 'status':
status = self._set_boolean(target, value, status)
return data, status | python | def set_values(self, profile, data, status, properties):
''' Update data based on data contained in properties '''
if not self.init_ok or profile is None:
return data, status
for shortcut, value in properties.items():
# find the given property from EEP
target = profile.find(shortcut=shortcut)
if not target:
# TODO: Should we raise an error?
self.logger.warning('Cannot find data description for shortcut %s', shortcut)
continue
# update bit_data
if target.name == 'value':
data = self._set_value(target, value, data)
if target.name == 'enum':
data = self._set_enum(target, value, data)
if target.name == 'status':
status = self._set_boolean(target, value, status)
return data, status | [
"def",
"set_values",
"(",
"self",
",",
"profile",
",",
"data",
",",
"status",
",",
"properties",
")",
":",
"if",
"not",
"self",
".",
"init_ok",
"or",
"profile",
"is",
"None",
":",
"return",
"data",
",",
"status",
"for",
"shortcut",
",",
"value",
"in",
... | Update data based on data contained in properties | [
"Update",
"data",
"based",
"on",
"data",
"contained",
"in",
"properties"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/eep.py#L209-L229 | train | 207,923 |
kipe/enocean | enocean/utils.py | combine_hex | def combine_hex(data):
''' Combine list of integer values to one big integer '''
output = 0x00
for i, value in enumerate(reversed(data)):
output |= (value << i * 8)
return output | python | def combine_hex(data):
''' Combine list of integer values to one big integer '''
output = 0x00
for i, value in enumerate(reversed(data)):
output |= (value << i * 8)
return output | [
"def",
"combine_hex",
"(",
"data",
")",
":",
"output",
"=",
"0x00",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"reversed",
"(",
"data",
")",
")",
":",
"output",
"|=",
"(",
"value",
"<<",
"i",
"*",
"8",
")",
"return",
"output"
] | Combine list of integer values to one big integer | [
"Combine",
"list",
"of",
"integer",
"values",
"to",
"one",
"big",
"integer"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/utils.py#L10-L15 | train | 207,924 |
kipe/enocean | enocean/protocol/packet.py | Packet.parse | def parse(self):
''' Parse data from Packet '''
# Parse status from messages
if self.rorg in [RORG.RPS, RORG.BS1, RORG.BS4]:
self.status = self.data[-1]
if self.rorg == RORG.VLD:
self.status = self.optional[-1]
if self.rorg in [RORG.RPS, RORG.BS1, RORG.BS4]:
# These message types should have repeater count in the last for bits of status.
self.repeater_count = enocean.utils.from_bitarray(self._bit_status[4:])
return self.parsed | python | def parse(self):
''' Parse data from Packet '''
# Parse status from messages
if self.rorg in [RORG.RPS, RORG.BS1, RORG.BS4]:
self.status = self.data[-1]
if self.rorg == RORG.VLD:
self.status = self.optional[-1]
if self.rorg in [RORG.RPS, RORG.BS1, RORG.BS4]:
# These message types should have repeater count in the last for bits of status.
self.repeater_count = enocean.utils.from_bitarray(self._bit_status[4:])
return self.parsed | [
"def",
"parse",
"(",
"self",
")",
":",
"# Parse status from messages",
"if",
"self",
".",
"rorg",
"in",
"[",
"RORG",
".",
"RPS",
",",
"RORG",
".",
"BS1",
",",
"RORG",
".",
"BS4",
"]",
":",
"self",
".",
"status",
"=",
"self",
".",
"data",
"[",
"-",
... | Parse data from Packet | [
"Parse",
"data",
"from",
"Packet"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/packet.py#L243-L254 | train | 207,925 |
kipe/enocean | enocean/protocol/packet.py | Packet.select_eep | def select_eep(self, rorg_func, rorg_type, direction=None, command=None):
''' Set EEP based on FUNC and TYPE '''
# set EEP profile
self.rorg_func = rorg_func
self.rorg_type = rorg_type
self._profile = self.eep.find_profile(self._bit_data, self.rorg, rorg_func, rorg_type, direction, command)
return self._profile is not None | python | def select_eep(self, rorg_func, rorg_type, direction=None, command=None):
''' Set EEP based on FUNC and TYPE '''
# set EEP profile
self.rorg_func = rorg_func
self.rorg_type = rorg_type
self._profile = self.eep.find_profile(self._bit_data, self.rorg, rorg_func, rorg_type, direction, command)
return self._profile is not None | [
"def",
"select_eep",
"(",
"self",
",",
"rorg_func",
",",
"rorg_type",
",",
"direction",
"=",
"None",
",",
"command",
"=",
"None",
")",
":",
"# set EEP profile",
"self",
".",
"rorg_func",
"=",
"rorg_func",
"self",
".",
"rorg_type",
"=",
"rorg_type",
"self",
... | Set EEP based on FUNC and TYPE | [
"Set",
"EEP",
"based",
"on",
"FUNC",
"and",
"TYPE"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/packet.py#L256-L262 | train | 207,926 |
kipe/enocean | enocean/protocol/packet.py | Packet.parse_eep | def parse_eep(self, rorg_func=None, rorg_type=None, direction=None, command=None):
''' Parse EEP based on FUNC and TYPE '''
# set EEP profile, if demanded
if rorg_func is not None and rorg_type is not None:
self.select_eep(rorg_func, rorg_type, direction, command)
# parse data
provides, values = self.eep.get_values(self._profile, self._bit_data, self._bit_status)
self.parsed.update(values)
return list(provides) | python | def parse_eep(self, rorg_func=None, rorg_type=None, direction=None, command=None):
''' Parse EEP based on FUNC and TYPE '''
# set EEP profile, if demanded
if rorg_func is not None and rorg_type is not None:
self.select_eep(rorg_func, rorg_type, direction, command)
# parse data
provides, values = self.eep.get_values(self._profile, self._bit_data, self._bit_status)
self.parsed.update(values)
return list(provides) | [
"def",
"parse_eep",
"(",
"self",
",",
"rorg_func",
"=",
"None",
",",
"rorg_type",
"=",
"None",
",",
"direction",
"=",
"None",
",",
"command",
"=",
"None",
")",
":",
"# set EEP profile, if demanded",
"if",
"rorg_func",
"is",
"not",
"None",
"and",
"rorg_type",... | Parse EEP based on FUNC and TYPE | [
"Parse",
"EEP",
"based",
"on",
"FUNC",
"and",
"TYPE"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/packet.py#L264-L272 | train | 207,927 |
kipe/enocean | enocean/protocol/packet.py | Packet.set_eep | def set_eep(self, data):
''' Update packet data based on EEP. Input data is a dictionary with keys corresponding to the EEP. '''
self._bit_data, self._bit_status = self.eep.set_values(self._profile, self._bit_data, self._bit_status, data) | python | def set_eep(self, data):
''' Update packet data based on EEP. Input data is a dictionary with keys corresponding to the EEP. '''
self._bit_data, self._bit_status = self.eep.set_values(self._profile, self._bit_data, self._bit_status, data) | [
"def",
"set_eep",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_bit_data",
",",
"self",
".",
"_bit_status",
"=",
"self",
".",
"eep",
".",
"set_values",
"(",
"self",
".",
"_profile",
",",
"self",
".",
"_bit_data",
",",
"self",
".",
"_bit_status",
... | Update packet data based on EEP. Input data is a dictionary with keys corresponding to the EEP. | [
"Update",
"packet",
"data",
"based",
"on",
"EEP",
".",
"Input",
"data",
"is",
"a",
"dictionary",
"with",
"keys",
"corresponding",
"to",
"the",
"EEP",
"."
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/packet.py#L274-L276 | train | 207,928 |
kipe/enocean | enocean/protocol/packet.py | Packet.build | def build(self):
''' Build Packet for sending to EnOcean controller '''
data_length = len(self.data)
ords = [0x55, (data_length >> 8) & 0xFF, data_length & 0xFF, len(self.optional), int(self.packet_type)]
ords.append(crc8.calc(ords[1:5]))
ords.extend(self.data)
ords.extend(self.optional)
ords.append(crc8.calc(ords[6:]))
return ords | python | def build(self):
''' Build Packet for sending to EnOcean controller '''
data_length = len(self.data)
ords = [0x55, (data_length >> 8) & 0xFF, data_length & 0xFF, len(self.optional), int(self.packet_type)]
ords.append(crc8.calc(ords[1:5]))
ords.extend(self.data)
ords.extend(self.optional)
ords.append(crc8.calc(ords[6:]))
return ords | [
"def",
"build",
"(",
"self",
")",
":",
"data_length",
"=",
"len",
"(",
"self",
".",
"data",
")",
"ords",
"=",
"[",
"0x55",
",",
"(",
"data_length",
">>",
"8",
")",
"&",
"0xFF",
",",
"data_length",
"&",
"0xFF",
",",
"len",
"(",
"self",
".",
"optio... | Build Packet for sending to EnOcean controller | [
"Build",
"Packet",
"for",
"sending",
"to",
"EnOcean",
"controller"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/protocol/packet.py#L278-L286 | train | 207,929 |
kipe/enocean | enocean/communicators/communicator.py | Communicator._get_from_send_queue | def _get_from_send_queue(self):
''' Get message from send queue, if one exists '''
try:
packet = self.transmit.get(block=False)
self.logger.info('Sending packet')
self.logger.debug(packet)
return packet
except queue.Empty:
pass
return None | python | def _get_from_send_queue(self):
''' Get message from send queue, if one exists '''
try:
packet = self.transmit.get(block=False)
self.logger.info('Sending packet')
self.logger.debug(packet)
return packet
except queue.Empty:
pass
return None | [
"def",
"_get_from_send_queue",
"(",
"self",
")",
":",
"try",
":",
"packet",
"=",
"self",
".",
"transmit",
".",
"get",
"(",
"block",
"=",
"False",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Sending packet'",
")",
"self",
".",
"logger",
".",
"debug"... | Get message from send queue, if one exists | [
"Get",
"message",
"from",
"send",
"queue",
"if",
"one",
"exists"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/communicators/communicator.py#L39-L48 | train | 207,930 |
kipe/enocean | enocean/communicators/communicator.py | Communicator.parse | def parse(self):
''' Parses messages and puts them to receive queue '''
# Loop while we get new messages
while True:
status, self._buffer, packet = Packet.parse_msg(self._buffer)
# If message is incomplete -> break the loop
if status == PARSE_RESULT.INCOMPLETE:
return status
# If message is OK, add it to receive queue or send to the callback method
if status == PARSE_RESULT.OK and packet:
packet.received = datetime.datetime.now()
if isinstance(packet, UTETeachInPacket) and self.teach_in:
response_packet = packet.create_response_packet(self.base_id)
self.logger.info('Sending response to UTE teach-in.')
self.send(response_packet)
if self.__callback is None:
self.receive.put(packet)
else:
self.__callback(packet)
self.logger.debug(packet) | python | def parse(self):
''' Parses messages and puts them to receive queue '''
# Loop while we get new messages
while True:
status, self._buffer, packet = Packet.parse_msg(self._buffer)
# If message is incomplete -> break the loop
if status == PARSE_RESULT.INCOMPLETE:
return status
# If message is OK, add it to receive queue or send to the callback method
if status == PARSE_RESULT.OK and packet:
packet.received = datetime.datetime.now()
if isinstance(packet, UTETeachInPacket) and self.teach_in:
response_packet = packet.create_response_packet(self.base_id)
self.logger.info('Sending response to UTE teach-in.')
self.send(response_packet)
if self.__callback is None:
self.receive.put(packet)
else:
self.__callback(packet)
self.logger.debug(packet) | [
"def",
"parse",
"(",
"self",
")",
":",
"# Loop while we get new messages",
"while",
"True",
":",
"status",
",",
"self",
".",
"_buffer",
",",
"packet",
"=",
"Packet",
".",
"parse_msg",
"(",
"self",
".",
"_buffer",
")",
"# If message is incomplete -> break the loop"... | Parses messages and puts them to receive queue | [
"Parses",
"messages",
"and",
"puts",
"them",
"to",
"receive",
"queue"
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/communicators/communicator.py#L60-L82 | train | 207,931 |
kipe/enocean | enocean/communicators/communicator.py | Communicator.base_id | def base_id(self):
''' Fetches Base ID from the transmitter, if required. Otherwise returns the currently set Base ID. '''
# If base id is already set, return it.
if self._base_id is not None:
return self._base_id
# Send COMMON_COMMAND 0x08, CO_RD_IDBASE request to the module
self.send(Packet(PACKET.COMMON_COMMAND, data=[0x08]))
# Loop over 10 times, to make sure we catch the response.
# Thanks to timeout, shouldn't take more than a second.
# Unfortunately, all other messages received during this time are ignored.
for i in range(0, 10):
try:
packet = self.receive.get(block=True, timeout=0.1)
# We're only interested in responses to the request in question.
if packet.packet_type == PACKET.RESPONSE and packet.response == RETURN_CODE.OK and len(packet.response_data) == 4:
# Base ID is set in the response data.
self._base_id = packet.response_data
# Put packet back to the Queue, so the user can also react to it if required...
self.receive.put(packet)
break
# Put other packets back to the Queue.
self.receive.put(packet)
except queue.Empty:
continue
# Return the current Base ID (might be None).
return self._base_id | python | def base_id(self):
''' Fetches Base ID from the transmitter, if required. Otherwise returns the currently set Base ID. '''
# If base id is already set, return it.
if self._base_id is not None:
return self._base_id
# Send COMMON_COMMAND 0x08, CO_RD_IDBASE request to the module
self.send(Packet(PACKET.COMMON_COMMAND, data=[0x08]))
# Loop over 10 times, to make sure we catch the response.
# Thanks to timeout, shouldn't take more than a second.
# Unfortunately, all other messages received during this time are ignored.
for i in range(0, 10):
try:
packet = self.receive.get(block=True, timeout=0.1)
# We're only interested in responses to the request in question.
if packet.packet_type == PACKET.RESPONSE and packet.response == RETURN_CODE.OK and len(packet.response_data) == 4:
# Base ID is set in the response data.
self._base_id = packet.response_data
# Put packet back to the Queue, so the user can also react to it if required...
self.receive.put(packet)
break
# Put other packets back to the Queue.
self.receive.put(packet)
except queue.Empty:
continue
# Return the current Base ID (might be None).
return self._base_id | [
"def",
"base_id",
"(",
"self",
")",
":",
"# If base id is already set, return it.",
"if",
"self",
".",
"_base_id",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_base_id",
"# Send COMMON_COMMAND 0x08, CO_RD_IDBASE request to the module",
"self",
".",
"send",
"(",
... | Fetches Base ID from the transmitter, if required. Otherwise returns the currently set Base ID. | [
"Fetches",
"Base",
"ID",
"from",
"the",
"transmitter",
"if",
"required",
".",
"Otherwise",
"returns",
"the",
"currently",
"set",
"Base",
"ID",
"."
] | 99fa03f47004eef74c7987545c33ecd01af0de07 | https://github.com/kipe/enocean/blob/99fa03f47004eef74c7987545c33ecd01af0de07/enocean/communicators/communicator.py#L85-L111 | train | 207,932 |
samuelcolvin/arq | arq/cli.py | cli | def cli(*, worker_settings, burst, check, watch, verbose):
"""
Job queues in python with asyncio and redis.
CLI to run the arq worker.
"""
sys.path.append(os.getcwd())
worker_settings = import_string(worker_settings)
logging.config.dictConfig(default_log_config(verbose))
if check:
exit(check_health(worker_settings))
else:
kwargs = {} if burst is None else {'burst': burst}
if watch:
loop = asyncio.get_event_loop()
loop.run_until_complete(watch_reload(watch, worker_settings, loop))
else:
run_worker(worker_settings, **kwargs) | python | def cli(*, worker_settings, burst, check, watch, verbose):
"""
Job queues in python with asyncio and redis.
CLI to run the arq worker.
"""
sys.path.append(os.getcwd())
worker_settings = import_string(worker_settings)
logging.config.dictConfig(default_log_config(verbose))
if check:
exit(check_health(worker_settings))
else:
kwargs = {} if burst is None else {'burst': burst}
if watch:
loop = asyncio.get_event_loop()
loop.run_until_complete(watch_reload(watch, worker_settings, loop))
else:
run_worker(worker_settings, **kwargs) | [
"def",
"cli",
"(",
"*",
",",
"worker_settings",
",",
"burst",
",",
"check",
",",
"watch",
",",
"verbose",
")",
":",
"sys",
".",
"path",
".",
"append",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
"worker_settings",
"=",
"import_string",
"(",
"worker_settin... | Job queues in python with asyncio and redis.
CLI to run the arq worker. | [
"Job",
"queues",
"in",
"python",
"with",
"asyncio",
"and",
"redis",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/cli.py#L27-L45 | train | 207,933 |
samuelcolvin/arq | arq/cron.py | next_cron | def next_cron(
previous_dt: datetime,
*,
month: Union[None, set, int] = None,
day: Union[None, set, int] = None,
weekday: Union[None, set, int, str] = None,
hour: Union[None, set, int] = None,
minute: Union[None, set, int] = None,
second: Union[None, set, int] = 0,
microsecond: int = 123_456,
):
"""
Find the next datetime matching the given parameters.
"""
dt = previous_dt + timedelta(seconds=1)
if isinstance(weekday, str):
weekday = weekdays.index(weekday.lower())
options = dict(
month=month, day=day, weekday=weekday, hour=hour, minute=minute, second=second, microsecond=microsecond
)
while True:
next_dt = _get_next_dt(dt, options)
# print(dt, next_dt)
if next_dt is None:
return dt
dt = next_dt | python | def next_cron(
previous_dt: datetime,
*,
month: Union[None, set, int] = None,
day: Union[None, set, int] = None,
weekday: Union[None, set, int, str] = None,
hour: Union[None, set, int] = None,
minute: Union[None, set, int] = None,
second: Union[None, set, int] = 0,
microsecond: int = 123_456,
):
"""
Find the next datetime matching the given parameters.
"""
dt = previous_dt + timedelta(seconds=1)
if isinstance(weekday, str):
weekday = weekdays.index(weekday.lower())
options = dict(
month=month, day=day, weekday=weekday, hour=hour, minute=minute, second=second, microsecond=microsecond
)
while True:
next_dt = _get_next_dt(dt, options)
# print(dt, next_dt)
if next_dt is None:
return dt
dt = next_dt | [
"def",
"next_cron",
"(",
"previous_dt",
":",
"datetime",
",",
"*",
",",
"month",
":",
"Union",
"[",
"None",
",",
"set",
",",
"int",
"]",
"=",
"None",
",",
"day",
":",
"Union",
"[",
"None",
",",
"set",
",",
"int",
"]",
"=",
"None",
",",
"weekday",... | Find the next datetime matching the given parameters. | [
"Find",
"the",
"next",
"datetime",
"matching",
"the",
"given",
"parameters",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/cron.py#L65-L91 | train | 207,934 |
samuelcolvin/arq | arq/cron.py | cron | def cron(
coroutine: Union[str, Callable],
*,
name: Optional[str] = None,
month: Union[None, set, int] = None,
day: Union[None, set, int] = None,
weekday: Union[None, set, int, str] = None,
hour: Union[None, set, int] = None,
minute: Union[None, set, int] = None,
second: Union[None, set, int] = 0,
microsecond: int = 123_456,
run_at_startup: bool = False,
unique: bool = True,
timeout: Optional[SecondsTimedelta] = None,
keep_result: Optional[float] = 0,
max_tries: Optional[int] = 1,
) -> CronJob:
"""
Create a cron job, eg. it should be executed at specific times.
Workers will enqueue this job at or just after the set times. If ``unique`` is true (the default) the
job will only be run once even if multiple workers are running.
:param coroutine: coroutine function to run
:param name: name of the job, if None, the name of the coroutine is used
:param month: month(s) to run the job on, 1 - 12
:param day: day(s) to run the job on, 1 - 31
:param weekday: week day(s) to run the job on, 0 - 6 or mon - sun
:param hour: hour(s) to run the job on, 0 - 23
:param minute: minute(s) to run the job on, 0 - 59
:param second: second(s) to run the job on, 0 - 59
:param microsecond: microsecond(s) to run the job on,
defaults to 123456 as the world is busier at the top of a second, 0 - 1e6
:param run_at_startup: whether to run as worker starts
:param unique: whether the job should be only be executed once at each time
:param timeout: job timeout
:param keep_result: how long to keep the result for
:param max_tries: maximum number of tries for the job
"""
if isinstance(coroutine, str):
name = name or 'cron:' + coroutine
coroutine = import_string(coroutine)
assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
timeout = to_seconds(timeout)
keep_result = to_seconds(keep_result)
return CronJob(
name or 'cron:' + coroutine.__qualname__,
coroutine,
month,
day,
weekday,
hour,
minute,
second,
microsecond,
run_at_startup,
unique,
timeout,
keep_result,
max_tries,
) | python | def cron(
coroutine: Union[str, Callable],
*,
name: Optional[str] = None,
month: Union[None, set, int] = None,
day: Union[None, set, int] = None,
weekday: Union[None, set, int, str] = None,
hour: Union[None, set, int] = None,
minute: Union[None, set, int] = None,
second: Union[None, set, int] = 0,
microsecond: int = 123_456,
run_at_startup: bool = False,
unique: bool = True,
timeout: Optional[SecondsTimedelta] = None,
keep_result: Optional[float] = 0,
max_tries: Optional[int] = 1,
) -> CronJob:
"""
Create a cron job, eg. it should be executed at specific times.
Workers will enqueue this job at or just after the set times. If ``unique`` is true (the default) the
job will only be run once even if multiple workers are running.
:param coroutine: coroutine function to run
:param name: name of the job, if None, the name of the coroutine is used
:param month: month(s) to run the job on, 1 - 12
:param day: day(s) to run the job on, 1 - 31
:param weekday: week day(s) to run the job on, 0 - 6 or mon - sun
:param hour: hour(s) to run the job on, 0 - 23
:param minute: minute(s) to run the job on, 0 - 59
:param second: second(s) to run the job on, 0 - 59
:param microsecond: microsecond(s) to run the job on,
defaults to 123456 as the world is busier at the top of a second, 0 - 1e6
:param run_at_startup: whether to run as worker starts
:param unique: whether the job should be only be executed once at each time
:param timeout: job timeout
:param keep_result: how long to keep the result for
:param max_tries: maximum number of tries for the job
"""
if isinstance(coroutine, str):
name = name or 'cron:' + coroutine
coroutine = import_string(coroutine)
assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
timeout = to_seconds(timeout)
keep_result = to_seconds(keep_result)
return CronJob(
name or 'cron:' + coroutine.__qualname__,
coroutine,
month,
day,
weekday,
hour,
minute,
second,
microsecond,
run_at_startup,
unique,
timeout,
keep_result,
max_tries,
) | [
"def",
"cron",
"(",
"coroutine",
":",
"Union",
"[",
"str",
",",
"Callable",
"]",
",",
"*",
",",
"name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"month",
":",
"Union",
"[",
"None",
",",
"set",
",",
"int",
"]",
"=",
"None",
",",
"day"... | Create a cron job, eg. it should be executed at specific times.
Workers will enqueue this job at or just after the set times. If ``unique`` is true (the default) the
job will only be run once even if multiple workers are running.
:param coroutine: coroutine function to run
:param name: name of the job, if None, the name of the coroutine is used
:param month: month(s) to run the job on, 1 - 12
:param day: day(s) to run the job on, 1 - 31
:param weekday: week day(s) to run the job on, 0 - 6 or mon - sun
:param hour: hour(s) to run the job on, 0 - 23
:param minute: minute(s) to run the job on, 0 - 59
:param second: second(s) to run the job on, 0 - 59
:param microsecond: microsecond(s) to run the job on,
defaults to 123456 as the world is busier at the top of a second, 0 - 1e6
:param run_at_startup: whether to run as worker starts
:param unique: whether the job should be only be executed once at each time
:param timeout: job timeout
:param keep_result: how long to keep the result for
:param max_tries: maximum number of tries for the job | [
"Create",
"a",
"cron",
"job",
"eg",
".",
"it",
"should",
"be",
"executed",
"at",
"specific",
"times",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/cron.py#L128-L191 | train | 207,935 |
samuelcolvin/arq | arq/utils.py | to_unix_ms | def to_unix_ms(dt: datetime) -> int:
"""
convert a datetime to number of milliseconds since 1970 and calculate timezone offset
"""
utcoffset = dt.utcoffset()
ep = epoch if utcoffset is None else epoch_tz
return as_int((dt - ep).total_seconds() * 1000) | python | def to_unix_ms(dt: datetime) -> int:
"""
convert a datetime to number of milliseconds since 1970 and calculate timezone offset
"""
utcoffset = dt.utcoffset()
ep = epoch if utcoffset is None else epoch_tz
return as_int((dt - ep).total_seconds() * 1000) | [
"def",
"to_unix_ms",
"(",
"dt",
":",
"datetime",
")",
"->",
"int",
":",
"utcoffset",
"=",
"dt",
".",
"utcoffset",
"(",
")",
"ep",
"=",
"epoch",
"if",
"utcoffset",
"is",
"None",
"else",
"epoch_tz",
"return",
"as_int",
"(",
"(",
"dt",
"-",
"ep",
")",
... | convert a datetime to number of milliseconds since 1970 and calculate timezone offset | [
"convert",
"a",
"datetime",
"to",
"number",
"of",
"milliseconds",
"since",
"1970",
"and",
"calculate",
"timezone",
"offset"
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/utils.py#L23-L29 | train | 207,936 |
samuelcolvin/arq | arq/connections.py | create_pool | async def create_pool(settings: RedisSettings = None, *, _retry: int = 0) -> ArqRedis:
"""
Create a new redis pool, retrying up to ``conn_retries`` times if the connection fails.
Similar to ``aioredis.create_redis_pool`` except it returns a :class:`arq.connections.ArqRedis` instance,
thus allowing job enqueuing.
"""
settings = settings or RedisSettings()
addr = settings.host, settings.port
try:
pool = await aioredis.create_redis_pool(
addr,
db=settings.database,
password=settings.password,
timeout=settings.conn_timeout,
encoding='utf8',
commands_factory=ArqRedis,
)
except (ConnectionError, OSError, aioredis.RedisError, asyncio.TimeoutError) as e:
if _retry < settings.conn_retries:
logger.warning(
'redis connection error %s:%s %s %s, %d retries remaining...',
settings.host,
settings.port,
e.__class__.__name__,
e,
settings.conn_retries - _retry,
)
await asyncio.sleep(settings.conn_retry_delay)
else:
raise
else:
if _retry > 0:
logger.info('redis connection successful')
return pool
# recursively attempt to create the pool outside the except block to avoid
# "During handling of the above exception..." madness
return await create_pool(settings, _retry=_retry + 1) | python | async def create_pool(settings: RedisSettings = None, *, _retry: int = 0) -> ArqRedis:
"""
Create a new redis pool, retrying up to ``conn_retries`` times if the connection fails.
Similar to ``aioredis.create_redis_pool`` except it returns a :class:`arq.connections.ArqRedis` instance,
thus allowing job enqueuing.
"""
settings = settings or RedisSettings()
addr = settings.host, settings.port
try:
pool = await aioredis.create_redis_pool(
addr,
db=settings.database,
password=settings.password,
timeout=settings.conn_timeout,
encoding='utf8',
commands_factory=ArqRedis,
)
except (ConnectionError, OSError, aioredis.RedisError, asyncio.TimeoutError) as e:
if _retry < settings.conn_retries:
logger.warning(
'redis connection error %s:%s %s %s, %d retries remaining...',
settings.host,
settings.port,
e.__class__.__name__,
e,
settings.conn_retries - _retry,
)
await asyncio.sleep(settings.conn_retry_delay)
else:
raise
else:
if _retry > 0:
logger.info('redis connection successful')
return pool
# recursively attempt to create the pool outside the except block to avoid
# "During handling of the above exception..." madness
return await create_pool(settings, _retry=_retry + 1) | [
"async",
"def",
"create_pool",
"(",
"settings",
":",
"RedisSettings",
"=",
"None",
",",
"*",
",",
"_retry",
":",
"int",
"=",
"0",
")",
"->",
"ArqRedis",
":",
"settings",
"=",
"settings",
"or",
"RedisSettings",
"(",
")",
"addr",
"=",
"settings",
".",
"h... | Create a new redis pool, retrying up to ``conn_retries`` times if the connection fails.
Similar to ``aioredis.create_redis_pool`` except it returns a :class:`arq.connections.ArqRedis` instance,
thus allowing job enqueuing. | [
"Create",
"a",
"new",
"redis",
"pool",
"retrying",
"up",
"to",
"conn_retries",
"times",
"if",
"the",
"connection",
"fails",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/connections.py#L125-L163 | train | 207,937 |
samuelcolvin/arq | arq/connections.py | ArqRedis.enqueue_job | async def enqueue_job(
self,
function: str,
*args: Any,
_job_id: Optional[str] = None,
_defer_until: Optional[datetime] = None,
_defer_by: Union[None, int, float, timedelta] = None,
_expires: Union[None, int, float, timedelta] = None,
_job_try: Optional[int] = None,
**kwargs: Any,
) -> Optional[Job]:
"""
Enqueue a job.
:param function: Name of the function to call
:param args: args to pass to the function
:param _job_id: ID of the job, can be used to enforce job uniqueness
:param _defer_until: datetime at which to run the job
:param _defer_by: duration to wait before running the job
:param _expires: if the job still hasn't started after this duration, do not run it
:param _job_try: useful when re-enqueueing jobs within a job
:param kwargs: any keyword arguments to pass to the function
:return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists
"""
job_id = _job_id or uuid4().hex
job_key = job_key_prefix + job_id
assert not (_defer_until and _defer_by), "use either 'defer_until' or 'defer_by' or neither, not both"
defer_by_ms = to_ms(_defer_by)
expires_ms = to_ms(_expires)
with await self as conn:
pipe = conn.pipeline()
pipe.unwatch()
pipe.watch(job_key)
job_exists = pipe.exists(job_key)
await pipe.execute()
if await job_exists:
return
enqueue_time_ms = timestamp_ms()
if _defer_until is not None:
score = to_unix_ms(_defer_until)
elif defer_by_ms:
score = enqueue_time_ms + defer_by_ms
else:
score = enqueue_time_ms
expires_ms = expires_ms or score - enqueue_time_ms + expires_extra_ms
job = pickle_job(function, args, kwargs, _job_try, enqueue_time_ms)
tr = conn.multi_exec()
tr.psetex(job_key, expires_ms, job)
tr.zadd(queue_name, score, job_id)
try:
await tr.execute()
except MultiExecError:
# job got enqueued since we checked 'job_exists'
return
return Job(job_id, self) | python | async def enqueue_job(
self,
function: str,
*args: Any,
_job_id: Optional[str] = None,
_defer_until: Optional[datetime] = None,
_defer_by: Union[None, int, float, timedelta] = None,
_expires: Union[None, int, float, timedelta] = None,
_job_try: Optional[int] = None,
**kwargs: Any,
) -> Optional[Job]:
"""
Enqueue a job.
:param function: Name of the function to call
:param args: args to pass to the function
:param _job_id: ID of the job, can be used to enforce job uniqueness
:param _defer_until: datetime at which to run the job
:param _defer_by: duration to wait before running the job
:param _expires: if the job still hasn't started after this duration, do not run it
:param _job_try: useful when re-enqueueing jobs within a job
:param kwargs: any keyword arguments to pass to the function
:return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists
"""
job_id = _job_id or uuid4().hex
job_key = job_key_prefix + job_id
assert not (_defer_until and _defer_by), "use either 'defer_until' or 'defer_by' or neither, not both"
defer_by_ms = to_ms(_defer_by)
expires_ms = to_ms(_expires)
with await self as conn:
pipe = conn.pipeline()
pipe.unwatch()
pipe.watch(job_key)
job_exists = pipe.exists(job_key)
await pipe.execute()
if await job_exists:
return
enqueue_time_ms = timestamp_ms()
if _defer_until is not None:
score = to_unix_ms(_defer_until)
elif defer_by_ms:
score = enqueue_time_ms + defer_by_ms
else:
score = enqueue_time_ms
expires_ms = expires_ms or score - enqueue_time_ms + expires_extra_ms
job = pickle_job(function, args, kwargs, _job_try, enqueue_time_ms)
tr = conn.multi_exec()
tr.psetex(job_key, expires_ms, job)
tr.zadd(queue_name, score, job_id)
try:
await tr.execute()
except MultiExecError:
# job got enqueued since we checked 'job_exists'
return
return Job(job_id, self) | [
"async",
"def",
"enqueue_job",
"(",
"self",
",",
"function",
":",
"str",
",",
"*",
"args",
":",
"Any",
",",
"_job_id",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"_defer_until",
":",
"Optional",
"[",
"datetime",
"]",
"=",
"None",
",",
"_defe... | Enqueue a job.
:param function: Name of the function to call
:param args: args to pass to the function
:param _job_id: ID of the job, can be used to enforce job uniqueness
:param _defer_until: datetime at which to run the job
:param _defer_by: duration to wait before running the job
:param _expires: if the job still hasn't started after this duration, do not run it
:param _job_try: useful when re-enqueueing jobs within a job
:param kwargs: any keyword arguments to pass to the function
:return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists | [
"Enqueue",
"a",
"job",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/connections.py#L48-L107 | train | 207,938 |
samuelcolvin/arq | arq/connections.py | ArqRedis.all_job_results | async def all_job_results(self) -> List[JobResult]:
"""
Get results for all jobs in redis.
"""
keys = await self.keys(result_key_prefix + '*')
results = await asyncio.gather(*[self._get_job_result(k) for k in keys])
return sorted(results, key=attrgetter('enqueue_time')) | python | async def all_job_results(self) -> List[JobResult]:
"""
Get results for all jobs in redis.
"""
keys = await self.keys(result_key_prefix + '*')
results = await asyncio.gather(*[self._get_job_result(k) for k in keys])
return sorted(results, key=attrgetter('enqueue_time')) | [
"async",
"def",
"all_job_results",
"(",
"self",
")",
"->",
"List",
"[",
"JobResult",
"]",
":",
"keys",
"=",
"await",
"self",
".",
"keys",
"(",
"result_key_prefix",
"+",
"'*'",
")",
"results",
"=",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"[",
"self... | Get results for all jobs in redis. | [
"Get",
"results",
"for",
"all",
"jobs",
"in",
"redis",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/connections.py#L116-L122 | train | 207,939 |
samuelcolvin/arq | arq/worker.py | func | def func(
coroutine: Union[str, Function, Callable],
*,
name: Optional[str] = None,
keep_result: Optional[SecondsTimedelta] = None,
timeout: Optional[SecondsTimedelta] = None,
max_tries: Optional[int] = None,
) -> Function:
"""
Wrapper for a job function which lets you configure more settings.
:param coroutine: coroutine function to call, can be a string to import
:param name: name for function, if None, ``coroutine.__qualname__`` is used
:param keep_result: duration to keep the result for, if 0 the result is not kept
:param timeout: maximum time the job should take
:param max_tries: maximum number of tries allowed for the function, use 1 to prevent retrying
"""
if isinstance(coroutine, Function):
return coroutine
if isinstance(coroutine, str):
name = name or coroutine
coroutine = import_string(coroutine)
assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
timeout = to_seconds(timeout)
keep_result = to_seconds(keep_result)
return Function(name or coroutine.__qualname__, coroutine, timeout, keep_result, max_tries) | python | def func(
coroutine: Union[str, Function, Callable],
*,
name: Optional[str] = None,
keep_result: Optional[SecondsTimedelta] = None,
timeout: Optional[SecondsTimedelta] = None,
max_tries: Optional[int] = None,
) -> Function:
"""
Wrapper for a job function which lets you configure more settings.
:param coroutine: coroutine function to call, can be a string to import
:param name: name for function, if None, ``coroutine.__qualname__`` is used
:param keep_result: duration to keep the result for, if 0 the result is not kept
:param timeout: maximum time the job should take
:param max_tries: maximum number of tries allowed for the function, use 1 to prevent retrying
"""
if isinstance(coroutine, Function):
return coroutine
if isinstance(coroutine, str):
name = name or coroutine
coroutine = import_string(coroutine)
assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
timeout = to_seconds(timeout)
keep_result = to_seconds(keep_result)
return Function(name or coroutine.__qualname__, coroutine, timeout, keep_result, max_tries) | [
"def",
"func",
"(",
"coroutine",
":",
"Union",
"[",
"str",
",",
"Function",
",",
"Callable",
"]",
",",
"*",
",",
"name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"keep_result",
":",
"Optional",
"[",
"SecondsTimedelta",
"]",
"=",
"None",
",... | Wrapper for a job function which lets you configure more settings.
:param coroutine: coroutine function to call, can be a string to import
:param name: name for function, if None, ``coroutine.__qualname__`` is used
:param keep_result: duration to keep the result for, if 0 the result is not kept
:param timeout: maximum time the job should take
:param max_tries: maximum number of tries allowed for the function, use 1 to prevent retrying | [
"Wrapper",
"for",
"a",
"job",
"function",
"which",
"lets",
"you",
"configure",
"more",
"settings",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/worker.py#L53-L81 | train | 207,940 |
samuelcolvin/arq | arq/worker.py | Worker.run | def run(self) -> None:
"""
Sync function to run the worker, finally closes worker connections.
"""
self.main_task = self.loop.create_task(self.main())
try:
self.loop.run_until_complete(self.main_task)
except asyncio.CancelledError:
# happens on shutdown, fine
pass
finally:
self.loop.run_until_complete(self.close()) | python | def run(self) -> None:
"""
Sync function to run the worker, finally closes worker connections.
"""
self.main_task = self.loop.create_task(self.main())
try:
self.loop.run_until_complete(self.main_task)
except asyncio.CancelledError:
# happens on shutdown, fine
pass
finally:
self.loop.run_until_complete(self.close()) | [
"def",
"run",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"main_task",
"=",
"self",
".",
"loop",
".",
"create_task",
"(",
"self",
".",
"main",
"(",
")",
")",
"try",
":",
"self",
".",
"loop",
".",
"run_until_complete",
"(",
"self",
".",
"main_... | Sync function to run the worker, finally closes worker connections. | [
"Sync",
"function",
"to",
"run",
"the",
"worker",
"finally",
"closes",
"worker",
"connections",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/worker.py#L193-L204 | train | 207,941 |
samuelcolvin/arq | arq/worker.py | Worker.async_run | async def async_run(self) -> None:
"""
Asynchronously run the worker, does not close connections. Useful when testing.
"""
self.main_task = self.loop.create_task(self.main())
await self.main_task | python | async def async_run(self) -> None:
"""
Asynchronously run the worker, does not close connections. Useful when testing.
"""
self.main_task = self.loop.create_task(self.main())
await self.main_task | [
"async",
"def",
"async_run",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"main_task",
"=",
"self",
".",
"loop",
".",
"create_task",
"(",
"self",
".",
"main",
"(",
")",
")",
"await",
"self",
".",
"main_task"
] | Asynchronously run the worker, does not close connections. Useful when testing. | [
"Asynchronously",
"run",
"the",
"worker",
"does",
"not",
"close",
"connections",
".",
"Useful",
"when",
"testing",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/worker.py#L206-L211 | train | 207,942 |
samuelcolvin/arq | arq/jobs.py | Job.result | async def result(self, timeout: Optional[float] = None, *, pole_delay: float = 0.5) -> Any:
"""
Get the result of the job, including waiting if it's not yet available. If the job raised an exception,
it will be raised here.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever
:param pole_delay: how often to poll redis for the job result
"""
async for delay in poll(pole_delay):
info = await self.result_info()
if info:
result = info.result
if info.success:
return result
else:
raise result
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError() | python | async def result(self, timeout: Optional[float] = None, *, pole_delay: float = 0.5) -> Any:
"""
Get the result of the job, including waiting if it's not yet available. If the job raised an exception,
it will be raised here.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever
:param pole_delay: how often to poll redis for the job result
"""
async for delay in poll(pole_delay):
info = await self.result_info()
if info:
result = info.result
if info.success:
return result
else:
raise result
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError() | [
"async",
"def",
"result",
"(",
"self",
",",
"timeout",
":",
"Optional",
"[",
"float",
"]",
"=",
"None",
",",
"*",
",",
"pole_delay",
":",
"float",
"=",
"0.5",
")",
"->",
"Any",
":",
"async",
"for",
"delay",
"in",
"poll",
"(",
"pole_delay",
")",
":"... | Get the result of the job, including waiting if it's not yet available. If the job raised an exception,
it will be raised here.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever
:param pole_delay: how often to poll redis for the job result | [
"Get",
"the",
"result",
"of",
"the",
"job",
"including",
"waiting",
"if",
"it",
"s",
"not",
"yet",
"available",
".",
"If",
"the",
"job",
"raised",
"an",
"exception",
"it",
"will",
"be",
"raised",
"here",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/jobs.py#L62-L79 | train | 207,943 |
samuelcolvin/arq | arq/jobs.py | Job.info | async def info(self) -> Optional[JobDef]:
"""
All information on a job, including its result if it's available, does not wait for the result.
"""
info = await self.result_info()
if not info:
v = await self._redis.get(job_key_prefix + self.job_id, encoding=None)
if v:
info = unpickle_job(v)
if info:
info.score = await self._redis.zscore(queue_name, self.job_id)
return info | python | async def info(self) -> Optional[JobDef]:
"""
All information on a job, including its result if it's available, does not wait for the result.
"""
info = await self.result_info()
if not info:
v = await self._redis.get(job_key_prefix + self.job_id, encoding=None)
if v:
info = unpickle_job(v)
if info:
info.score = await self._redis.zscore(queue_name, self.job_id)
return info | [
"async",
"def",
"info",
"(",
"self",
")",
"->",
"Optional",
"[",
"JobDef",
"]",
":",
"info",
"=",
"await",
"self",
".",
"result_info",
"(",
")",
"if",
"not",
"info",
":",
"v",
"=",
"await",
"self",
".",
"_redis",
".",
"get",
"(",
"job_key_prefix",
... | All information on a job, including its result if it's available, does not wait for the result. | [
"All",
"information",
"on",
"a",
"job",
"including",
"its",
"result",
"if",
"it",
"s",
"available",
"does",
"not",
"wait",
"for",
"the",
"result",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/jobs.py#L81-L92 | train | 207,944 |
samuelcolvin/arq | arq/jobs.py | Job.result_info | async def result_info(self) -> Optional[JobResult]:
"""
Information about the job result if available, does not wait for the result. Does not raise an exception
even if the job raised one.
"""
v = await self._redis.get(result_key_prefix + self.job_id, encoding=None)
if v:
return unpickle_result(v) | python | async def result_info(self) -> Optional[JobResult]:
"""
Information about the job result if available, does not wait for the result. Does not raise an exception
even if the job raised one.
"""
v = await self._redis.get(result_key_prefix + self.job_id, encoding=None)
if v:
return unpickle_result(v) | [
"async",
"def",
"result_info",
"(",
"self",
")",
"->",
"Optional",
"[",
"JobResult",
"]",
":",
"v",
"=",
"await",
"self",
".",
"_redis",
".",
"get",
"(",
"result_key_prefix",
"+",
"self",
".",
"job_id",
",",
"encoding",
"=",
"None",
")",
"if",
"v",
"... | Information about the job result if available, does not wait for the result. Does not raise an exception
even if the job raised one. | [
"Information",
"about",
"the",
"job",
"result",
"if",
"available",
"does",
"not",
"wait",
"for",
"the",
"result",
".",
"Does",
"not",
"raise",
"an",
"exception",
"even",
"if",
"the",
"job",
"raised",
"one",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/jobs.py#L94-L101 | train | 207,945 |
samuelcolvin/arq | arq/jobs.py | Job.status | async def status(self) -> JobStatus:
"""
Status of the job.
"""
if await self._redis.exists(result_key_prefix + self.job_id):
return JobStatus.complete
elif await self._redis.exists(in_progress_key_prefix + self.job_id):
return JobStatus.in_progress
else:
score = await self._redis.zscore(queue_name, self.job_id)
if not score:
return JobStatus.not_found
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued | python | async def status(self) -> JobStatus:
"""
Status of the job.
"""
if await self._redis.exists(result_key_prefix + self.job_id):
return JobStatus.complete
elif await self._redis.exists(in_progress_key_prefix + self.job_id):
return JobStatus.in_progress
else:
score = await self._redis.zscore(queue_name, self.job_id)
if not score:
return JobStatus.not_found
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued | [
"async",
"def",
"status",
"(",
"self",
")",
"->",
"JobStatus",
":",
"if",
"await",
"self",
".",
"_redis",
".",
"exists",
"(",
"result_key_prefix",
"+",
"self",
".",
"job_id",
")",
":",
"return",
"JobStatus",
".",
"complete",
"elif",
"await",
"self",
".",... | Status of the job. | [
"Status",
"of",
"the",
"job",
"."
] | 1434646b48c45bd27e392f0162976404e4d8021d | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/jobs.py#L103-L115 | train | 207,946 |
hfaran/slack-export-viewer | slackviewer/user.py | User.email | def email(self):
"""
Shortcut property for finding the e-mail address or bot URL.
"""
if "profile" in self._raw:
email = self._raw["profile"].get("email")
elif "bot_url" in self._raw:
email = self._raw["bot_url"]
else:
email = None
if not email:
logging.debug("No email found for %s", self._raw.get("name"))
return email | python | def email(self):
"""
Shortcut property for finding the e-mail address or bot URL.
"""
if "profile" in self._raw:
email = self._raw["profile"].get("email")
elif "bot_url" in self._raw:
email = self._raw["bot_url"]
else:
email = None
if not email:
logging.debug("No email found for %s", self._raw.get("name"))
return email | [
"def",
"email",
"(",
"self",
")",
":",
"if",
"\"profile\"",
"in",
"self",
".",
"_raw",
":",
"email",
"=",
"self",
".",
"_raw",
"[",
"\"profile\"",
"]",
".",
"get",
"(",
"\"email\"",
")",
"elif",
"\"bot_url\"",
"in",
"self",
".",
"_raw",
":",
"email",... | Shortcut property for finding the e-mail address or bot URL. | [
"Shortcut",
"property",
"for",
"finding",
"the",
"e",
"-",
"mail",
"address",
"or",
"bot",
"URL",
"."
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/user.py#L33-L45 | train | 207,947 |
hfaran/slack-export-viewer | slackviewer/user.py | User.image_url | def image_url(self, pixel_size=None):
"""
Get the URL for the user icon in the desired pixel size, if it exists. If no
size is supplied, give the URL for the full-size image.
"""
if "profile" not in self._raw:
return
profile = self._raw["profile"]
if (pixel_size):
img_key = "image_%s" % pixel_size
if img_key in profile:
return profile[img_key]
return profile[self._DEFAULT_IMAGE_KEY] | python | def image_url(self, pixel_size=None):
"""
Get the URL for the user icon in the desired pixel size, if it exists. If no
size is supplied, give the URL for the full-size image.
"""
if "profile" not in self._raw:
return
profile = self._raw["profile"]
if (pixel_size):
img_key = "image_%s" % pixel_size
if img_key in profile:
return profile[img_key]
return profile[self._DEFAULT_IMAGE_KEY] | [
"def",
"image_url",
"(",
"self",
",",
"pixel_size",
"=",
"None",
")",
":",
"if",
"\"profile\"",
"not",
"in",
"self",
".",
"_raw",
":",
"return",
"profile",
"=",
"self",
".",
"_raw",
"[",
"\"profile\"",
"]",
"if",
"(",
"pixel_size",
")",
":",
"img_key",... | Get the URL for the user icon in the desired pixel size, if it exists. If no
size is supplied, give the URL for the full-size image. | [
"Get",
"the",
"URL",
"for",
"the",
"user",
"icon",
"in",
"the",
"desired",
"pixel",
"size",
"if",
"it",
"exists",
".",
"If",
"no",
"size",
"is",
"supplied",
"give",
"the",
"URL",
"for",
"the",
"full",
"-",
"size",
"image",
"."
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/user.py#L47-L59 | train | 207,948 |
hfaran/slack-export-viewer | slackviewer/message.py | LinkAttachment.fields | def fields(self):
"""
Fetch the "fields" list, and process the text within each field, including markdown
processing if the message indicates that the fields contain markdown.
Only present on attachments, not files--this abstraction isn't 100% awesome.'
"""
process_markdown = ("fields" in self._raw.get("mrkdwn_in", []))
fields = self._raw.get("fields", [])
if fields:
logging.debug("Rendering with markdown markdown %s for %s", process_markdown, fields)
return [
{"title": e["title"], "short": e["short"], "value": self._formatter.render_text(e["value"], process_markdown)}
for e in fields
] | python | def fields(self):
"""
Fetch the "fields" list, and process the text within each field, including markdown
processing if the message indicates that the fields contain markdown.
Only present on attachments, not files--this abstraction isn't 100% awesome.'
"""
process_markdown = ("fields" in self._raw.get("mrkdwn_in", []))
fields = self._raw.get("fields", [])
if fields:
logging.debug("Rendering with markdown markdown %s for %s", process_markdown, fields)
return [
{"title": e["title"], "short": e["short"], "value": self._formatter.render_text(e["value"], process_markdown)}
for e in fields
] | [
"def",
"fields",
"(",
"self",
")",
":",
"process_markdown",
"=",
"(",
"\"fields\"",
"in",
"self",
".",
"_raw",
".",
"get",
"(",
"\"mrkdwn_in\"",
",",
"[",
"]",
")",
")",
"fields",
"=",
"self",
".",
"_raw",
".",
"get",
"(",
"\"fields\"",
",",
"[",
"... | Fetch the "fields" list, and process the text within each field, including markdown
processing if the message indicates that the fields contain markdown.
Only present on attachments, not files--this abstraction isn't 100% awesome.' | [
"Fetch",
"the",
"fields",
"list",
"and",
"process",
"the",
"text",
"within",
"each",
"field",
"including",
"markdown",
"processing",
"if",
"the",
"message",
"indicates",
"that",
"the",
"fields",
"contain",
"markdown",
"."
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/message.py#L183-L197 | train | 207,949 |
hfaran/slack-export-viewer | slackviewer/reader.py | Reader.compile_dm_users | def compile_dm_users(self):
"""
Gets the info for the members within the dm
Returns a list of all dms with the members that have ever existed
:rtype: [object]
{
id: <id>
users: [<user_id>]
}
"""
dm_data = self._read_from_json("dms.json")
dms = dm_data.values()
all_dms_users = []
for dm in dms:
# checks if messages actually exsist
if dm["id"] not in self._EMPTY_DMS:
# added try catch for users from shared workspaces not in current workspace
try:
dm_members = {"id": dm["id"], "users": [self.__USER_DATA[m] for m in dm["members"]]}
all_dms_users.append(dm_members)
except KeyError:
dm_members = None
return all_dms_users | python | def compile_dm_users(self):
"""
Gets the info for the members within the dm
Returns a list of all dms with the members that have ever existed
:rtype: [object]
{
id: <id>
users: [<user_id>]
}
"""
dm_data = self._read_from_json("dms.json")
dms = dm_data.values()
all_dms_users = []
for dm in dms:
# checks if messages actually exsist
if dm["id"] not in self._EMPTY_DMS:
# added try catch for users from shared workspaces not in current workspace
try:
dm_members = {"id": dm["id"], "users": [self.__USER_DATA[m] for m in dm["members"]]}
all_dms_users.append(dm_members)
except KeyError:
dm_members = None
return all_dms_users | [
"def",
"compile_dm_users",
"(",
"self",
")",
":",
"dm_data",
"=",
"self",
".",
"_read_from_json",
"(",
"\"dms.json\"",
")",
"dms",
"=",
"dm_data",
".",
"values",
"(",
")",
"all_dms_users",
"=",
"[",
"]",
"for",
"dm",
"in",
"dms",
":",
"# checks if messages... | Gets the info for the members within the dm
Returns a list of all dms with the members that have ever existed
:rtype: [object]
{
id: <id>
users: [<user_id>]
} | [
"Gets",
"the",
"info",
"for",
"the",
"members",
"within",
"the",
"dm"
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/reader.py#L51-L79 | train | 207,950 |
hfaran/slack-export-viewer | slackviewer/reader.py | Reader.compile_mpim_users | def compile_mpim_users(self):
"""
Gets the info for the members within the multiple person instant message
Returns a list of all dms with the members that have ever existed
:rtype: [object]
{
name: <name>
users: [<user_id>]
}
"""
mpim_data = self._read_from_json("mpims.json")
mpims = [c for c in mpim_data.values()]
all_mpim_users = []
for mpim in mpims:
mpim_members = {"name": mpim["name"], "users": [self.__USER_DATA[m] for m in mpim["members"]]}
all_mpim_users.append(mpim_members)
return all_mpim_users | python | def compile_mpim_users(self):
"""
Gets the info for the members within the multiple person instant message
Returns a list of all dms with the members that have ever existed
:rtype: [object]
{
name: <name>
users: [<user_id>]
}
"""
mpim_data = self._read_from_json("mpims.json")
mpims = [c for c in mpim_data.values()]
all_mpim_users = []
for mpim in mpims:
mpim_members = {"name": mpim["name"], "users": [self.__USER_DATA[m] for m in mpim["members"]]}
all_mpim_users.append(mpim_members)
return all_mpim_users | [
"def",
"compile_mpim_users",
"(",
"self",
")",
":",
"mpim_data",
"=",
"self",
".",
"_read_from_json",
"(",
"\"mpims.json\"",
")",
"mpims",
"=",
"[",
"c",
"for",
"c",
"in",
"mpim_data",
".",
"values",
"(",
")",
"]",
"all_mpim_users",
"=",
"[",
"]",
"for",... | Gets the info for the members within the multiple person instant message
Returns a list of all dms with the members that have ever existed
:rtype: [object]
{
name: <name>
users: [<user_id>]
} | [
"Gets",
"the",
"info",
"for",
"the",
"members",
"within",
"the",
"multiple",
"person",
"instant",
"message"
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/reader.py#L89-L111 | train | 207,951 |
hfaran/slack-export-viewer | slackviewer/reader.py | Reader._create_messages | def _create_messages(self, names, data, isDms=False):
"""
Creates object of arrays of messages from each json file specified by the names or ids
:param [str] names: names of each group of messages
:param [object] data: array of objects detailing where to get the messages from in
the directory structure
:param bool isDms: boolean value used to tell if the data is dm data so the function can
collect the empty dm directories and store them in memory only
:return: object of arrays of messages
:rtype: object
"""
chats = {}
empty_dms = []
formatter = SlackFormatter(self.__USER_DATA, data)
for name in names:
# gets path to dm directory that holds the json archive
dir_path = os.path.join(self._PATH, name)
messages = []
# array of all days archived
day_files = glob.glob(os.path.join(dir_path, "*.json"))
# this is where it's skipping the empty directories
if not day_files:
if isDms:
empty_dms.append(name)
continue
for day in sorted(day_files):
with io.open(os.path.join(self._PATH, day), encoding="utf8") as f:
# loads all messages
day_messages = json.load(f)
messages.extend([Message(formatter, d) for d in day_messages])
chats[name] = messages
if isDms:
self._EMPTY_DMS = empty_dms
return chats | python | def _create_messages(self, names, data, isDms=False):
"""
Creates object of arrays of messages from each json file specified by the names or ids
:param [str] names: names of each group of messages
:param [object] data: array of objects detailing where to get the messages from in
the directory structure
:param bool isDms: boolean value used to tell if the data is dm data so the function can
collect the empty dm directories and store them in memory only
:return: object of arrays of messages
:rtype: object
"""
chats = {}
empty_dms = []
formatter = SlackFormatter(self.__USER_DATA, data)
for name in names:
# gets path to dm directory that holds the json archive
dir_path = os.path.join(self._PATH, name)
messages = []
# array of all days archived
day_files = glob.glob(os.path.join(dir_path, "*.json"))
# this is where it's skipping the empty directories
if not day_files:
if isDms:
empty_dms.append(name)
continue
for day in sorted(day_files):
with io.open(os.path.join(self._PATH, day), encoding="utf8") as f:
# loads all messages
day_messages = json.load(f)
messages.extend([Message(formatter, d) for d in day_messages])
chats[name] = messages
if isDms:
self._EMPTY_DMS = empty_dms
return chats | [
"def",
"_create_messages",
"(",
"self",
",",
"names",
",",
"data",
",",
"isDms",
"=",
"False",
")",
":",
"chats",
"=",
"{",
"}",
"empty_dms",
"=",
"[",
"]",
"formatter",
"=",
"SlackFormatter",
"(",
"self",
".",
"__USER_DATA",
",",
"data",
")",
"for",
... | Creates object of arrays of messages from each json file specified by the names or ids
:param [str] names: names of each group of messages
:param [object] data: array of objects detailing where to get the messages from in
the directory structure
:param bool isDms: boolean value used to tell if the data is dm data so the function can
collect the empty dm directories and store them in memory only
:return: object of arrays of messages
:rtype: object | [
"Creates",
"object",
"of",
"arrays",
"of",
"messages",
"from",
"each",
"json",
"file",
"specified",
"by",
"the",
"names",
"or",
"ids"
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/reader.py#L118-L164 | train | 207,952 |
hfaran/slack-export-viewer | slackviewer/reader.py | Reader._read_from_json | def _read_from_json(self, file):
"""
Reads the file specified from json and creates an object based on the id of each element
:param str file: Path to file of json to read
:return: object of data read from json file
:rtype: object
"""
try:
with io.open(os.path.join(self._PATH, file), encoding="utf8") as f:
return {u["id"]: u for u in json.load(f)}
except IOError:
return {} | python | def _read_from_json(self, file):
"""
Reads the file specified from json and creates an object based on the id of each element
:param str file: Path to file of json to read
:return: object of data read from json file
:rtype: object
"""
try:
with io.open(os.path.join(self._PATH, file), encoding="utf8") as f:
return {u["id"]: u for u in json.load(f)}
except IOError:
return {} | [
"def",
"_read_from_json",
"(",
"self",
",",
"file",
")",
":",
"try",
":",
"with",
"io",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_PATH",
",",
"file",
")",
",",
"encoding",
"=",
"\"utf8\"",
")",
"as",
"f",
":",
"return"... | Reads the file specified from json and creates an object based on the id of each element
:param str file: Path to file of json to read
:return: object of data read from json file
:rtype: object | [
"Reads",
"the",
"file",
"specified",
"from",
"json",
"and",
"creates",
"an",
"object",
"based",
"on",
"the",
"id",
"of",
"each",
"element"
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/reader.py#L166-L181 | train | 207,953 |
hfaran/slack-export-viewer | slackviewer/utils/six.py | to_bytes | def to_bytes(s, encoding="utf8"):
"""Converts str s to bytes"""
if PY_VERSION == 2:
b = bytes(s)
elif PY_VERSION == 3:
b = bytes(s, encoding)
else:
raise ValueError("Is Python 4 out already?")
return b | python | def to_bytes(s, encoding="utf8"):
"""Converts str s to bytes"""
if PY_VERSION == 2:
b = bytes(s)
elif PY_VERSION == 3:
b = bytes(s, encoding)
else:
raise ValueError("Is Python 4 out already?")
return b | [
"def",
"to_bytes",
"(",
"s",
",",
"encoding",
"=",
"\"utf8\"",
")",
":",
"if",
"PY_VERSION",
"==",
"2",
":",
"b",
"=",
"bytes",
"(",
"s",
")",
"elif",
"PY_VERSION",
"==",
"3",
":",
"b",
"=",
"bytes",
"(",
"s",
",",
"encoding",
")",
"else",
":",
... | Converts str s to bytes | [
"Converts",
"str",
"s",
"to",
"bytes"
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/utils/six.py#L21-L30 | train | 207,954 |
hfaran/slack-export-viewer | slackviewer/archive.py | SHA1_file | def SHA1_file(filepath, extra=b''):
"""
Returns hex digest of SHA1 hash of file at filepath
:param str filepath: File to hash
:param bytes extra: Extra content added to raw read of file before taking hash
:return: hex digest of hash
:rtype: str
"""
h = hashlib.sha1()
with io.open(filepath, 'rb') as f:
for chunk in iter(lambda: f.read(h.block_size), b''):
h.update(chunk)
h.update(extra)
return h.hexdigest() | python | def SHA1_file(filepath, extra=b''):
"""
Returns hex digest of SHA1 hash of file at filepath
:param str filepath: File to hash
:param bytes extra: Extra content added to raw read of file before taking hash
:return: hex digest of hash
:rtype: str
"""
h = hashlib.sha1()
with io.open(filepath, 'rb') as f:
for chunk in iter(lambda: f.read(h.block_size), b''):
h.update(chunk)
h.update(extra)
return h.hexdigest() | [
"def",
"SHA1_file",
"(",
"filepath",
",",
"extra",
"=",
"b''",
")",
":",
"h",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"with",
"io",
".",
"open",
"(",
"filepath",
",",
"'rb'",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"iter",
"(",
"lambda",
":",
... | Returns hex digest of SHA1 hash of file at filepath
:param str filepath: File to hash
:param bytes extra: Extra content added to raw read of file before taking hash
:return: hex digest of hash
:rtype: str | [
"Returns",
"hex",
"digest",
"of",
"SHA1",
"hash",
"of",
"file",
"at",
"filepath"
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/archive.py#L14-L31 | train | 207,955 |
hfaran/slack-export-viewer | slackviewer/archive.py | extract_archive | def extract_archive(filepath):
"""
Returns the path of the archive
:param str filepath: Path to file to extract or read
:return: path of the archive
:rtype: str
"""
# Checks if file path is a directory
if os.path.isdir(filepath):
path = os.path.abspath(filepath)
print("Archive already extracted. Viewing from {}...".format(path))
return path
# Checks if the filepath is a zipfile and continues to extract if it is
# if not it raises an error
elif not zipfile.is_zipfile(filepath):
# Misuse of TypeError? :P
raise TypeError("{} is not a zipfile".format(filepath))
archive_sha = SHA1_file(
filepath=filepath,
# Add version of slackviewer to hash as well so we can invalidate the cached copy
# if there are new features added
extra=to_bytes(slackviewer.__version__)
)
extracted_path = os.path.join(SLACKVIEWER_TEMP_PATH, archive_sha)
if os.path.exists(extracted_path):
print("{} already exists".format(extracted_path))
else:
# Extract zip
with zipfile.ZipFile(filepath) as zip:
print("{} extracting to {}...".format(filepath, extracted_path))
zip.extractall(path=extracted_path)
print("{} extracted to {}".format(filepath, extracted_path))
# Add additional file with archive info
create_archive_info(filepath, extracted_path, archive_sha)
return extracted_path | python | def extract_archive(filepath):
"""
Returns the path of the archive
:param str filepath: Path to file to extract or read
:return: path of the archive
:rtype: str
"""
# Checks if file path is a directory
if os.path.isdir(filepath):
path = os.path.abspath(filepath)
print("Archive already extracted. Viewing from {}...".format(path))
return path
# Checks if the filepath is a zipfile and continues to extract if it is
# if not it raises an error
elif not zipfile.is_zipfile(filepath):
# Misuse of TypeError? :P
raise TypeError("{} is not a zipfile".format(filepath))
archive_sha = SHA1_file(
filepath=filepath,
# Add version of slackviewer to hash as well so we can invalidate the cached copy
# if there are new features added
extra=to_bytes(slackviewer.__version__)
)
extracted_path = os.path.join(SLACKVIEWER_TEMP_PATH, archive_sha)
if os.path.exists(extracted_path):
print("{} already exists".format(extracted_path))
else:
# Extract zip
with zipfile.ZipFile(filepath) as zip:
print("{} extracting to {}...".format(filepath, extracted_path))
zip.extractall(path=extracted_path)
print("{} extracted to {}".format(filepath, extracted_path))
# Add additional file with archive info
create_archive_info(filepath, extracted_path, archive_sha)
return extracted_path | [
"def",
"extract_archive",
"(",
"filepath",
")",
":",
"# Checks if file path is a directory",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filepath",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"filepath",
")",
"print",
"(",
"\"Archive alre... | Returns the path of the archive
:param str filepath: Path to file to extract or read
:return: path of the archive
:rtype: str | [
"Returns",
"the",
"path",
"of",
"the",
"archive"
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/archive.py#L34-L79 | train | 207,956 |
hfaran/slack-export-viewer | slackviewer/archive.py | create_archive_info | def create_archive_info(filepath, extracted_path, archive_sha=None):
"""
Saves archive info to a json file
:param str filepath: Path to directory of archive
:param str extracted_path: Path to directory of archive
:param str archive_sha: SHA string created when archive was extracted from zip
"""
archive_info = {
"sha1": archive_sha,
"filename": os.path.split(filepath)[1],
}
with io.open(
os.path.join(
extracted_path,
".slackviewer_archive_info.json",
), 'w+', encoding="utf-8"
) as f:
s = json.dumps(archive_info, ensure_ascii=False)
s = to_unicode(s)
f.write(s) | python | def create_archive_info(filepath, extracted_path, archive_sha=None):
"""
Saves archive info to a json file
:param str filepath: Path to directory of archive
:param str extracted_path: Path to directory of archive
:param str archive_sha: SHA string created when archive was extracted from zip
"""
archive_info = {
"sha1": archive_sha,
"filename": os.path.split(filepath)[1],
}
with io.open(
os.path.join(
extracted_path,
".slackviewer_archive_info.json",
), 'w+', encoding="utf-8"
) as f:
s = json.dumps(archive_info, ensure_ascii=False)
s = to_unicode(s)
f.write(s) | [
"def",
"create_archive_info",
"(",
"filepath",
",",
"extracted_path",
",",
"archive_sha",
"=",
"None",
")",
":",
"archive_info",
"=",
"{",
"\"sha1\"",
":",
"archive_sha",
",",
"\"filename\"",
":",
"os",
".",
"path",
".",
"split",
"(",
"filepath",
")",
"[",
... | Saves archive info to a json file
:param str filepath: Path to directory of archive
:param str extracted_path: Path to directory of archive
:param str archive_sha: SHA string created when archive was extracted from zip | [
"Saves",
"archive",
"info",
"to",
"a",
"json",
"file"
] | bbe97f5cd9f72a0cc41c7395cef23860b44918f8 | https://github.com/hfaran/slack-export-viewer/blob/bbe97f5cd9f72a0cc41c7395cef23860b44918f8/slackviewer/archive.py#L85-L109 | train | 207,957 |
pazz/alot | alot/db/envelope.py | Envelope.get_all | def get_all(self, key, fallback=None):
"""returns all header values for given key"""
if key in self.headers:
value = self.headers[key]
else:
value = fallback or []
return value | python | def get_all(self, key, fallback=None):
"""returns all header values for given key"""
if key in self.headers:
value = self.headers[key]
else:
value = fallback or []
return value | [
"def",
"get_all",
"(",
"self",
",",
"key",
",",
"fallback",
"=",
"None",
")",
":",
"if",
"key",
"in",
"self",
".",
"headers",
":",
"value",
"=",
"self",
".",
"headers",
"[",
"key",
"]",
"else",
":",
"value",
"=",
"fallback",
"or",
"[",
"]",
"retu... | returns all header values for given key | [
"returns",
"all",
"header",
"values",
"for",
"given",
"key"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/envelope.py#L136-L142 | train | 207,958 |
pazz/alot | alot/db/envelope.py | Envelope.add | def add(self, key, value):
"""add header value"""
if key not in self.headers:
self.headers[key] = []
self.headers[key].append(value)
if self.sent_time:
self.modified_since_sent = True | python | def add(self, key, value):
"""add header value"""
if key not in self.headers:
self.headers[key] = []
self.headers[key].append(value)
if self.sent_time:
self.modified_since_sent = True | [
"def",
"add",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"key",
"not",
"in",
"self",
".",
"headers",
":",
"self",
".",
"headers",
"[",
"key",
"]",
"=",
"[",
"]",
"self",
".",
"headers",
"[",
"key",
"]",
".",
"append",
"(",
"value",
... | add header value | [
"add",
"header",
"value"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/envelope.py#L144-L151 | train | 207,959 |
pazz/alot | alot/db/envelope.py | Envelope.attach | def attach(self, attachment, filename=None, ctype=None):
"""
attach a file
:param attachment: File to attach, given as
:class:`~alot.db.attachment.Attachment` object or path to a file.
:type attachment: :class:`~alot.db.attachment.Attachment` or str
:param filename: filename to use in content-disposition.
Will be ignored if `path` matches multiple files
:param ctype: force content-type to be used for this attachment
:type ctype: str
"""
if isinstance(attachment, Attachment):
self.attachments.append(attachment)
elif isinstance(attachment, str):
path = os.path.expanduser(attachment)
part = helper.mimewrap(path, filename, ctype)
self.attachments.append(Attachment(part))
else:
raise TypeError('attach accepts an Attachment or str')
if self.sent_time:
self.modified_since_sent = True | python | def attach(self, attachment, filename=None, ctype=None):
"""
attach a file
:param attachment: File to attach, given as
:class:`~alot.db.attachment.Attachment` object or path to a file.
:type attachment: :class:`~alot.db.attachment.Attachment` or str
:param filename: filename to use in content-disposition.
Will be ignored if `path` matches multiple files
:param ctype: force content-type to be used for this attachment
:type ctype: str
"""
if isinstance(attachment, Attachment):
self.attachments.append(attachment)
elif isinstance(attachment, str):
path = os.path.expanduser(attachment)
part = helper.mimewrap(path, filename, ctype)
self.attachments.append(Attachment(part))
else:
raise TypeError('attach accepts an Attachment or str')
if self.sent_time:
self.modified_since_sent = True | [
"def",
"attach",
"(",
"self",
",",
"attachment",
",",
"filename",
"=",
"None",
",",
"ctype",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"attachment",
",",
"Attachment",
")",
":",
"self",
".",
"attachments",
".",
"append",
"(",
"attachment",
")",
"... | attach a file
:param attachment: File to attach, given as
:class:`~alot.db.attachment.Attachment` object or path to a file.
:type attachment: :class:`~alot.db.attachment.Attachment` or str
:param filename: filename to use in content-disposition.
Will be ignored if `path` matches multiple files
:param ctype: force content-type to be used for this attachment
:type ctype: str | [
"attach",
"a",
"file"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/envelope.py#L153-L176 | train | 207,960 |
pazz/alot | alot/db/envelope.py | Envelope.parse_template | def parse_template(self, tmp, reset=False, only_body=False):
"""parses a template or user edited string to fills this envelope.
:param tmp: the string to parse.
:type tmp: str
:param reset: remove previous envelope content
:type reset: bool
"""
logging.debug('GoT: """\n%s\n"""', tmp)
if self.sent_time:
self.modified_since_sent = True
if only_body:
self.body = tmp
else:
m = re.match(r'(?P<h>([a-zA-Z0-9_-]+:.+\n)*)\n?(?P<b>(\s*.*)*)',
tmp)
assert m
d = m.groupdict()
headertext = d['h']
self.body = d['b']
# remove existing content
if reset:
self.headers = {}
# go through multiline, utf-8 encoded headers
# we decode the edited text ourselves here as
# email.message_from_file can't deal with raw utf8 header values
key = value = None
for line in headertext.splitlines():
if re.match('[a-zA-Z0-9_-]+:', line): # new k/v pair
if key and value: # save old one from stack
self.add(key, value) # save
key, value = line.strip().split(':', 1) # parse new pair
# strip spaces, otherwise we end up having " foo" as value
# of "Subject: foo"
value = value.strip()
elif key and value: # append new line without key prefix
value += line
if key and value: # save last one if present
self.add(key, value)
# interpret 'Attach' pseudo header
if 'Attach' in self:
to_attach = []
for line in self.get_all('Attach'):
gpath = os.path.expanduser(line.strip())
to_attach += [g for g in glob.glob(gpath)
if os.path.isfile(g)]
logging.debug('Attaching: %s', to_attach)
for path in to_attach:
self.attach(path)
del self['Attach'] | python | def parse_template(self, tmp, reset=False, only_body=False):
"""parses a template or user edited string to fills this envelope.
:param tmp: the string to parse.
:type tmp: str
:param reset: remove previous envelope content
:type reset: bool
"""
logging.debug('GoT: """\n%s\n"""', tmp)
if self.sent_time:
self.modified_since_sent = True
if only_body:
self.body = tmp
else:
m = re.match(r'(?P<h>([a-zA-Z0-9_-]+:.+\n)*)\n?(?P<b>(\s*.*)*)',
tmp)
assert m
d = m.groupdict()
headertext = d['h']
self.body = d['b']
# remove existing content
if reset:
self.headers = {}
# go through multiline, utf-8 encoded headers
# we decode the edited text ourselves here as
# email.message_from_file can't deal with raw utf8 header values
key = value = None
for line in headertext.splitlines():
if re.match('[a-zA-Z0-9_-]+:', line): # new k/v pair
if key and value: # save old one from stack
self.add(key, value) # save
key, value = line.strip().split(':', 1) # parse new pair
# strip spaces, otherwise we end up having " foo" as value
# of "Subject: foo"
value = value.strip()
elif key and value: # append new line without key prefix
value += line
if key and value: # save last one if present
self.add(key, value)
# interpret 'Attach' pseudo header
if 'Attach' in self:
to_attach = []
for line in self.get_all('Attach'):
gpath = os.path.expanduser(line.strip())
to_attach += [g for g in glob.glob(gpath)
if os.path.isfile(g)]
logging.debug('Attaching: %s', to_attach)
for path in to_attach:
self.attach(path)
del self['Attach'] | [
"def",
"parse_template",
"(",
"self",
",",
"tmp",
",",
"reset",
"=",
"False",
",",
"only_body",
"=",
"False",
")",
":",
"logging",
".",
"debug",
"(",
"'GoT: \"\"\"\\n%s\\n\"\"\"'",
",",
"tmp",
")",
"if",
"self",
".",
"sent_time",
":",
"self",
".",
"modif... | parses a template or user edited string to fills this envelope.
:param tmp: the string to parse.
:type tmp: str
:param reset: remove previous envelope content
:type reset: bool | [
"parses",
"a",
"template",
"or",
"user",
"edited",
"string",
"to",
"fills",
"this",
"envelope",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/envelope.py#L293-L348 | train | 207,961 |
pazz/alot | alot/helper.py | split_commandline | def split_commandline(s, comments=False, posix=True):
"""
splits semi-colon separated commandlines
"""
# shlex seems to remove unescaped quotes and backslashes
s = s.replace('\\', '\\\\')
s = s.replace('\'', '\\\'')
s = s.replace('\"', '\\\"')
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
lex.whitespace = ';'
if not comments:
lex.commenters = ''
return list(lex) | python | def split_commandline(s, comments=False, posix=True):
"""
splits semi-colon separated commandlines
"""
# shlex seems to remove unescaped quotes and backslashes
s = s.replace('\\', '\\\\')
s = s.replace('\'', '\\\'')
s = s.replace('\"', '\\\"')
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
lex.whitespace = ';'
if not comments:
lex.commenters = ''
return list(lex) | [
"def",
"split_commandline",
"(",
"s",
",",
"comments",
"=",
"False",
",",
"posix",
"=",
"True",
")",
":",
"# shlex seems to remove unescaped quotes and backslashes",
"s",
"=",
"s",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
"s",
"=",
"s",
".",
"r... | splits semi-colon separated commandlines | [
"splits",
"semi",
"-",
"colon",
"separated",
"commandlines"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L29-L42 | train | 207,962 |
pazz/alot | alot/helper.py | string_sanitize | def string_sanitize(string, tab_width=8):
r"""
strips, and replaces non-printable characters
:param tab_width: number of spaces to replace tabs with. Read from
`globals.tabwidth` setting if `None`
:type tab_width: int or `None`
>>> string_sanitize(' foo\rbar ', 8)
' foobar '
>>> string_sanitize('foo\tbar', 8)
'foo bar'
>>> string_sanitize('foo\t\tbar', 8)
'foo bar'
"""
string = string.replace('\r', '')
lines = list()
for line in string.split('\n'):
tab_count = line.count('\t')
if tab_count > 0:
line_length = 0
new_line = list()
for i, chunk in enumerate(line.split('\t')):
line_length += len(chunk)
new_line.append(chunk)
if i < tab_count:
next_tab_stop_in = tab_width - (line_length % tab_width)
new_line.append(' ' * next_tab_stop_in)
line_length += next_tab_stop_in
lines.append(''.join(new_line))
else:
lines.append(line)
return '\n'.join(lines) | python | def string_sanitize(string, tab_width=8):
r"""
strips, and replaces non-printable characters
:param tab_width: number of spaces to replace tabs with. Read from
`globals.tabwidth` setting if `None`
:type tab_width: int or `None`
>>> string_sanitize(' foo\rbar ', 8)
' foobar '
>>> string_sanitize('foo\tbar', 8)
'foo bar'
>>> string_sanitize('foo\t\tbar', 8)
'foo bar'
"""
string = string.replace('\r', '')
lines = list()
for line in string.split('\n'):
tab_count = line.count('\t')
if tab_count > 0:
line_length = 0
new_line = list()
for i, chunk in enumerate(line.split('\t')):
line_length += len(chunk)
new_line.append(chunk)
if i < tab_count:
next_tab_stop_in = tab_width - (line_length % tab_width)
new_line.append(' ' * next_tab_stop_in)
line_length += next_tab_stop_in
lines.append(''.join(new_line))
else:
lines.append(line)
return '\n'.join(lines) | [
"def",
"string_sanitize",
"(",
"string",
",",
"tab_width",
"=",
"8",
")",
":",
"string",
"=",
"string",
".",
"replace",
"(",
"'\\r'",
",",
"''",
")",
"lines",
"=",
"list",
"(",
")",
"for",
"line",
"in",
"string",
".",
"split",
"(",
"'\\n'",
")",
":... | r"""
strips, and replaces non-printable characters
:param tab_width: number of spaces to replace tabs with. Read from
`globals.tabwidth` setting if `None`
:type tab_width: int or `None`
>>> string_sanitize(' foo\rbar ', 8)
' foobar '
>>> string_sanitize('foo\tbar', 8)
'foo bar'
>>> string_sanitize('foo\t\tbar', 8)
'foo bar' | [
"r",
"strips",
"and",
"replaces",
"non",
"-",
"printable",
"characters"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L55-L92 | train | 207,963 |
pazz/alot | alot/helper.py | string_decode | def string_decode(string, enc='ascii'):
"""
safely decodes string to unicode bytestring, respecting `enc` as a hint.
:param string: the string to decode
:type string: str or unicode
:param enc: a hint what encoding is used in string ('ascii', 'utf-8', ...)
:type enc: str
:returns: the unicode decoded input string
:rtype: unicode
"""
if enc is None:
enc = 'ascii'
try:
string = str(string, enc, errors='replace')
except LookupError: # malformed enc string
string = string.decode('ascii', errors='replace')
except TypeError: # already str
pass
return string | python | def string_decode(string, enc='ascii'):
"""
safely decodes string to unicode bytestring, respecting `enc` as a hint.
:param string: the string to decode
:type string: str or unicode
:param enc: a hint what encoding is used in string ('ascii', 'utf-8', ...)
:type enc: str
:returns: the unicode decoded input string
:rtype: unicode
"""
if enc is None:
enc = 'ascii'
try:
string = str(string, enc, errors='replace')
except LookupError: # malformed enc string
string = string.decode('ascii', errors='replace')
except TypeError: # already str
pass
return string | [
"def",
"string_decode",
"(",
"string",
",",
"enc",
"=",
"'ascii'",
")",
":",
"if",
"enc",
"is",
"None",
":",
"enc",
"=",
"'ascii'",
"try",
":",
"string",
"=",
"str",
"(",
"string",
",",
"enc",
",",
"errors",
"=",
"'replace'",
")",
"except",
"LookupEr... | safely decodes string to unicode bytestring, respecting `enc` as a hint.
:param string: the string to decode
:type string: str or unicode
:param enc: a hint what encoding is used in string ('ascii', 'utf-8', ...)
:type enc: str
:returns: the unicode decoded input string
:rtype: unicode | [
"safely",
"decodes",
"string",
"to",
"unicode",
"bytestring",
"respecting",
"enc",
"as",
"a",
"hint",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L95-L116 | train | 207,964 |
pazz/alot | alot/helper.py | shorten | def shorten(string, maxlen):
"""shortens string if longer than maxlen, appending ellipsis"""
if 1 < maxlen < len(string):
string = string[:maxlen - 1] + u'…'
return string[:maxlen] | python | def shorten(string, maxlen):
"""shortens string if longer than maxlen, appending ellipsis"""
if 1 < maxlen < len(string):
string = string[:maxlen - 1] + u'…'
return string[:maxlen] | [
"def",
"shorten",
"(",
"string",
",",
"maxlen",
")",
":",
"if",
"1",
"<",
"maxlen",
"<",
"len",
"(",
"string",
")",
":",
"string",
"=",
"string",
"[",
":",
"maxlen",
"-",
"1",
"]",
"+",
"u'…'",
"return",
"string",
"[",
":",
"maxlen",
"]"
] | shortens string if longer than maxlen, appending ellipsis | [
"shortens",
"string",
"if",
"longer",
"than",
"maxlen",
"appending",
"ellipsis"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L119-L123 | train | 207,965 |
pazz/alot | alot/helper.py | call_cmd | def call_cmd(cmdlist, stdin=None):
"""
get a shell commands output, error message and return value and immediately
return.
.. warning::
This returns with the first screen content for interactive commands.
:param cmdlist: shellcommand to call, already splitted into a list accepted
by :meth:`subprocess.Popen`
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str, bytes, or None
:return: triple of stdout, stderr, return value of the shell command
:rtype: str, str, int
"""
termenc = urwid.util.detected_encoding
if isinstance(stdin, str):
stdin = stdin.encode(termenc)
try:
logging.debug("Calling %s" % cmdlist)
proc = subprocess.Popen(
cmdlist,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin is not None else None)
except OSError as e:
out = b''
err = e.strerror
ret = e.errno
else:
out, err = proc.communicate(stdin)
ret = proc.returncode
out = string_decode(out, termenc)
err = string_decode(err, termenc)
return out, err, ret | python | def call_cmd(cmdlist, stdin=None):
"""
get a shell commands output, error message and return value and immediately
return.
.. warning::
This returns with the first screen content for interactive commands.
:param cmdlist: shellcommand to call, already splitted into a list accepted
by :meth:`subprocess.Popen`
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str, bytes, or None
:return: triple of stdout, stderr, return value of the shell command
:rtype: str, str, int
"""
termenc = urwid.util.detected_encoding
if isinstance(stdin, str):
stdin = stdin.encode(termenc)
try:
logging.debug("Calling %s" % cmdlist)
proc = subprocess.Popen(
cmdlist,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin is not None else None)
except OSError as e:
out = b''
err = e.strerror
ret = e.errno
else:
out, err = proc.communicate(stdin)
ret = proc.returncode
out = string_decode(out, termenc)
err = string_decode(err, termenc)
return out, err, ret | [
"def",
"call_cmd",
"(",
"cmdlist",
",",
"stdin",
"=",
"None",
")",
":",
"termenc",
"=",
"urwid",
".",
"util",
".",
"detected_encoding",
"if",
"isinstance",
"(",
"stdin",
",",
"str",
")",
":",
"stdin",
"=",
"stdin",
".",
"encode",
"(",
"termenc",
")",
... | get a shell commands output, error message and return value and immediately
return.
.. warning::
This returns with the first screen content for interactive commands.
:param cmdlist: shellcommand to call, already splitted into a list accepted
by :meth:`subprocess.Popen`
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str, bytes, or None
:return: triple of stdout, stderr, return value of the shell command
:rtype: str, str, int | [
"get",
"a",
"shell",
"commands",
"output",
"error",
"message",
"and",
"return",
"value",
"and",
"immediately",
"return",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L258-L296 | train | 207,966 |
pazz/alot | alot/helper.py | call_cmd_async | async def call_cmd_async(cmdlist, stdin=None, env=None):
"""Given a command, call that command asynchronously and return the output.
This function only handles `OSError` when creating the subprocess, any
other exceptions raised either durring subprocess creation or while
exchanging data with the subprocess are the caller's responsibility to
handle.
If such an `OSError` is caught, then returncode will be set to 1, and the
error value will be set to the str() method fo the exception.
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str
:return: Tuple of stdout, stderr, returncode
:rtype: tuple[str, str, int]
"""
termenc = urwid.util.detected_encoding
cmdlist = [s.encode(termenc) for s in cmdlist]
environment = os.environ.copy()
if env is not None:
environment.update(env)
logging.debug('ENV = %s', environment)
logging.debug('CMD = %s', cmdlist)
try:
proc = await asyncio.create_subprocess_exec(
*cmdlist,
env=environment,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE if stdin else None)
except OSError as e:
return ('', str(e), 1)
out, err = await proc.communicate(stdin.encode(termenc) if stdin else None)
return (out.decode(termenc), err.decode(termenc), proc.returncode) | python | async def call_cmd_async(cmdlist, stdin=None, env=None):
"""Given a command, call that command asynchronously and return the output.
This function only handles `OSError` when creating the subprocess, any
other exceptions raised either durring subprocess creation or while
exchanging data with the subprocess are the caller's responsibility to
handle.
If such an `OSError` is caught, then returncode will be set to 1, and the
error value will be set to the str() method fo the exception.
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str
:return: Tuple of stdout, stderr, returncode
:rtype: tuple[str, str, int]
"""
termenc = urwid.util.detected_encoding
cmdlist = [s.encode(termenc) for s in cmdlist]
environment = os.environ.copy()
if env is not None:
environment.update(env)
logging.debug('ENV = %s', environment)
logging.debug('CMD = %s', cmdlist)
try:
proc = await asyncio.create_subprocess_exec(
*cmdlist,
env=environment,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE if stdin else None)
except OSError as e:
return ('', str(e), 1)
out, err = await proc.communicate(stdin.encode(termenc) if stdin else None)
return (out.decode(termenc), err.decode(termenc), proc.returncode) | [
"async",
"def",
"call_cmd_async",
"(",
"cmdlist",
",",
"stdin",
"=",
"None",
",",
"env",
"=",
"None",
")",
":",
"termenc",
"=",
"urwid",
".",
"util",
".",
"detected_encoding",
"cmdlist",
"=",
"[",
"s",
".",
"encode",
"(",
"termenc",
")",
"for",
"s",
... | Given a command, call that command asynchronously and return the output.
This function only handles `OSError` when creating the subprocess, any
other exceptions raised either durring subprocess creation or while
exchanging data with the subprocess are the caller's responsibility to
handle.
If such an `OSError` is caught, then returncode will be set to 1, and the
error value will be set to the str() method fo the exception.
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str
:return: Tuple of stdout, stderr, returncode
:rtype: tuple[str, str, int] | [
"Given",
"a",
"command",
"call",
"that",
"command",
"asynchronously",
"and",
"return",
"the",
"output",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L299-L334 | train | 207,967 |
pazz/alot | alot/helper.py | guess_mimetype | def guess_mimetype(blob):
"""
uses file magic to determine the mime-type of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: mime-type, falls back to 'application/octet-stream'
:rtype: str
"""
mimetype = 'application/octet-stream'
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However, it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_TYPE)
m.load()
magictype = m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
# cf. issue #841
magictype = magic.from_buffer(blob, mime=True) or magictype
else:
raise Exception('Unknown magic API')
# libmagic does not always return proper mimetype strings, cf. issue #459
if re.match(r'\w+\/\w+', magictype):
mimetype = magictype
return mimetype | python | def guess_mimetype(blob):
"""
uses file magic to determine the mime-type of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: mime-type, falls back to 'application/octet-stream'
:rtype: str
"""
mimetype = 'application/octet-stream'
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However, it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_TYPE)
m.load()
magictype = m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
# cf. issue #841
magictype = magic.from_buffer(blob, mime=True) or magictype
else:
raise Exception('Unknown magic API')
# libmagic does not always return proper mimetype strings, cf. issue #459
if re.match(r'\w+\/\w+', magictype):
mimetype = magictype
return mimetype | [
"def",
"guess_mimetype",
"(",
"blob",
")",
":",
"mimetype",
"=",
"'application/octet-stream'",
"# this is a bit of a hack to support different versions of python magic.",
"# Hopefully at some point this will no longer be necessary",
"#",
"# the version with open() is the bindings shipped wit... | uses file magic to determine the mime-type of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: mime-type, falls back to 'application/octet-stream'
:rtype: str | [
"uses",
"file",
"magic",
"to",
"determine",
"the",
"mime",
"-",
"type",
"of",
"the",
"given",
"data",
"blob",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L337-L371 | train | 207,968 |
pazz/alot | alot/helper.py | guess_encoding | def guess_encoding(blob):
"""
uses file magic to determine the encoding of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: encoding
:rtype: str
"""
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_ENCODING)
m.load()
return m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
m = magic.Magic(mime_encoding=True)
return m.from_buffer(blob)
else:
raise Exception('Unknown magic API') | python | def guess_encoding(blob):
"""
uses file magic to determine the encoding of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: encoding
:rtype: str
"""
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_ENCODING)
m.load()
return m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
m = magic.Magic(mime_encoding=True)
return m.from_buffer(blob)
else:
raise Exception('Unknown magic API') | [
"def",
"guess_encoding",
"(",
"blob",
")",
":",
"# this is a bit of a hack to support different versions of python magic.",
"# Hopefully at some point this will no longer be necessary",
"#",
"# the version with open() is the bindings shipped with the file source from",
"# http://darwinsys.com/fi... | uses file magic to determine the encoding of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: encoding
:rtype: str | [
"uses",
"file",
"magic",
"to",
"determine",
"the",
"encoding",
"of",
"the",
"given",
"data",
"blob",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L374-L402 | train | 207,969 |
pazz/alot | alot/helper.py | libmagic_version_at_least | def libmagic_version_at_least(version):
"""
checks if the libmagic library installed is more recent than a given
version.
:param version: minimum version expected in the form XYY (i.e. 5.14 -> 514)
with XYY >= 513
"""
if hasattr(magic, 'open'):
magic_wrapper = magic._libraries['magic']
elif hasattr(magic, 'from_buffer'):
magic_wrapper = magic.libmagic
else:
raise Exception('Unknown magic API')
if not hasattr(magic_wrapper, 'magic_version'):
# The magic_version function has been introduced in libmagic 5.13,
# if it's not present, we can't guess right, so let's assume False
return False
return magic_wrapper.magic_version >= version | python | def libmagic_version_at_least(version):
"""
checks if the libmagic library installed is more recent than a given
version.
:param version: minimum version expected in the form XYY (i.e. 5.14 -> 514)
with XYY >= 513
"""
if hasattr(magic, 'open'):
magic_wrapper = magic._libraries['magic']
elif hasattr(magic, 'from_buffer'):
magic_wrapper = magic.libmagic
else:
raise Exception('Unknown magic API')
if not hasattr(magic_wrapper, 'magic_version'):
# The magic_version function has been introduced in libmagic 5.13,
# if it's not present, we can't guess right, so let's assume False
return False
return magic_wrapper.magic_version >= version | [
"def",
"libmagic_version_at_least",
"(",
"version",
")",
":",
"if",
"hasattr",
"(",
"magic",
",",
"'open'",
")",
":",
"magic_wrapper",
"=",
"magic",
".",
"_libraries",
"[",
"'magic'",
"]",
"elif",
"hasattr",
"(",
"magic",
",",
"'from_buffer'",
")",
":",
"m... | checks if the libmagic library installed is more recent than a given
version.
:param version: minimum version expected in the form XYY (i.e. 5.14 -> 514)
with XYY >= 513 | [
"checks",
"if",
"the",
"libmagic",
"library",
"installed",
"is",
"more",
"recent",
"than",
"a",
"given",
"version",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L416-L436 | train | 207,970 |
pazz/alot | alot/helper.py | mimewrap | def mimewrap(path, filename=None, ctype=None):
"""Take the contents of the given path and wrap them into an email MIME
part according to the content type. The content type is auto detected from
the actual file contents and the file name if it is not given.
:param path: the path to the file contents
:type path: str
:param filename: the file name to use in the generated MIME part
:type filename: str or None
:param ctype: the content type of the file contents in path
:type ctype: str or None
:returns: the message MIME part storing the data from path
:rtype: subclasses of email.mime.base.MIMEBase
"""
with open(path, 'rb') as f:
content = f.read()
if not ctype:
ctype = guess_mimetype(content)
# libmagic < 5.12 incorrectly detects excel/powerpoint files as
# 'application/msword' (see #179 and #186 in libmagic bugtracker)
# This is a workaround, based on file extension, useful as long
# as distributions still ship libmagic 5.11.
if (ctype == 'application/msword' and
not libmagic_version_at_least(513)):
mimetype, _ = mimetypes.guess_type(path)
if mimetype:
ctype = mimetype
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
part = MIMEText(content.decode(guess_encoding(content), 'replace'),
_subtype=subtype,
_charset='utf-8')
elif maintype == 'image':
part = MIMEImage(content, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(content, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(content)
# Encode the payload using Base64
email.encoders.encode_base64(part)
# Set the filename parameter
if not filename:
filename = os.path.basename(path)
part.add_header('Content-Disposition', 'attachment',
filename=filename)
return part | python | def mimewrap(path, filename=None, ctype=None):
"""Take the contents of the given path and wrap them into an email MIME
part according to the content type. The content type is auto detected from
the actual file contents and the file name if it is not given.
:param path: the path to the file contents
:type path: str
:param filename: the file name to use in the generated MIME part
:type filename: str or None
:param ctype: the content type of the file contents in path
:type ctype: str or None
:returns: the message MIME part storing the data from path
:rtype: subclasses of email.mime.base.MIMEBase
"""
with open(path, 'rb') as f:
content = f.read()
if not ctype:
ctype = guess_mimetype(content)
# libmagic < 5.12 incorrectly detects excel/powerpoint files as
# 'application/msword' (see #179 and #186 in libmagic bugtracker)
# This is a workaround, based on file extension, useful as long
# as distributions still ship libmagic 5.11.
if (ctype == 'application/msword' and
not libmagic_version_at_least(513)):
mimetype, _ = mimetypes.guess_type(path)
if mimetype:
ctype = mimetype
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
part = MIMEText(content.decode(guess_encoding(content), 'replace'),
_subtype=subtype,
_charset='utf-8')
elif maintype == 'image':
part = MIMEImage(content, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(content, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(content)
# Encode the payload using Base64
email.encoders.encode_base64(part)
# Set the filename parameter
if not filename:
filename = os.path.basename(path)
part.add_header('Content-Disposition', 'attachment',
filename=filename)
return part | [
"def",
"mimewrap",
"(",
"path",
",",
"filename",
"=",
"None",
",",
"ctype",
"=",
"None",
")",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"if",
"not",
"ctype",
":",
"ctype",
"=... | Take the contents of the given path and wrap them into an email MIME
part according to the content type. The content type is auto detected from
the actual file contents and the file name if it is not given.
:param path: the path to the file contents
:type path: str
:param filename: the file name to use in the generated MIME part
:type filename: str or None
:param ctype: the content type of the file contents in path
:type ctype: str or None
:returns: the message MIME part storing the data from path
:rtype: subclasses of email.mime.base.MIMEBase | [
"Take",
"the",
"contents",
"of",
"the",
"given",
"path",
"and",
"wrap",
"them",
"into",
"an",
"email",
"MIME",
"part",
"according",
"to",
"the",
"content",
"type",
".",
"The",
"content",
"type",
"is",
"auto",
"detected",
"from",
"the",
"actual",
"file",
... | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L440-L488 | train | 207,971 |
pazz/alot | alot/helper.py | parse_mailcap_nametemplate | def parse_mailcap_nametemplate(tmplate='%s'):
"""this returns a prefix and suffix to be used
in the tempfile module for a given mailcap nametemplate string"""
nt_list = tmplate.split('%s')
template_prefix = ''
template_suffix = ''
if len(nt_list) == 2:
template_suffix = nt_list[1]
template_prefix = nt_list[0]
else:
template_suffix = tmplate
return (template_prefix, template_suffix) | python | def parse_mailcap_nametemplate(tmplate='%s'):
"""this returns a prefix and suffix to be used
in the tempfile module for a given mailcap nametemplate string"""
nt_list = tmplate.split('%s')
template_prefix = ''
template_suffix = ''
if len(nt_list) == 2:
template_suffix = nt_list[1]
template_prefix = nt_list[0]
else:
template_suffix = tmplate
return (template_prefix, template_suffix) | [
"def",
"parse_mailcap_nametemplate",
"(",
"tmplate",
"=",
"'%s'",
")",
":",
"nt_list",
"=",
"tmplate",
".",
"split",
"(",
"'%s'",
")",
"template_prefix",
"=",
"''",
"template_suffix",
"=",
"''",
"if",
"len",
"(",
"nt_list",
")",
"==",
"2",
":",
"template_s... | this returns a prefix and suffix to be used
in the tempfile module for a given mailcap nametemplate string | [
"this",
"returns",
"a",
"prefix",
"and",
"suffix",
"to",
"be",
"used",
"in",
"the",
"tempfile",
"module",
"for",
"a",
"given",
"mailcap",
"nametemplate",
"string"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L524-L535 | train | 207,972 |
pazz/alot | alot/helper.py | parse_mailto | def parse_mailto(mailto_str):
"""
Interpret mailto-string
:param mailto_str: the string to interpret. Must conform to :rfc:2368.
:type mailto_str: str
:return: the header fields and the body found in the mailto link as a tuple
of length two
:rtype: tuple(dict(str->list(str)), str)
"""
if mailto_str.startswith('mailto:'):
import urllib.parse
to_str, parms_str = mailto_str[7:].partition('?')[::2]
headers = {}
body = u''
to = urllib.parse.unquote(to_str)
if to:
headers['To'] = [to]
for s in parms_str.split('&'):
key, value = s.partition('=')[::2]
key = key.capitalize()
if key == 'Body':
body = urllib.parse.unquote(value)
elif value:
headers[key] = [urllib.parse.unquote(value)]
return (headers, body)
else:
return (None, None) | python | def parse_mailto(mailto_str):
"""
Interpret mailto-string
:param mailto_str: the string to interpret. Must conform to :rfc:2368.
:type mailto_str: str
:return: the header fields and the body found in the mailto link as a tuple
of length two
:rtype: tuple(dict(str->list(str)), str)
"""
if mailto_str.startswith('mailto:'):
import urllib.parse
to_str, parms_str = mailto_str[7:].partition('?')[::2]
headers = {}
body = u''
to = urllib.parse.unquote(to_str)
if to:
headers['To'] = [to]
for s in parms_str.split('&'):
key, value = s.partition('=')[::2]
key = key.capitalize()
if key == 'Body':
body = urllib.parse.unquote(value)
elif value:
headers[key] = [urllib.parse.unquote(value)]
return (headers, body)
else:
return (None, None) | [
"def",
"parse_mailto",
"(",
"mailto_str",
")",
":",
"if",
"mailto_str",
".",
"startswith",
"(",
"'mailto:'",
")",
":",
"import",
"urllib",
".",
"parse",
"to_str",
",",
"parms_str",
"=",
"mailto_str",
"[",
"7",
":",
"]",
".",
"partition",
"(",
"'?'",
")",... | Interpret mailto-string
:param mailto_str: the string to interpret. Must conform to :rfc:2368.
:type mailto_str: str
:return: the header fields and the body found in the mailto link as a tuple
of length two
:rtype: tuple(dict(str->list(str)), str) | [
"Interpret",
"mailto",
"-",
"string"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L538-L567 | train | 207,973 |
pazz/alot | alot/addressbook/__init__.py | AddressBook.lookup | def lookup(self, query=''):
"""looks up all contacts where name or address match query"""
res = []
query = re.compile('.*%s.*' % re.escape(query), self.reflags)
for name, email in self.get_contacts():
if query.match(name) or query.match(email):
res.append((name, email))
return res | python | def lookup(self, query=''):
"""looks up all contacts where name or address match query"""
res = []
query = re.compile('.*%s.*' % re.escape(query), self.reflags)
for name, email in self.get_contacts():
if query.match(name) or query.match(email):
res.append((name, email))
return res | [
"def",
"lookup",
"(",
"self",
",",
"query",
"=",
"''",
")",
":",
"res",
"=",
"[",
"]",
"query",
"=",
"re",
".",
"compile",
"(",
"'.*%s.*'",
"%",
"re",
".",
"escape",
"(",
"query",
")",
",",
"self",
".",
"reflags",
")",
"for",
"name",
",",
"emai... | looks up all contacts where name or address match query | [
"looks",
"up",
"all",
"contacts",
"where",
"name",
"or",
"address",
"match",
"query"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/addressbook/__init__.py#L32-L39 | train | 207,974 |
pazz/alot | alot/buffers/thread.py | ThreadBuffer.set_focus | def set_focus(self, pos):
"Set the focus in the underlying body widget."
logging.debug('setting focus to %s ', pos)
self.body.set_focus(pos) | python | def set_focus(self, pos):
"Set the focus in the underlying body widget."
logging.debug('setting focus to %s ', pos)
self.body.set_focus(pos) | [
"def",
"set_focus",
"(",
"self",
",",
"pos",
")",
":",
"logging",
".",
"debug",
"(",
"'setting focus to %s '",
",",
"pos",
")",
"self",
".",
"body",
".",
"set_focus",
"(",
"pos",
")"
] | Set the focus in the underlying body widget. | [
"Set",
"the",
"focus",
"in",
"the",
"underlying",
"body",
"widget",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L178-L181 | train | 207,975 |
pazz/alot | alot/buffers/thread.py | ThreadBuffer.focus_parent | def focus_parent(self):
"""move focus to parent of currently focussed message"""
mid = self.get_selected_mid()
newpos = self._tree.parent_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | python | def focus_parent(self):
"""move focus to parent of currently focussed message"""
mid = self.get_selected_mid()
newpos = self._tree.parent_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | [
"def",
"focus_parent",
"(",
"self",
")",
":",
"mid",
"=",
"self",
".",
"get_selected_mid",
"(",
")",
"newpos",
"=",
"self",
".",
"_tree",
".",
"parent_position",
"(",
"mid",
")",
"if",
"newpos",
"is",
"not",
"None",
":",
"newpos",
"=",
"self",
".",
"... | move focus to parent of currently focussed message | [
"move",
"focus",
"to",
"parent",
"of",
"currently",
"focussed",
"message"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L199-L205 | train | 207,976 |
pazz/alot | alot/buffers/thread.py | ThreadBuffer.focus_first_reply | def focus_first_reply(self):
"""move focus to first reply to currently focussed message"""
mid = self.get_selected_mid()
newpos = self._tree.first_child_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | python | def focus_first_reply(self):
"""move focus to first reply to currently focussed message"""
mid = self.get_selected_mid()
newpos = self._tree.first_child_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | [
"def",
"focus_first_reply",
"(",
"self",
")",
":",
"mid",
"=",
"self",
".",
"get_selected_mid",
"(",
")",
"newpos",
"=",
"self",
".",
"_tree",
".",
"first_child_position",
"(",
"mid",
")",
"if",
"newpos",
"is",
"not",
"None",
":",
"newpos",
"=",
"self",
... | move focus to first reply to currently focussed message | [
"move",
"focus",
"to",
"first",
"reply",
"to",
"currently",
"focussed",
"message"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L207-L213 | train | 207,977 |
pazz/alot | alot/buffers/thread.py | ThreadBuffer.focus_last_reply | def focus_last_reply(self):
"""move focus to last reply to currently focussed message"""
mid = self.get_selected_mid()
newpos = self._tree.last_child_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | python | def focus_last_reply(self):
"""move focus to last reply to currently focussed message"""
mid = self.get_selected_mid()
newpos = self._tree.last_child_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | [
"def",
"focus_last_reply",
"(",
"self",
")",
":",
"mid",
"=",
"self",
".",
"get_selected_mid",
"(",
")",
"newpos",
"=",
"self",
".",
"_tree",
".",
"last_child_position",
"(",
"mid",
")",
"if",
"newpos",
"is",
"not",
"None",
":",
"newpos",
"=",
"self",
... | move focus to last reply to currently focussed message | [
"move",
"focus",
"to",
"last",
"reply",
"to",
"currently",
"focussed",
"message"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L215-L221 | train | 207,978 |
pazz/alot | alot/buffers/thread.py | ThreadBuffer.focus_next_sibling | def focus_next_sibling(self):
"""focus next sibling of currently focussed message in thread tree"""
mid = self.get_selected_mid()
newpos = self._tree.next_sibling_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | python | def focus_next_sibling(self):
"""focus next sibling of currently focussed message in thread tree"""
mid = self.get_selected_mid()
newpos = self._tree.next_sibling_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | [
"def",
"focus_next_sibling",
"(",
"self",
")",
":",
"mid",
"=",
"self",
".",
"get_selected_mid",
"(",
")",
"newpos",
"=",
"self",
".",
"_tree",
".",
"next_sibling_position",
"(",
"mid",
")",
"if",
"newpos",
"is",
"not",
"None",
":",
"newpos",
"=",
"self"... | focus next sibling of currently focussed message in thread tree | [
"focus",
"next",
"sibling",
"of",
"currently",
"focussed",
"message",
"in",
"thread",
"tree"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L223-L229 | train | 207,979 |
pazz/alot | alot/buffers/thread.py | ThreadBuffer.focus_prev_sibling | def focus_prev_sibling(self):
"""
focus previous sibling of currently focussed message in thread tree
"""
mid = self.get_selected_mid()
localroot = self._sanitize_position((mid,))
if localroot == self.get_focus()[1]:
newpos = self._tree.prev_sibling_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
else:
newpos = localroot
if newpos is not None:
self.body.set_focus(newpos) | python | def focus_prev_sibling(self):
"""
focus previous sibling of currently focussed message in thread tree
"""
mid = self.get_selected_mid()
localroot = self._sanitize_position((mid,))
if localroot == self.get_focus()[1]:
newpos = self._tree.prev_sibling_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
else:
newpos = localroot
if newpos is not None:
self.body.set_focus(newpos) | [
"def",
"focus_prev_sibling",
"(",
"self",
")",
":",
"mid",
"=",
"self",
".",
"get_selected_mid",
"(",
")",
"localroot",
"=",
"self",
".",
"_sanitize_position",
"(",
"(",
"mid",
",",
")",
")",
"if",
"localroot",
"==",
"self",
".",
"get_focus",
"(",
")",
... | focus previous sibling of currently focussed message in thread tree | [
"focus",
"previous",
"sibling",
"of",
"currently",
"focussed",
"message",
"in",
"thread",
"tree"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L231-L244 | train | 207,980 |
pazz/alot | alot/buffers/thread.py | ThreadBuffer.focus_next | def focus_next(self):
"""focus next message in depth first order"""
mid = self.get_selected_mid()
newpos = self._tree.next_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | python | def focus_next(self):
"""focus next message in depth first order"""
mid = self.get_selected_mid()
newpos = self._tree.next_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos) | [
"def",
"focus_next",
"(",
"self",
")",
":",
"mid",
"=",
"self",
".",
"get_selected_mid",
"(",
")",
"newpos",
"=",
"self",
".",
"_tree",
".",
"next_position",
"(",
"mid",
")",
"if",
"newpos",
"is",
"not",
"None",
":",
"newpos",
"=",
"self",
".",
"_san... | focus next message in depth first order | [
"focus",
"next",
"message",
"in",
"depth",
"first",
"order"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L246-L252 | train | 207,981 |
def focus_prev(self):
    """Shift focus to the previous message in depth-first order."""
    current = self.get_selected_mid()
    sanitized_root = self._sanitize_position((current,))
    if sanitized_root != self.get_focus()[1]:
        # Not positioned on the message root yet: move there first.
        target = sanitized_root
    else:
        target = self._tree.prev_position(current)
        if target is not None:
            target = self._sanitize_position((target,))
    if target is not None:
        self.body.set_focus(target)
"""focus previous message in depth first order"""
mid = self.get_selected_mid()
localroot = self._sanitize_position((mid,))
if localroot == self.get_focus()[1]:
newpos = self._tree.prev_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
else:
newpos = localroot
if newpos is not None:
self.body.set_focus(newpos) | [
"def",
"focus_prev",
"(",
"self",
")",
":",
"mid",
"=",
"self",
".",
"get_selected_mid",
"(",
")",
"localroot",
"=",
"self",
".",
"_sanitize_position",
"(",
"(",
"mid",
",",
")",
")",
"if",
"localroot",
"==",
"self",
".",
"get_focus",
"(",
")",
"[",
... | focus previous message in depth first order | [
"focus",
"previous",
"message",
"in",
"depth",
"first",
"order"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L254-L265 | train | 207,982 |
def focus_property(self, prop, direction):
    """Walk the thread tree in *direction* and focus the first
    message tree for which *prop* returns a true value."""
    pos = direction(self.get_selected_mid())
    while pos is not None:
        if prop(self._tree[pos]):
            self.body.set_focus(self._sanitize_position((pos,)))
            return
        pos = direction(pos)
"""does a walk in the given direction and focuses the
first message tree that matches the given property"""
newpos = self.get_selected_mid()
newpos = direction(newpos)
while newpos is not None:
MT = self._tree[newpos]
if prop(MT):
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos)
break
newpos = direction(newpos) | [
"def",
"focus_property",
"(",
"self",
",",
"prop",
",",
"direction",
")",
":",
"newpos",
"=",
"self",
".",
"get_selected_mid",
"(",
")",
"newpos",
"=",
"direction",
"(",
"newpos",
")",
"while",
"newpos",
"is",
"not",
"None",
":",
"MT",
"=",
"self",
"."... | does a walk in the given direction and focuses the
first message tree that matches the given property | [
"does",
"a",
"walk",
"in",
"the",
"given",
"direction",
"and",
"focuses",
"the",
"first",
"message",
"tree",
"that",
"matches",
"the",
"given",
"property"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L267-L278 | train | 207,983 |
def focus_next_matching(self, querystring):
    """Focus the next message matching *querystring*, walking the
    thread in depth-first order."""
    def is_match(messagetree):
        return messagetree._message.matches(querystring)
    self.focus_property(is_match, self._tree.next_position)
"""focus next matching message in depth first order"""
self.focus_property(lambda x: x._message.matches(querystring),
self._tree.next_position) | [
"def",
"focus_next_matching",
"(",
"self",
",",
"querystring",
")",
":",
"self",
".",
"focus_property",
"(",
"lambda",
"x",
":",
"x",
".",
"_message",
".",
"matches",
"(",
"querystring",
")",
",",
"self",
".",
"_tree",
".",
"next_position",
")"
] | focus next matching message in depth first order | [
"focus",
"next",
"matching",
"message",
"in",
"depth",
"first",
"order"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L280-L283 | train | 207,984 |
def focus_prev_matching(self, querystring):
    """Focus the previous message matching *querystring*, walking the
    thread in reverse depth-first order."""
    def is_match(messagetree):
        return messagetree._message.matches(querystring)
    self.focus_property(is_match, self._tree.prev_position)
"""focus previous matching message in depth first order"""
self.focus_property(lambda x: x._message.matches(querystring),
self._tree.prev_position) | [
"def",
"focus_prev_matching",
"(",
"self",
",",
"querystring",
")",
":",
"self",
".",
"focus_property",
"(",
"lambda",
"x",
":",
"x",
".",
"_message",
".",
"matches",
"(",
"querystring",
")",
",",
"self",
".",
"_tree",
".",
"prev_position",
")"
] | focus previous matching message in depth first order | [
"focus",
"previous",
"matching",
"message",
"in",
"depth",
"first",
"order"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L285-L288 | train | 207,985 |
def focus_next_unfolded(self):
    """Focus the next unfolded (expanded) message in depth-first order."""
    def is_unfolded(messagetree):
        return not messagetree.is_collapsed(messagetree.root)
    self.focus_property(is_unfolded, self._tree.next_position)
"""focus next unfolded message in depth first order"""
self.focus_property(lambda x: not x.is_collapsed(x.root),
self._tree.next_position) | [
"def",
"focus_next_unfolded",
"(",
"self",
")",
":",
"self",
".",
"focus_property",
"(",
"lambda",
"x",
":",
"not",
"x",
".",
"is_collapsed",
"(",
"x",
".",
"root",
")",
",",
"self",
".",
"_tree",
".",
"next_position",
")"
] | focus next unfolded message in depth first order | [
"focus",
"next",
"unfolded",
"message",
"in",
"depth",
"first",
"order"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L290-L293 | train | 207,986 |
def focus_prev_unfolded(self):
    """Focus the previous unfolded (expanded) message in depth-first
    order."""
    def is_unfolded(messagetree):
        return not messagetree.is_collapsed(messagetree.root)
    self.focus_property(is_unfolded, self._tree.prev_position)
"""focus previous unfolded message in depth first order"""
self.focus_property(lambda x: not x.is_collapsed(x.root),
self._tree.prev_position) | [
"def",
"focus_prev_unfolded",
"(",
"self",
")",
":",
"self",
".",
"focus_property",
"(",
"lambda",
"x",
":",
"not",
"x",
".",
"is_collapsed",
"(",
"x",
".",
"root",
")",
",",
"self",
".",
"_tree",
".",
"prev_position",
")"
] | focus previous unfolded message in depth first order | [
"focus",
"previous",
"unfolded",
"message",
"in",
"depth",
"first",
"order"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L295-L298 | train | 207,987 |
def expand(self, msgpos):
    """Expand the message tree located at *msgpos*."""
    messagetree = self._tree[msgpos]
    messagetree.expand(messagetree.root)
"""expand message at given position"""
MT = self._tree[msgpos]
MT.expand(MT.root) | [
"def",
"expand",
"(",
"self",
",",
"msgpos",
")",
":",
"MT",
"=",
"self",
".",
"_tree",
"[",
"msgpos",
"]",
"MT",
".",
"expand",
"(",
"MT",
".",
"root",
")"
] | expand message at given position | [
"expand",
"message",
"at",
"given",
"position"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L300-L303 | train | 207,988 |
def collapse(self, msgpos):
    """Collapse the message tree at *msgpos*, then restore focus to the
    selected message."""
    messagetree = self._tree[msgpos]
    messagetree.collapse(messagetree.root)
    self.focus_selected_message()
"""collapse message at given position"""
MT = self._tree[msgpos]
MT.collapse(MT.root)
self.focus_selected_message() | [
"def",
"collapse",
"(",
"self",
",",
"msgpos",
")",
":",
"MT",
"=",
"self",
".",
"_tree",
"[",
"msgpos",
"]",
"MT",
".",
"collapse",
"(",
"MT",
".",
"root",
")",
"self",
".",
"focus_selected_message",
"(",
")"
] | collapse message at given position | [
"collapse",
"message",
"at",
"given",
"position"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L314-L318 | train | 207,989 |
def collapse_all(self):
    """Collapse every message in the thread, then restore focus to the
    selected message."""
    for messagetree in self.messagetrees():
        messagetree.collapse(messagetree.root)
    self.focus_selected_message()
"""collapse all messages in thread"""
for MT in self.messagetrees():
MT.collapse(MT.root)
self.focus_selected_message() | [
"def",
"collapse_all",
"(",
"self",
")",
":",
"for",
"MT",
"in",
"self",
".",
"messagetrees",
"(",
")",
":",
"MT",
".",
"collapse",
"(",
"MT",
".",
"root",
")",
"self",
".",
"focus_selected_message",
"(",
")"
] | collapse all messages in thread | [
"collapse",
"all",
"messages",
"in",
"thread"
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L320-L324 | train | 207,990 |
def unfold_matching(self, querystring, focus_first=True):
    """
    Expand all messages that match a given querystring; collapse the rest.

    :param querystring: query to match
    :type querystring: str
    :param focus_first: set the focus to the first matching message
    :type focus_first: bool
    """
    first = None
    for MT in self.messagetrees():
        msg = MT._message
        if msg.matches(querystring):
            MT.expand(MT.root)
            if first is None:
                first = (self._tree.position_of_messagetree(MT), MT.root)
                # Bug fix: the focus_first flag was previously accepted
                # (and documented) but never consulted -- the focus moved
                # unconditionally. Honour it now.
                if focus_first:
                    self.body.set_focus(first)
        else:
            MT.collapse(MT.root)
    self.body.refresh()
"""
expand all messages that match a given querystring.
:param querystring: query to match
:type querystring: str
:param focus_first: set the focus to the first matching message
:type focus_first: bool
"""
first = None
for MT in self.messagetrees():
msg = MT._message
if msg.matches(querystring):
MT.expand(MT.root)
if first is None:
first = (self._tree.position_of_messagetree(MT), MT.root)
self.body.set_focus(first)
else:
MT.collapse(MT.root)
self.body.refresh() | [
"def",
"unfold_matching",
"(",
"self",
",",
"querystring",
",",
"focus_first",
"=",
"True",
")",
":",
"first",
"=",
"None",
"for",
"MT",
"in",
"self",
".",
"messagetrees",
"(",
")",
":",
"msg",
"=",
"MT",
".",
"_message",
"if",
"msg",
".",
"matches",
... | expand all messages that match a given querystring.
:param querystring: query to match
:type querystring: str
:param focus_first: set the focus to the first matching message
:type focus_first: bool | [
"expand",
"all",
"messages",
"that",
"match",
"a",
"given",
"querystring",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L326-L345 | train | 207,991 |
def __cmp(self, other, comparitor):
    """Shared comparison method."""
    if not isinstance(other, TagWidget):
        return NotImplemented
    own_length = len(self.translated)
    other_length = len(other.translated)
    # Single-character tags sort apart from longer tags; compare by
    # length when exactly one side is a single character.
    if (own_length == 1) is not (other_length == 1):
        return comparitor(own_length, other_length)
    return comparitor(self.translated.lower(), other.translated.lower())
"""Shared comparison method."""
if not isinstance(other, TagWidget):
return NotImplemented
self_len = len(self.translated)
oth_len = len(other.translated)
if (self_len == 1) is not (oth_len == 1):
return comparitor(self_len, oth_len)
return comparitor(self.translated.lower(), other.translated.lower()) | [
"def",
"__cmp",
"(",
"self",
",",
"other",
",",
"comparitor",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"TagWidget",
")",
":",
"return",
"NotImplemented",
"self_len",
"=",
"len",
"(",
"self",
".",
"translated",
")",
"oth_len",
"=",
"len",
... | Shared comparison method. | [
"Shared",
"comparison",
"method",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/widgets/globals.py#L320-L330 | train | 207,992 |
def add_signature_headers(mail, sigs, error_msg):
    '''Add pseudo headers to the mail indicating whether the signature
    verification was successful.

    :param mail: :class:`email.message.Message` the message to entitle
    :param sigs: list of :class:`gpg.results.Signature`
    :param error_msg: An error message if there is one, or None
    :type error_msg: :class:`str` or `None`
    '''
    sig_from = ''
    sig_known = True
    uid_trusted = False

    # error_msg must be an optional str; anything else is a caller bug.
    assert error_msg is None or isinstance(error_msg, str)

    if not sigs:
        error_msg = error_msg or u'no signature found'
    elif not error_msg:
        try:
            key = crypto.get_key(sigs[0].fpr)
            # Prefer the first uid whose validity checks out; fall back
            # to the key's first uid when none is trusted.
            for uid in key.uids:
                if crypto.check_uid_validity(key, uid.email):
                    sig_from = uid.uid
                    uid_trusted = True
                    break
            else:
                # No trusted uid found, since we did not break from the loop.
                sig_from = key.uids[0].uid
        except GPGProblem:
            # Key lookup failed: report only the bare fingerprint.
            sig_from = sigs[0].fpr
            sig_known = False

    if error_msg:
        msg = 'Invalid: {}'.format(error_msg)
    elif uid_trusted:
        msg = 'Valid: {}'.format(sig_from)
    else:
        msg = 'Untrusted: {}'.format(sig_from)

    # Record the outcome as pseudo headers on the mail itself.
    mail.add_header(X_SIGNATURE_VALID_HEADER,
                    'False' if (error_msg or not sig_known) else 'True')
    mail.add_header(X_SIGNATURE_MESSAGE_HEADER, msg)
'''Add pseudo headers to the mail indicating whether the signature
verification was successful.
:param mail: :class:`email.message.Message` the message to entitle
:param sigs: list of :class:`gpg.results.Signature`
:param error_msg: An error message if there is one, or None
:type error_msg: :class:`str` or `None`
'''
sig_from = ''
sig_known = True
uid_trusted = False
assert error_msg is None or isinstance(error_msg, str)
if not sigs:
error_msg = error_msg or u'no signature found'
elif not error_msg:
try:
key = crypto.get_key(sigs[0].fpr)
for uid in key.uids:
if crypto.check_uid_validity(key, uid.email):
sig_from = uid.uid
uid_trusted = True
break
else:
# No trusted uid found, since we did not break from the loop.
sig_from = key.uids[0].uid
except GPGProblem:
sig_from = sigs[0].fpr
sig_known = False
if error_msg:
msg = 'Invalid: {}'.format(error_msg)
elif uid_trusted:
msg = 'Valid: {}'.format(sig_from)
else:
msg = 'Untrusted: {}'.format(sig_from)
mail.add_header(X_SIGNATURE_VALID_HEADER,
'False' if (error_msg or not sig_known) else 'True')
mail.add_header(X_SIGNATURE_MESSAGE_HEADER, msg) | [
"def",
"add_signature_headers",
"(",
"mail",
",",
"sigs",
",",
"error_msg",
")",
":",
"sig_from",
"=",
"''",
"sig_known",
"=",
"True",
"uid_trusted",
"=",
"False",
"assert",
"error_msg",
"is",
"None",
"or",
"isinstance",
"(",
"error_msg",
",",
"str",
")",
... | Add pseudo headers to the mail indicating whether the signature
verification was successful.
:param mail: :class:`email.message.Message` the message to entitle
:param sigs: list of :class:`gpg.results.Signature`
:param error_msg: An error message if there is one, or None
:type error_msg: :class:`str` or `None` | [
"Add",
"pseudo",
"headers",
"to",
"the",
"mail",
"indicating",
"whether",
"the",
"signature",
"verification",
"was",
"successful",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/utils.py#L38-L79 | train | 207,993 |
def get_params(mail, failobj=None, header='content-type', unquote=True):
    '''Get Content-Type parameters as dict.

    RFC 2045 specifies that parameter names are case-insensitive, so
    we normalize them here.

    :param mail: :class:`email.message.Message`
    :param failobj: object to return if no such header is found
    :param header: the header to search for parameters, default
    :param unquote: unquote the values
    :returns: a `dict` containing the parameters
    '''
    pairs = mail.get_params(failobj or [], header, unquote)
    return dict((name.lower(), value) for name, value in pairs)
'''Get Content-Type parameters as dict.
RFC 2045 specifies that parameter names are case-insensitive, so
we normalize them here.
:param mail: :class:`email.message.Message`
:param failobj: object to return if no such header is found
:param header: the header to search for parameters, default
:param unquote: unquote the values
:returns: a `dict` containing the parameters
'''
failobj = failobj or []
return {k.lower(): v for k, v in mail.get_params(failobj, header, unquote)} | [
"def",
"get_params",
"(",
"mail",
",",
"failobj",
"=",
"None",
",",
"header",
"=",
"'content-type'",
",",
"unquote",
"=",
"True",
")",
":",
"failobj",
"=",
"failobj",
"or",
"[",
"]",
"return",
"{",
"k",
".",
"lower",
"(",
")",
":",
"v",
"for",
"k",... | Get Content-Type parameters as dict.
RFC 2045 specifies that parameter names are case-insensitive, so
we normalize them here.
:param mail: :class:`email.message.Message`
:param failobj: object to return if no such header is found
:param header: the header to search for parameters, default
:param unquote: unquote the values
:returns: a `dict` containing the parameters | [
"Get",
"Content",
"-",
"Type",
"parameters",
"as",
"dict",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/utils.py#L82-L95 | train | 207,994 |
def _handle_signatures(original, message, params):
    """Shared code for handling message signatures.

    RFC 3156 is quite strict:
    * exactly two messages
    * the second is of type 'application/pgp-signature'
    * the second contains the detached signature

    :param original: The original top-level mail. This is required to attache
        special headers to
    :type original: :class:`email.message.Message`
    :param message: The multipart/signed payload to verify
    :type message: :class:`email.message.Message`
    :param params: the message parameters as returned by :func:`get_params`
    :type params: dict[str, str]
    """
    malformed = None
    # Structural checks required by RFC 3156 before attempting verification.
    if len(message.get_payload()) != 2:
        malformed = u'expected exactly two messages, got {0}'.format(
            len(message.get_payload()))
    else:
        ct = message.get_payload(1).get_content_type()
        if ct != _APP_PGP_SIG:
            malformed = u'expected Content-Type: {0}, got: {1}'.format(
                _APP_PGP_SIG, ct)

    # TODO: RFC 3156 says the alg has to be lower case, but I've seen a message
    # with 'PGP-'. maybe we should be more permissive here, or maybe not, this
    # is crypto stuff...
    if not params.get('micalg', 'nothing').startswith('pgp-'):
        malformed = u'expected micalg=pgp-..., got: {0}'.format(
            params.get('micalg', 'nothing'))

    sigs = []
    if not malformed:
        try:
            # Verify part 0 (the signed content, re-serialized with the
            # SMTP policy) against the detached signature in part 1.
            sigs = crypto.verify_detached(
                message.get_payload(0).as_bytes(policy=email.policy.SMTP),
                message.get_payload(1).get_payload(decode=True))
        except GPGProblem as e:
            malformed = str(e)

    # Record the verification outcome as pseudo headers on the original.
    add_signature_headers(original, sigs, malformed)
"""Shared code for handling message signatures.
RFC 3156 is quite strict:
* exactly two messages
* the second is of type 'application/pgp-signature'
* the second contains the detached signature
:param original: The original top-level mail. This is required to attache
special headers to
:type original: :class:`email.message.Message`
:param message: The multipart/signed payload to verify
:type message: :class:`email.message.Message`
:param params: the message parameters as returned by :func:`get_params`
:type params: dict[str, str]
"""
malformed = None
if len(message.get_payload()) != 2:
malformed = u'expected exactly two messages, got {0}'.format(
len(message.get_payload()))
else:
ct = message.get_payload(1).get_content_type()
if ct != _APP_PGP_SIG:
malformed = u'expected Content-Type: {0}, got: {1}'.format(
_APP_PGP_SIG, ct)
# TODO: RFC 3156 says the alg has to be lower case, but I've seen a message
# with 'PGP-'. maybe we should be more permissive here, or maybe not, this
# is crypto stuff...
if not params.get('micalg', 'nothing').startswith('pgp-'):
malformed = u'expected micalg=pgp-..., got: {0}'.format(
params.get('micalg', 'nothing'))
sigs = []
if not malformed:
try:
sigs = crypto.verify_detached(
message.get_payload(0).as_bytes(policy=email.policy.SMTP),
message.get_payload(1).get_payload(decode=True))
except GPGProblem as e:
malformed = str(e)
add_signature_headers(original, sigs, malformed) | [
"def",
"_handle_signatures",
"(",
"original",
",",
"message",
",",
"params",
")",
":",
"malformed",
"=",
"None",
"if",
"len",
"(",
"message",
".",
"get_payload",
"(",
")",
")",
"!=",
"2",
":",
"malformed",
"=",
"u'expected exactly two messages, got {0}'",
".",... | Shared code for handling message signatures.
RFC 3156 is quite strict:
* exactly two messages
* the second is of type 'application/pgp-signature'
* the second contains the detached signature
:param original: The original top-level mail. This is required to attache
special headers to
:type original: :class:`email.message.Message`
:param message: The multipart/signed payload to verify
:type message: :class:`email.message.Message`
:param params: the message parameters as returned by :func:`get_params`
:type params: dict[str, str] | [
"Shared",
"code",
"for",
"handling",
"message",
"signatures",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/utils.py#L98-L140 | train | 207,995 |
def _handle_encrypted(original, message, session_keys=None):
    """Handle encrypted messages helper.

    RFC 3156 is quite strict:
    * exactly two messages
    * the first is of type 'application/pgp-encrypted'
    * the first contains 'Version: 1'
    * the second is of type 'application/octet-stream'
    * the second contains the encrypted and possibly signed data

    :param original: The original top-level mail. This is required to attache
        special headers to
    :type original: :class:`email.message.Message`
    :param message: The multipart/signed payload to verify
    :type message: :class:`email.message.Message`
    :param session_keys: a list OpenPGP session keys
    :type session_keys: [str]
    """
    malformed = False

    # Structural checks required by RFC 3156.
    ct = message.get_payload(0).get_content_type()
    if ct != _APP_PGP_ENC:
        malformed = u'expected Content-Type: {0}, got: {1}'.format(
            _APP_PGP_ENC, ct)

    want = 'application/octet-stream'
    ct = message.get_payload(1).get_content_type()
    if ct != want:
        malformed = u'expected Content-Type: {0}, got: {1}'.format(want, ct)

    if not malformed:
        # This should be safe because PGP uses US-ASCII characters only
        payload = message.get_payload(1).get_payload().encode('ascii')
        try:
            sigs, d = crypto.decrypt_verify(payload, session_keys)
        except GPGProblem as e:
            # signature verification failures end up here too if the combined
            # method is used, currently this prevents the interpretation of the
            # recovered plain text mail. maybe that's a feature.
            malformed = str(e)
        else:
            n = decrypted_message_from_bytes(d, session_keys)

            # add the decrypted message to message. note that n contains all
            # the attachments, no need to walk over n here.
            original.attach(n)

            # Propagate parsing defects from the recovered message.
            original.defects.extend(n.defects)

            # there are two methods for both signed and encrypted data, one is
            # called 'RFC 1847 Encapsulation' by RFC 3156, and one is the
            # 'Combined method'.
            if not sigs:
                # 'RFC 1847 Encapsulation', the signature is a detached
                # signature found in the recovered mime message of type
                # multipart/signed.
                if X_SIGNATURE_VALID_HEADER in n:
                    # Copy the signature-status pseudo headers upward.
                    for k in (X_SIGNATURE_VALID_HEADER,
                              X_SIGNATURE_MESSAGE_HEADER):
                        original[k] = n[k]
            else:
                # 'Combined method', the signatures are returned by the
                # decrypt_verify function.

                # note that if we reached this point, we know the signatures
                # are valid. if they were not valid, the else block of the
                # current try would not have been executed
                add_signature_headers(original, sigs, '')

    if malformed:
        # Surface the problem to the user as an attached plain-text part.
        msg = u'Malformed OpenPGP message: {0}'.format(malformed)
        content = email.message_from_string(msg, policy=email.policy.SMTP)
        content.set_charset('utf-8')
        original.attach(content)
"""Handle encrypted messages helper.
RFC 3156 is quite strict:
* exactly two messages
* the first is of type 'application/pgp-encrypted'
* the first contains 'Version: 1'
* the second is of type 'application/octet-stream'
* the second contains the encrypted and possibly signed data
:param original: The original top-level mail. This is required to attache
special headers to
:type original: :class:`email.message.Message`
:param message: The multipart/signed payload to verify
:type message: :class:`email.message.Message`
:param session_keys: a list OpenPGP session keys
:type session_keys: [str]
"""
malformed = False
ct = message.get_payload(0).get_content_type()
if ct != _APP_PGP_ENC:
malformed = u'expected Content-Type: {0}, got: {1}'.format(
_APP_PGP_ENC, ct)
want = 'application/octet-stream'
ct = message.get_payload(1).get_content_type()
if ct != want:
malformed = u'expected Content-Type: {0}, got: {1}'.format(want, ct)
if not malformed:
# This should be safe because PGP uses US-ASCII characters only
payload = message.get_payload(1).get_payload().encode('ascii')
try:
sigs, d = crypto.decrypt_verify(payload, session_keys)
except GPGProblem as e:
# signature verification failures end up here too if the combined
# method is used, currently this prevents the interpretation of the
# recovered plain text mail. maybe that's a feature.
malformed = str(e)
else:
n = decrypted_message_from_bytes(d, session_keys)
# add the decrypted message to message. note that n contains all
# the attachments, no need to walk over n here.
original.attach(n)
original.defects.extend(n.defects)
# there are two methods for both signed and encrypted data, one is
# called 'RFC 1847 Encapsulation' by RFC 3156, and one is the
# 'Combined method'.
if not sigs:
# 'RFC 1847 Encapsulation', the signature is a detached
# signature found in the recovered mime message of type
# multipart/signed.
if X_SIGNATURE_VALID_HEADER in n:
for k in (X_SIGNATURE_VALID_HEADER,
X_SIGNATURE_MESSAGE_HEADER):
original[k] = n[k]
else:
# 'Combined method', the signatures are returned by the
# decrypt_verify function.
# note that if we reached this point, we know the signatures
# are valid. if they were not valid, the else block of the
# current try would not have been executed
add_signature_headers(original, sigs, '')
if malformed:
msg = u'Malformed OpenPGP message: {0}'.format(malformed)
content = email.message_from_string(msg, policy=email.policy.SMTP)
content.set_charset('utf-8')
original.attach(content) | [
"def",
"_handle_encrypted",
"(",
"original",
",",
"message",
",",
"session_keys",
"=",
"None",
")",
":",
"malformed",
"=",
"False",
"ct",
"=",
"message",
".",
"get_payload",
"(",
"0",
")",
".",
"get_content_type",
"(",
")",
"if",
"ct",
"!=",
"_APP_PGP_ENC"... | Handle encrypted messages helper.
RFC 3156 is quite strict:
* exactly two messages
* the first is of type 'application/pgp-encrypted'
* the first contains 'Version: 1'
* the second is of type 'application/octet-stream'
* the second contains the encrypted and possibly signed data
:param original: The original top-level mail. This is required to attache
special headers to
:type original: :class:`email.message.Message`
:param message: The multipart/signed payload to verify
:type message: :class:`email.message.Message`
:param session_keys: a list OpenPGP session keys
:type session_keys: [str] | [
"Handle",
"encrypted",
"messages",
"helper",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/utils.py#L143-L216 | train | 207,996 |
def decrypted_message_from_message(m, session_keys=None):
    '''Detect and decrypt OpenPGP encrypted data in an email object. If this
    succeeds, any mime messages found in the recovered plaintext
    message are added to the returned message object.

    :param m: an email object
    :param session_keys: a list OpenPGP session keys
    :returns: :class:`email.message.Message` possibly augmented with
              decrypted data
    '''
    # make sure no one smuggles a token in (data from m is untrusted)
    del m[X_SIGNATURE_VALID_HEADER]
    del m[X_SIGNATURE_MESSAGE_HEADER]

    if m.is_multipart():
        p = get_params(m)

        # handle OpenPGP signed data
        if (m.get_content_subtype() == 'signed' and
                p.get('protocol') == _APP_PGP_SIG):
            _handle_signatures(m, m, p)

        # handle OpenPGP encrypted data
        elif (m.get_content_subtype() == 'encrypted' and
                p.get('protocol') == _APP_PGP_ENC and
                'Version: 1' in m.get_payload(0).get_payload()):
            _handle_encrypted(m, m, session_keys)

        # It is also possible to put either of the above into a
        # multipart/mixed segment
        elif m.get_content_subtype() == 'mixed':
            sub = m.get_payload(0)

            if sub.is_multipart():
                p = get_params(sub)

                if (sub.get_content_subtype() == 'signed' and
                        p.get('protocol') == _APP_PGP_SIG):
                    _handle_signatures(m, sub, p)
                elif (sub.get_content_subtype() == 'encrypted' and
                        p.get('protocol') == _APP_PGP_ENC):
                    _handle_encrypted(m, sub, session_keys)

    return m
'''Detect and decrypt OpenPGP encrypted data in an email object. If this
succeeds, any mime messages found in the recovered plaintext
message are added to the returned message object.
:param m: an email object
:param session_keys: a list OpenPGP session keys
:returns: :class:`email.message.Message` possibly augmented with
decrypted data
'''
# make sure no one smuggles a token in (data from m is untrusted)
del m[X_SIGNATURE_VALID_HEADER]
del m[X_SIGNATURE_MESSAGE_HEADER]
if m.is_multipart():
p = get_params(m)
# handle OpenPGP signed data
if (m.get_content_subtype() == 'signed' and
p.get('protocol') == _APP_PGP_SIG):
_handle_signatures(m, m, p)
# handle OpenPGP encrypted data
elif (m.get_content_subtype() == 'encrypted' and
p.get('protocol') == _APP_PGP_ENC and
'Version: 1' in m.get_payload(0).get_payload()):
_handle_encrypted(m, m, session_keys)
# It is also possible to put either of the abov into a multipart/mixed
# segment
elif m.get_content_subtype() == 'mixed':
sub = m.get_payload(0)
if sub.is_multipart():
p = get_params(sub)
if (sub.get_content_subtype() == 'signed' and
p.get('protocol') == _APP_PGP_SIG):
_handle_signatures(m, sub, p)
elif (sub.get_content_subtype() == 'encrypted' and
p.get('protocol') == _APP_PGP_ENC):
_handle_encrypted(m, sub, session_keys)
return m | [
"def",
"decrypted_message_from_message",
"(",
"m",
",",
"session_keys",
"=",
"None",
")",
":",
"# make sure no one smuggles a token in (data from m is untrusted)",
"del",
"m",
"[",
"X_SIGNATURE_VALID_HEADER",
"]",
"del",
"m",
"[",
"X_SIGNATURE_MESSAGE_HEADER",
"]",
"if",
... | Detect and decrypt OpenPGP encrypted data in an email object. If this
succeeds, any mime messages found in the recovered plaintext
message are added to the returned message object.
:param m: an email object
:param session_keys: a list OpenPGP session keys
:returns: :class:`email.message.Message` possibly augmented with
decrypted data | [
"Detect",
"and",
"decrypt",
"OpenPGP",
"encrypted",
"data",
"in",
"an",
"email",
"object",
".",
"If",
"this",
"succeeds",
"any",
"mime",
"messages",
"found",
"in",
"the",
"recovered",
"plaintext",
"message",
"are",
"added",
"to",
"the",
"returned",
"message",
... | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/utils.py#L235-L278 | train | 207,997 |
def decrypted_message_from_bytes(bytestring, session_keys=None):
    """Create a Message from bytes.

    :param bytes bytestring: an email message as raw bytes
    :param session_keys: a list OpenPGP session keys
    """
    parsed = email.message_from_bytes(bytestring, policy=email.policy.SMTP)
    return decrypted_message_from_message(parsed, session_keys)
"""Create a Message from bytes.
:param bytes bytestring: an email message as raw bytes
:param session_keys: a list OpenPGP session keys
"""
return decrypted_message_from_message(
email.message_from_bytes(bytestring, policy=email.policy.SMTP),
session_keys) | [
"def",
"decrypted_message_from_bytes",
"(",
"bytestring",
",",
"session_keys",
"=",
"None",
")",
":",
"return",
"decrypted_message_from_message",
"(",
"email",
".",
"message_from_bytes",
"(",
"bytestring",
",",
"policy",
"=",
"email",
".",
"policy",
".",
"SMTP",
"... | Create a Message from bytes.
:param bytes bytestring: an email message as raw bytes
:param session_keys: a list OpenPGP session keys | [
"Create",
"a",
"Message",
"from",
"bytes",
"."
] | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/utils.py#L294-L302 | train | 207,998 |
def render_part(part, field_key='copiousoutput'):
    """Render a non-multipart email part into displayable plaintext.

    The part's payload is piped through an external program; the handler is
    chosen from the mailcap entry matching this part's content type.

    :param part: non-multipart message part to render
    :type part: email.message.Message
    :param str field_key: mailcap field that the entry must carry
        (default ``'copiousoutput'``)
    :returns: the handler's stdout, or ``None`` if no handler matched or
        the handler produced no output
    """
    ctype = part.get_content_type()
    raw_payload = remove_cte(part)
    rendered_payload = None
    # get mime handler
    _, entry = settings.mailcap_find_match(ctype, key=field_key)
    if entry is not None:
        tempfile_name = None
        stdin = None
        handler_raw_commandstring = entry['view']
        # in case the mailcap defined command contains no '%s',
        # we pipe the file's content to the handling command via stdin
        if '%s' in handler_raw_commandstring:
            # open tempfile, respect mailcap's nametemplate
            nametemplate = entry.get('nametemplate', '%s')
            prefix, suffix = parse_mailcap_nametemplate(nametemplate)
            with tempfile.NamedTemporaryFile(
                    delete=False, prefix=prefix, suffix=suffix) \
                    as tmpfile:
                tmpfile.write(raw_payload)
                tempfile_name = tmpfile.name
        else:
            stdin = raw_payload
        try:
            # read parameters, create handler command
            parms = tuple('='.join(p) for p in part.get_params())
            # create and call external command (reuse the command string
            # already read from the entry instead of a second lookup)
            cmd = mailcap.subst(handler_raw_commandstring, ctype,
                                filename=tempfile_name, plist=parms)
            logging.debug('command: %s', cmd)
            logging.debug('parms: %s', str(parms))
            cmdlist = split_commandstring(cmd)
            # call handler
            stdout, _, _ = helper.call_cmd(cmdlist, stdin=stdin)
            if stdout:
                rendered_payload = stdout
        finally:
            # remove tempfile even when substitution or the handler raised,
            # so a failing handler never leaks temporary files on disk
            if tempfile_name:
                os.unlink(tempfile_name)
    return rendered_payload
"def",
"render_part",
"(",
"part",
",",
"field_key",
"=",
"'copiousoutput'",
")",
":",
"ctype",
"=",
"part",
".",
"get_content_type",
"(",
")",
"raw_payload",
"=",
"remove_cte",
"(",
"part",
")",
"rendered_payload",
"=",
"None",
"# get mime handler",
"_",
",",... | renders a non-multipart email part into displayable plaintext by piping its
payload through an external script. The handler itself is determined by
the mailcap entry for this part's ctype. | [
"renders",
"a",
"non",
"-",
"multipart",
"email",
"part",
"into",
"displayable",
"plaintext",
"by",
"piping",
"its",
"payload",
"through",
"an",
"external",
"script",
".",
"The",
"handler",
"itself",
"is",
"determined",
"by",
"the",
"mailcap",
"entry",
"for",
... | d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/db/utils.py#L327-L374 | train | 207,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.