Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Predict the next line after this snippet: <|code_start|>logger = logging.getLogger("dnf")
class MultilineHelpFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
if '\n' in text:
return text.splitlines()
return super(MultilineHelpFormatter, self)._split_lines(text, width)
class OptionParser(argparse.ArgumentParser):
"""ArgumentParser like class to do things the "yum way"."""
def __init__(self, reset_usage=True):
super(OptionParser, self).__init__(add_help=False,
formatter_class=MultilineHelpFormatter)
self.command_positional_parser = None
self.command_group = None
self._add_general_options()
if reset_usage:
self._cmd_usage = {} # names, summary for dnf commands, to build usage
self._cmd_groups = set() # cmd groups added (main, plugin)
def error(self, msg):
"""Output an error message, and exit the program.
This method overrides standard argparser's error
so that error output goes to the logger.
:param msg: the error message to output
"""
self.print_usage()
<|code_end|>
using the current file's imports:
from dnf.i18n import _
from dnf.util import _parse_specs
import argparse
import dnf.exceptions
import dnf.util
import dnf.rpm
import dnf.yum.misc
import logging
import os.path
import re
import sys
and any relevant context from other files:
# Path: dnf/i18n.py
# class UnicodeStream(object):
# def __init__(self, stream, encoding):
# def write(self, s):
# def __getattr__(self, name):
# def _full_ucd_support(encoding):
# def _guess_encoding():
# def setup_locale():
# def setup_stdout():
# def ucd_input(ucstring):
# def ucd(obj):
# def _exact_width_char(uchar):
# def chop_str(msg, chop=None):
# def exact_width(msg):
# def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
# def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
# def _indent_at_beg(line):
# def select_short_long(width, msg_short, msg_long):
# def translation(name):
# def ucd_wrapper(fnc):
# def pgettext(context, message):
# _, P_ = translation("dnf")
# C_ = pgettext
#
# Path: dnf/util.py
# def _parse_specs(namespace, values):
# """
# Categorize :param values list into packages, groups and filenames
#
# :param namespace: argparse.Namespace, where specs will be stored
# :param values: list of specs, whether packages ('foo') or groups/modules ('@bar')
# or filenames ('*.rmp', 'http://*', ...)
#
# To access packages use: specs.pkg_specs,
# to access groups use: specs.grp_specs,
# to access filenames use: specs.filenames
# """
#
# setattr(namespace, "filenames", [])
# setattr(namespace, "grp_specs", [])
# setattr(namespace, "pkg_specs", [])
# tmp_set = set()
# for value in values:
# if value in tmp_set:
# continue
# tmp_set.add(value)
# schemes = dnf.pycomp.urlparse.urlparse(value)[0]
# if value.endswith('.rpm'):
# namespace.filenames.append(value)
# elif schemes and schemes in ('http', 'ftp', 'file', 'https'):
# namespace.filenames.append(value)
# elif value.startswith('@'):
# namespace.grp_specs.append(value[1:])
# else:
# namespace.pkg_specs.append(value)
. Output only the next line. | logger.critical(_("Command line error: %s"), msg) |
Continue the code snippet: <|code_start|> and repo_<setopts>."""
def __call__(self, parser, namespace, values, opt_str):
vals = values.split('=')
if len(vals) > 2:
logger.warning(_("Setopt argument has multiple values: %s"), values)
return
if len(vals) < 2:
logger.warning(_("Setopt argument has no value: %s"), values)
return
k, v = vals
period = k.rfind('.')
if period != -1:
repo = k[:period]
k = k[period+1:]
if hasattr(namespace, 'repo_setopts'):
repoopts = namespace.repo_setopts
else:
repoopts = {}
repoopts.setdefault(repo, {}).setdefault(k, []).append(v)
setattr(namespace, 'repo_' + self.dest, repoopts)
else:
if hasattr(namespace, 'main_setopts'):
mainopts = namespace.main_setopts
else:
mainopts = {}
mainopts.setdefault(k, []).append(v)
setattr(namespace, 'main_' + self.dest, mainopts)
class ParseSpecGroupFileCallback(argparse.Action):
def __call__(self, parser, namespace, values, opt_str):
<|code_end|>
. Use current file imports:
from dnf.i18n import _
from dnf.util import _parse_specs
import argparse
import dnf.exceptions
import dnf.util
import dnf.rpm
import dnf.yum.misc
import logging
import os.path
import re
import sys
and context (classes, functions, or code) from other files:
# Path: dnf/i18n.py
# class UnicodeStream(object):
# def __init__(self, stream, encoding):
# def write(self, s):
# def __getattr__(self, name):
# def _full_ucd_support(encoding):
# def _guess_encoding():
# def setup_locale():
# def setup_stdout():
# def ucd_input(ucstring):
# def ucd(obj):
# def _exact_width_char(uchar):
# def chop_str(msg, chop=None):
# def exact_width(msg):
# def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
# def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
# def _indent_at_beg(line):
# def select_short_long(width, msg_short, msg_long):
# def translation(name):
# def ucd_wrapper(fnc):
# def pgettext(context, message):
# _, P_ = translation("dnf")
# C_ = pgettext
#
# Path: dnf/util.py
# def _parse_specs(namespace, values):
# """
# Categorize :param values list into packages, groups and filenames
#
# :param namespace: argparse.Namespace, where specs will be stored
# :param values: list of specs, whether packages ('foo') or groups/modules ('@bar')
# or filenames ('*.rmp', 'http://*', ...)
#
# To access packages use: specs.pkg_specs,
# to access groups use: specs.grp_specs,
# to access filenames use: specs.filenames
# """
#
# setattr(namespace, "filenames", [])
# setattr(namespace, "grp_specs", [])
# setattr(namespace, "pkg_specs", [])
# tmp_set = set()
# for value in values:
# if value in tmp_set:
# continue
# tmp_set.add(value)
# schemes = dnf.pycomp.urlparse.urlparse(value)[0]
# if value.endswith('.rpm'):
# namespace.filenames.append(value)
# elif schemes and schemes in ('http', 'ftp', 'file', 'https'):
# namespace.filenames.append(value)
# elif value.startswith('@'):
# namespace.grp_specs.append(value[1:])
# else:
# namespace.pkg_specs.append(value)
. Output only the next line. | _parse_specs(namespace, values) |
Continue the code snippet: <|code_start|> if libdnf.conf.ConfigParser.substitute(sect, substitutions) == section_id:
section_id = sect
for name, value in modify.items():
if isinstance(value, list):
value = ' '.join(value)
parser.setValue(section_id, name, value)
parser.write(filename, False)
class MainConf(BaseConfig):
# :api
"""Configuration option definitions for dnf.conf's [main] section."""
def __init__(self, section='main', parser=None):
# pylint: disable=R0915
config = libdnf.conf.ConfigMain()
super(MainConf, self).__init__(config, section, parser)
self._set_value('pluginpath', [dnf.const.PLUGINPATH], PRIO_DEFAULT)
self._set_value('pluginconfpath', [dnf.const.PLUGINCONFPATH], PRIO_DEFAULT)
self.substitutions = dnf.conf.substitutions.Substitutions()
self.arch = hawkey.detect_arch()
self._config.system_cachedir().set(PRIO_DEFAULT, dnf.const.SYSTEM_CACHEDIR)
# setup different cache and log for non-privileged users
if dnf.util.am_i_root():
cachedir = dnf.const.SYSTEM_CACHEDIR
logdir = '/var/log'
else:
try:
<|code_end|>
. Use current file imports:
from dnf.yum import misc
from dnf.i18n import ucd, _
from dnf.pycomp import basestring, urlparse
import fnmatch
import dnf.conf.substitutions
import dnf.const
import dnf.exceptions
import dnf.pycomp
import dnf.util
import hawkey
import logging
import os
import libdnf.conf
import libdnf.repo
import tempfile
and context (classes, functions, or code) from other files:
# Path: dnf/yum/misc.py
# def re_glob(s):
# def re_full_search_needed(s):
# def get_default_chksum_type():
# def __init__(self, iter=None):
# def __iter__(self):
# def __getitem__(self, item):
# def all_lists(self):
# def merge_lists(self, other):
# def procgpgkey(rawkey):
# def keyInstalled(ts, keyid, timestamp):
# def import_key_to_pubring(rawkey, keyid, gpgdir=None, make_ro_copy=True):
# def getCacheDir():
# def seq_max_split(seq, max_entries):
# def unlink_f(filename):
# def stat_f(filename, ignore_EACCES=False):
# def _getloginuid():
# def getloginuid():
# def decompress(filename, dest=None, check_timestamps=False):
# def read_in_items_from_dot_dir(thisglob, line_as_list=True):
# class GenericHolder(object):
#
# Path: dnf/i18n.py
# class UnicodeStream(object):
# def __init__(self, stream, encoding):
# def write(self, s):
# def __getattr__(self, name):
# def _full_ucd_support(encoding):
# def _guess_encoding():
# def setup_locale():
# def setup_stdout():
# def ucd_input(ucstring):
# def ucd(obj):
# def _exact_width_char(uchar):
# def chop_str(msg, chop=None):
# def exact_width(msg):
# def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
# def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
# def _indent_at_beg(line):
# def select_short_long(width, msg_short, msg_long):
# def translation(name):
# def ucd_wrapper(fnc):
# def pgettext(context, message):
# _, P_ = translation("dnf")
# C_ = pgettext
. Output only the next line. | cachedir = logdir = misc.getCacheDir() |
Here is a snippet: <|code_start|>PRIO_PLUGINDEFAULT = libdnf.conf.Option.Priority_PLUGINDEFAULT
PRIO_PLUGINCONFIG = libdnf.conf.Option.Priority_PLUGINCONFIG
PRIO_COMMANDLINE = libdnf.conf.Option.Priority_COMMANDLINE
PRIO_RUNTIME = libdnf.conf.Option.Priority_RUNTIME
logger = logging.getLogger('dnf')
class BaseConfig(object):
"""Base class for storing configuration definitions.
Subclass when creating your own definitions.
"""
def __init__(self, config=None, section=None, parser=None):
self.__dict__["_config"] = config
self._section = section
def __getattr__(self, name):
if "_config" not in self.__dict__:
raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__, name))
option = getattr(self._config, name)
if option is None:
return None
try:
value = option().getValue()
except Exception as ex:
return None
if isinstance(value, str):
<|code_end|>
. Write the next line using the current file imports:
from dnf.yum import misc
from dnf.i18n import ucd, _
from dnf.pycomp import basestring, urlparse
import fnmatch
import dnf.conf.substitutions
import dnf.const
import dnf.exceptions
import dnf.pycomp
import dnf.util
import hawkey
import logging
import os
import libdnf.conf
import libdnf.repo
import tempfile
and context from other files:
# Path: dnf/yum/misc.py
# def re_glob(s):
# def re_full_search_needed(s):
# def get_default_chksum_type():
# def __init__(self, iter=None):
# def __iter__(self):
# def __getitem__(self, item):
# def all_lists(self):
# def merge_lists(self, other):
# def procgpgkey(rawkey):
# def keyInstalled(ts, keyid, timestamp):
# def import_key_to_pubring(rawkey, keyid, gpgdir=None, make_ro_copy=True):
# def getCacheDir():
# def seq_max_split(seq, max_entries):
# def unlink_f(filename):
# def stat_f(filename, ignore_EACCES=False):
# def _getloginuid():
# def getloginuid():
# def decompress(filename, dest=None, check_timestamps=False):
# def read_in_items_from_dot_dir(thisglob, line_as_list=True):
# class GenericHolder(object):
#
# Path: dnf/i18n.py
# class UnicodeStream(object):
# def __init__(self, stream, encoding):
# def write(self, s):
# def __getattr__(self, name):
# def _full_ucd_support(encoding):
# def _guess_encoding():
# def setup_locale():
# def setup_stdout():
# def ucd_input(ucstring):
# def ucd(obj):
# def _exact_width_char(uchar):
# def chop_str(msg, chop=None):
# def exact_width(msg):
# def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
# def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
# def _indent_at_beg(line):
# def select_short_long(width, msg_short, msg_long):
# def translation(name):
# def ucd_wrapper(fnc):
# def pgettext(context, message):
# _, P_ = translation("dnf")
# C_ = pgettext
, which may include functions, classes, or code. Output only the next line. | return ucd(value) |
Using the snippet: <|code_start|>
def _get_priority(self, name):
method = getattr(self._config, name, None)
if method is None:
return None
return method().getPriority()
def _set_value(self, name, value, priority=PRIO_RUNTIME):
"""Set option's value if priority is equal or higher
than current priority."""
method = getattr(self._config, name, None)
if method is None:
raise Exception("Option \"" + name + "\" does not exists")
option = method()
if value is None:
try:
option.set(priority, value)
except Exception:
pass
else:
try:
if isinstance(value, list) or isinstance(value, tuple):
option.set(priority, libdnf.conf.VectorString(value))
elif (isinstance(option, libdnf.conf.OptionBool)
or isinstance(option, libdnf.conf.OptionChildBool)
) and isinstance(value, int):
option.set(priority, bool(value))
else:
option.set(priority, value)
except RuntimeError as e:
<|code_end|>
, determine the next line of code. You have imports:
from dnf.yum import misc
from dnf.i18n import ucd, _
from dnf.pycomp import basestring, urlparse
import fnmatch
import dnf.conf.substitutions
import dnf.const
import dnf.exceptions
import dnf.pycomp
import dnf.util
import hawkey
import logging
import os
import libdnf.conf
import libdnf.repo
import tempfile
and context (class names, function names, or code) available:
# Path: dnf/yum/misc.py
# def re_glob(s):
# def re_full_search_needed(s):
# def get_default_chksum_type():
# def __init__(self, iter=None):
# def __iter__(self):
# def __getitem__(self, item):
# def all_lists(self):
# def merge_lists(self, other):
# def procgpgkey(rawkey):
# def keyInstalled(ts, keyid, timestamp):
# def import_key_to_pubring(rawkey, keyid, gpgdir=None, make_ro_copy=True):
# def getCacheDir():
# def seq_max_split(seq, max_entries):
# def unlink_f(filename):
# def stat_f(filename, ignore_EACCES=False):
# def _getloginuid():
# def getloginuid():
# def decompress(filename, dest=None, check_timestamps=False):
# def read_in_items_from_dot_dir(thisglob, line_as_list=True):
# class GenericHolder(object):
#
# Path: dnf/i18n.py
# class UnicodeStream(object):
# def __init__(self, stream, encoding):
# def write(self, s):
# def __getattr__(self, name):
# def _full_ucd_support(encoding):
# def _guess_encoding():
# def setup_locale():
# def setup_stdout():
# def ucd_input(ucstring):
# def ucd(obj):
# def _exact_width_char(uchar):
# def chop_str(msg, chop=None):
# def exact_width(msg):
# def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
# def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
# def _indent_at_beg(line):
# def select_short_long(width, msg_short, msg_long):
# def translation(name):
# def ucd_wrapper(fnc):
# def pgettext(context, message):
# _, P_ = translation("dnf")
# C_ = pgettext
. Output only the next line. | raise dnf.exceptions.ConfigError(_("Error parsing '%s': %s") |
Given snippet: <|code_start|># This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
logger = logging.getLogger("dnf")
class Sack(hawkey.Sack):
# :api
def __init__(self, *args, **kwargs):
super(Sack, self).__init__(*args, **kwargs)
def _configure(self, installonly=None, installonly_limit=0, allow_vendor_change=None):
if installonly:
self.installonly = installonly
self.installonly_limit = installonly_limit
if allow_vendor_change is not None:
self.allow_vendor_change = allow_vendor_change
if allow_vendor_change is False:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import dnf.util
import dnf.package
import dnf.query
import logging
import hawkey
import os
from dnf.pycomp import basestring
from dnf.i18n import _
and context:
# Path: dnf/i18n.py
# class UnicodeStream(object):
# def __init__(self, stream, encoding):
# def write(self, s):
# def __getattr__(self, name):
# def _full_ucd_support(encoding):
# def _guess_encoding():
# def setup_locale():
# def setup_stdout():
# def ucd_input(ucstring):
# def ucd(obj):
# def _exact_width_char(uchar):
# def chop_str(msg, chop=None):
# def exact_width(msg):
# def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
# def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
# def _indent_at_beg(line):
# def select_short_long(width, msg_short, msg_long):
# def translation(name):
# def ucd_wrapper(fnc):
# def pgettext(context, message):
# _, P_ = translation("dnf")
# C_ = pgettext
which might include code, classes, or functions. Output only the next line. | logger.warning(_("allow_vendor_change is disabled. This option is currently not supported for downgrade and distro-sync commands")) |
Given snippet: <|code_start|># Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
def _by_provides(sack, patterns, ignore_case=False, get_query=False):
if isinstance(patterns, basestring):
patterns = [patterns]
q = sack.query()
flags = []
if ignore_case:
flags.append(hawkey.ICASE)
q.filterm(*flags, provides__glob=patterns)
if get_query:
return q
return q.run()
def _per_nevra_dict(pkg_list):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import hawkey
from hawkey import Query
from dnf.i18n import ucd
from dnf.pycomp import basestring
and context:
# Path: dnf/i18n.py
# def ucd(obj):
# # :api, deprecated in 2.0.0, will be erased when python2 is abandoned
# """ Like the builtin unicode() but tries to use a reasonable encoding. """
# if dnf.pycomp.PY3:
# if dnf.pycomp.is_py3bytes(obj):
# return str(obj, _guess_encoding(), errors='ignore')
# elif isinstance(obj, str):
# return obj
# return str(obj)
# else:
# if isinstance(obj, dnf.pycomp.unicode):
# return obj
# if hasattr(obj, '__unicode__'):
# # see the doc for the unicode() built-in. The logic here is: if obj
# # implements __unicode__, let it take a crack at it, but handle the
# # situation if it fails:
# try:
# return dnf.pycomp.unicode(obj)
# except UnicodeError:
# pass
# return dnf.pycomp.unicode(str(obj), _guess_encoding(), errors='ignore')
which might include code, classes, or functions. Output only the next line. | return {ucd(pkg):pkg for pkg in pkg_list} |
Given the following code snippet before the placeholder: <|code_start|>#
# Copyright (C) 2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
class UpgradeMinimalCommand(UpgradeCommand):
"""A class containing methods needed by the cli to execute the check
command.
"""
aliases = ('upgrade-minimal', 'update-minimal', 'up-min')
<|code_end|>
, predict the next line using imports from the current file:
from dnf.i18n import _
from dnf.cli.commands.upgrade import UpgradeCommand
and context including class names, function names, and sometimes code from other files:
# Path: dnf/i18n.py
# class UnicodeStream(object):
# def __init__(self, stream, encoding):
# def write(self, s):
# def __getattr__(self, name):
# def _full_ucd_support(encoding):
# def _guess_encoding():
# def setup_locale():
# def setup_stdout():
# def ucd_input(ucstring):
# def ucd(obj):
# def _exact_width_char(uchar):
# def chop_str(msg, chop=None):
# def exact_width(msg):
# def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
# def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
# def _indent_at_beg(line):
# def select_short_long(width, msg_short, msg_long):
# def translation(name):
# def ucd_wrapper(fnc):
# def pgettext(context, message):
# _, P_ = translation("dnf")
# C_ = pgettext
. Output only the next line. | summary = _("upgrade, but only 'newest' package match which fixes a problem" |
Given snippet: <|code_start|># Copyright (C) 2013-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
logger = dnf.util.logger
class RepoDict(dict):
# :api
def add(self, repo):
# :api
id_ = repo.id
if id_ in self:
msg = 'Repository %s is listed more than once in the configuration'
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from dnf.exceptions import ConfigError
from dnf.i18n import _
import dnf.util
import libdnf.conf
import fnmatch
import os
and context:
# Path: dnf/exceptions.py
# class ConfigError(Error):
# def __init__(self, value=None, raw_error=None):
# super(ConfigError, self).__init__(value)
# self.raw_error = ucd(raw_error) if raw_error is not None else None
#
# Path: dnf/i18n.py
# class UnicodeStream(object):
# def __init__(self, stream, encoding):
# def write(self, s):
# def __getattr__(self, name):
# def _full_ucd_support(encoding):
# def _guess_encoding():
# def setup_locale():
# def setup_stdout():
# def ucd_input(ucstring):
# def ucd(obj):
# def _exact_width_char(uchar):
# def chop_str(msg, chop=None):
# def exact_width(msg):
# def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
# def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
# def _indent_at_beg(line):
# def select_short_long(width, msg_short, msg_long):
# def translation(name):
# def ucd_wrapper(fnc):
# def pgettext(context, message):
# _, P_ = translation("dnf")
# C_ = pgettext
which might include code, classes, or functions. Output only the next line. | raise ConfigError(msg % id_) |
Predict the next line after this snippet: <|code_start|>
logger = dnf.util.logger
class RepoDict(dict):
# :api
def add(self, repo):
# :api
id_ = repo.id
if id_ in self:
msg = 'Repository %s is listed more than once in the configuration'
raise ConfigError(msg % id_)
try:
repo._repo.verify()
except RuntimeError as e:
raise ConfigError("{0}".format(e))
self[id_] = repo
def all(self):
# :api
return dnf.util.MultiCallList(self.values())
def _any_enabled(self):
return not dnf.util.empty(self.iter_enabled())
def _enable_sub_repos(self, sub_name_fn):
for repo in self.iter_enabled():
for found in self.get_matching(sub_name_fn(repo.id)):
if not found.enabled:
<|code_end|>
using the current file's imports:
from dnf.exceptions import ConfigError
from dnf.i18n import _
import dnf.util
import libdnf.conf
import fnmatch
import os
and any relevant context from other files:
# Path: dnf/exceptions.py
# class ConfigError(Error):
# def __init__(self, value=None, raw_error=None):
# super(ConfigError, self).__init__(value)
# self.raw_error = ucd(raw_error) if raw_error is not None else None
#
# Path: dnf/i18n.py
# class UnicodeStream(object):
# def __init__(self, stream, encoding):
# def write(self, s):
# def __getattr__(self, name):
# def _full_ucd_support(encoding):
# def _guess_encoding():
# def setup_locale():
# def setup_stdout():
# def ucd_input(ucstring):
# def ucd(obj):
# def _exact_width_char(uchar):
# def chop_str(msg, chop=None):
# def exact_width(msg):
# def fill_exact_width(msg, fill, chop=None, left=True, prefix='', suffix=''):
# def textwrap_fill(text, width=70, initial_indent='', subsequent_indent=''):
# def _indent_at_beg(line):
# def select_short_long(width, msg_short, msg_long):
# def translation(name):
# def ucd_wrapper(fnc):
# def pgettext(context, message):
# _, P_ = translation("dnf")
# C_ = pgettext
. Output only the next line. | logger.info(_('enabling %s repository'), found.id) |
Given snippet: <|code_start|> 'sylph': 8,
'valefor': 9,
'alexander': 10,
'leviathan': 11,
'odin': 12,
'ifrit': 13,
'diabolos': 14,
'caitsith': 15,
'quetzalcoatl': 16,
'siren': 17,
'unicorn': 18,
'gilgamesh': 19,
'ragnarok': 20,
'pandemonium': 21,
'garuda': 22,
'cerberus': 23,
'kujata': 24,
'bismarck': 25,
'seraph': 26,
'lakshmi': 27,
'asura': 28,
'midgardsormr': 29,
'fairy': 30,
'remora': 31,
'hades': 32
}
ID_SERVER = {v: k for k, v in SERVER_ID.items()}
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .scrubber import Scrubber
import concurrent.futures
import warnings
import pickle
import re
import os
and context:
# Path: pydarkstar/scrubbing/scrubber.py
# class Scrubber(DarkObject):
# def __init__(self):
# super(Scrubber, self).__init__()
#
# def scrub(self):
# """
# Get item metadata.
# """
# return {}
#
# # noinspection PyBroadException
# @staticmethod
# def soup(url, absolute: bool = False, **kwargs):
# """
# Open URL and create tag soup.
#
# :param url: website string
# :type url: str
#
# :param absolute: perform double get request to find absolute url
# :type absolute: bool
# """
# handle = ''
# max_tries = 10
# for i in range(max_tries):
# # noinspection PyPep8
# try:
# if absolute:
# url = requests.get(url).url
# handle = requests.get(url, params=kwargs).text
# break
# except Exception:
# logging.exception('urlopen failed (attempt %d)', i + 1)
# if i == max_tries - 1:
# logging.error('the maximum urlopen attempts have been reached')
# raise
# time.sleep(1)
#
# try:
# s = BeautifulSoup(handle, features='html5lib')
# except bs4.FeatureNotFound:
# s = BeautifulSoup(handle, features='html.parser')
#
# return s
which might include code, classes, or functions. Output only the next line. | class FFXIAHScrubber(Scrubber): |
Here is a snippet: <|code_start|>"""
TestCase for SQL.
"""
# sql database parameters
mysql_params = dict(
hostname='127.0.0.1',
database='dspdb',
username='root',
password='cisco',
)
class TestSQL(unittest.TestCase):
def setUp(self):
<|code_end|>
. Write the next line using the current file imports:
from pydarkstar.database import Database
import sqlalchemy.exc
import unittest
import logging
and context from other files:
# Path: pydarkstar/database.py
# class Database(DarkObject):
# """
# Database connection using sqlalchemy.
#
# :param url: sql database connection url
# """
#
# def __init__(self, url, **kwargs):
# super(Database, self).__init__()
#
# # connect
# self.engine = sqlalchemy.create_engine(url, **kwargs)
#
# # create Session object
# self._Session = sqlalchemy.orm.sessionmaker(bind=self.engine)
#
# def session(self, **kwargs):
# """
# Create session.
# """
# return self._Session(**kwargs)
#
# @contextlib.contextmanager
# def scoped_session(self, rollback=True, fail=False):
# """
# Provide a transactional scope around a series of operations.
#
# :param rollback: rollback transactions after catch
# :param fail: raise error after catch
#
# :type rollback: bool
# :type fail: bool
# """
# session = self._Session()
# try:
# yield session
#
# # commit transactions
# session.commit()
#
# # catch errors
# except sqlalchemy.exc.SQLAlchemyError:
# # log the error
# logging.exception('caught SQL exception')
#
# # rollback transactions
# if rollback:
# session.rollback()
#
# # reraise error
# if fail:
# raise RuntimeError('SQL Failed')
#
# # cleanup
# finally:
# session.close()
#
# @classmethod
# def pymysql(cls, hostname, database, username, password):
# """
# Alternate constructor. dialect=mysql, driver=pymysql
#
# :param hostname: database connection parameter
# :param database: database connection parameter
# :param username: database connection parameter
# :param password: database connection parameter
# """
# url = cls.format_url('mysql', 'pymysql', hostname, database, username, password)
# obj = cls(url)
# return obj
#
# @staticmethod
# def format_url(dialect, driver, hostname, database, username, password):
# """
# Create connection url.
# """
# return '{}://{u}:{p}@{h}/{d}'.format('+'.join([dialect, driver]),
# h=hostname, d=database, u=username, p=password)
#
# def __str__(self):
# return repr(self.engine)
, which may include functions, classes, or code. Output only the next line. | self.db = Database.pymysql(**mysql_params) |
Predict the next line after this snippet: <|code_start|>
class Worker(DarkObject):
"""
Base class for Auction House objects.
:param db: database object
"""
def __init__(self, db, rollback=True, fail=False):
super(Worker, self).__init__()
<|code_end|>
using the current file's imports:
from ..darkobject import DarkObject
from ..database import Database
import contextlib
and any relevant context from other files:
# Path: pydarkstar/database.py
# class Database(DarkObject):
# """
# Database connection using sqlalchemy.
#
# :param url: sql database connection url
# """
#
# def __init__(self, url, **kwargs):
# super(Database, self).__init__()
#
# # connect
# self.engine = sqlalchemy.create_engine(url, **kwargs)
#
# # create Session object
# self._Session = sqlalchemy.orm.sessionmaker(bind=self.engine)
#
# def session(self, **kwargs):
# """
# Create session.
# """
# return self._Session(**kwargs)
#
# @contextlib.contextmanager
# def scoped_session(self, rollback=True, fail=False):
# """
# Provide a transactional scope around a series of operations.
#
# :param rollback: rollback transactions after catch
# :param fail: raise error after catch
#
# :type rollback: bool
# :type fail: bool
# """
# session = self._Session()
# try:
# yield session
#
# # commit transactions
# session.commit()
#
# # catch errors
# except sqlalchemy.exc.SQLAlchemyError:
# # log the error
# logging.exception('caught SQL exception')
#
# # rollback transactions
# if rollback:
# session.rollback()
#
# # reraise error
# if fail:
# raise RuntimeError('SQL Failed')
#
# # cleanup
# finally:
# session.close()
#
# @classmethod
# def pymysql(cls, hostname, database, username, password):
# """
# Alternate constructor. dialect=mysql, driver=pymysql
#
# :param hostname: database connection parameter
# :param database: database connection parameter
# :param username: database connection parameter
# :param password: database connection parameter
# """
# url = cls.format_url('mysql', 'pymysql', hostname, database, username, password)
# obj = cls(url)
# return obj
#
# @staticmethod
# def format_url(dialect, driver, hostname, database, username, password):
# """
# Create connection url.
# """
# return '{}://{u}:{p}@{h}/{d}'.format('+'.join([dialect, driver]),
# h=hostname, d=database, u=username, p=password)
#
# def __str__(self):
# return repr(self.engine)
. Output only the next line. | assert isinstance(db, Database) |
Predict the next line after this snippet: <|code_start|> self.exclude('itemids')
self.exclude('urls')
def __after__(self):
super(Options, self).__after__()
urls = []
for obj in self.urls:
if isinstance(obj, list):
urls.extend(obj)
else:
urls.append(obj)
self.urls = urls
if not self.urls:
self.urls = None
itemids = []
for obj in self.itemids:
if isinstance(obj, list):
itemids.extend(obj)
else:
itemids.append(obj)
self.itemids = itemids
if not self.itemids:
self.itemids = None
try:
self.server = int(self.server)
<|code_end|>
using the current file's imports:
from ...options.basic import BasicOptions
from ...options.output import OutputOptions
from ...scrubbing.ffxiah import SERVER_ID
and any relevant context from other files:
# Path: pydarkstar/scrubbing/ffxiah.py
# SERVER_ID = {
# 'bahamut': 1,
# 'shiva': 2,
# 'titan': 3,
# 'ramuh': 4,
# 'phoenix': 5,
# 'carbuncle': 6,
# 'fenrir': 7,
# 'sylph': 8,
# 'valefor': 9,
# 'alexander': 10,
# 'leviathan': 11,
# 'odin': 12,
# 'ifrit': 13,
# 'diabolos': 14,
# 'caitsith': 15,
# 'quetzalcoatl': 16,
# 'siren': 17,
# 'unicorn': 18,
# 'gilgamesh': 19,
# 'ragnarok': 20,
# 'pandemonium': 21,
# 'garuda': 22,
# 'cerberus': 23,
# 'kujata': 24,
# 'bismarck': 25,
# 'seraph': 26,
# 'lakshmi': 27,
# 'asura': 28,
# 'midgardsormr': 29,
# 'fairy': 30,
# 'remora': 31,
# 'hades': 32
# }
. Output only the next line. | if self.server not in SERVER_ID.values(): |
Predict the next line after this snippet: <|code_start|>"""
This script demonstrates using the crab client directly or through the
:func:`crabpy.client.crab_request` function.
"""
crab = crab_factory()
res = crab.service.ListGemeentenByGewestId(1)
print(res)
res = crab.service.ListPostkantonsByGemeenteId(71)
print(res)
<|code_end|>
using the current file's imports:
from crabpy.client import crab_factory, crab_request
and any relevant context from other files:
# Path: crabpy/client.py
# def crab_factory(**kwargs):
# """
# Factory that generates a CRAB client.
#
# A few parameters will be handled by the factory, other parameters will
# be passed on to the client.
#
# :param wsdl: `Optional.` Allows overriding the default CRAB wsdl url.
# :param proxy: `Optional.` A dictionary of proxy information that is passed
# to the underlying :class:`suds.client.Client`
# :rtype: :class:`suds.client.Client`
# """
# if "wsdl" in kwargs:
# wsdl = kwargs["wsdl"]
# del kwargs["wsdl"]
# else:
# wsdl = "http://crab.agiv.be/wscrab/wscrab.svc?wsdl"
# log.info("Creating CRAB client with wsdl: %s", wsdl)
# c = Client(wsdl, **kwargs)
# return c
#
# def crab_request(client, action, *args):
# """
# Utility function that helps making requests to the CRAB service.
#
# :param client: A :class:`suds.client.Client` for the CRAB service.
# :param string action: Which method to call, eg. `ListGewesten`
# :returns: Result of the SOAP call.
#
# .. versionadded:: 0.3.0
# """
# log.debug("Calling %s on CRAB service.", action)
# return getattr(client.service, action)(*args)
. Output only the next line. | res = crab_request(crab, 'ListGemeentenByGewestId', 1) |
Predict the next line after this snippet: <|code_start|>"""
This script demonstrates using the capakey client through the
:func:`crabpy.client.capakey_request` function.
"""
capakey = capakey_factory(
user='USER',
password='PASSWORD'
)
<|code_end|>
using the current file's imports:
from crabpy.client import capakey_factory, capakey_request
and any relevant context from other files:
# Path: crabpy/client.py
# def crab_factory(**kwargs):
# def crab_request(client, action, *args):
# def __init__(self, base_url, api_key):
# def _get_list(self, url, response_key, params=None):
# def _get(self, url, params=None):
# def get_gemeente(self, gemeente_id):
# def get_gemeenten(self, gemeentenaam=None, status=None):
# def get_postinfo(self, postinfo_id):
# def get_postinfos(self, gemeentenaam=None):
# def get_straatnaam(self, straatnaam_id):
# def get_straatnamen(
# self, straatnaam=None, gemeentenaam=None, niscode=None, status=None
# ):
# def get_adres_match(
# self,
# gemeentenaam=None,
# niscode=None,
# postcode=None,
# kadaster_straatcode=None,
# rr_straatcode=None,
# straatnaam=None,
# huisnummer=None,
# index=None,
# busnummer=None,
# ):
# def get_adres(self, adres_id):
# def get_adressen(
# self,
# gemeentenaam=None,
# postcode=None,
# straatnaam=None,
# homoniem_toevoeging=None,
# huisnummer=None,
# busnummer=None,
# niscode=None,
# status=None,
# ):
# def get_perceel(self, perceel_id):
# def get_percelen(self, status=None):
# def get_gebouw(self, gebouw_id):
# def get_gebouwen(self, status=None):
# class AdressenRegisterClientException(Exception):
# class AdressenRegisterClient:
. Output only the next line. | res = capakey_request(capakey, 'ListAdmGemeenten', 1) |
Given snippet: <|code_start|>
try:
except:
def connection_error(url, headers={}, params={}):
raise requests.exceptions.ConnectionError
def request_exception(url, headers={}, params={}):
raise requests.exceptions.RequestException
class TestCapakeyRestGateway:
def test_list_gemeenten(self, capakey_rest_gateway,
municipalities_response):
res = capakey_rest_gateway.list_gemeenten()
assert isinstance(res, list)
def test_get_gemeente_by_id(self, capakey_rest_gateway,
municipality_response):
res = capakey_rest_gateway.get_gemeente_by_id(44021)
assert isinstance(res, Gemeente)
assert res.id == 44021
def test_get_gemeente_by_invalid_id(self, capakey_rest_gateway,
mocked_responses):
url = re.compile(
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
import unittest2 as unittest
import unittest # noqa
import pytest
import requests
from tests.conftest import CAPAKEY_URL
from crabpy.gateway.capakey import (
Gemeente,
Afdeling,
Sectie,
Perceel,
capakey_rest_gateway_request,
GatewayRuntimeException
)
from unittest.mock import MagicMock, patch
from unittest.mock import MagicMock, patch
from crabpy.gateway.exception import GatewayResourceNotFoundException
and context:
# Path: tests/conftest.py
# CAPAKEY_URL = 'https://geoservices.informatievlaanderen.be/capakey/api/v2'
#
# Path: crabpy/gateway/capakey.py
# def capakey_rest_gateway_request(url, headers={}, params={}):
# def __init__(self, **kwargs):
# def _parse_centroid(center):
# def _parse_bounding_box(bounding_box):
# def list_gemeenten(self, sort=1):
# def creator():
# def get_gemeente_by_id(self, id):
# def creator():
# def list_kadastrale_afdelingen(self):
# def creator():
# def list_kadastrale_afdelingen_by_gemeente(self, gemeente, sort=1):
# def creator():
# def get_kadastrale_afdeling_by_id(self, aid):
# def creator():
# def list_secties_by_afdeling(self, afdeling):
# def creator():
# def get_sectie_by_id_and_afdeling(self, id, afdeling):
# def creator():
# def parse_percid(self, capakey):
# def parse_capakey(self, percid):
# def list_percelen_by_sectie(self, sectie):
# def creator():
# def get_perceel_by_id_and_sectie(self, id, sectie):
# def creator():
# def _get_perceel_by(self, url, cache_key):
# def creator():
# def get_perceel_by_capakey(self, capakey):
# def get_perceel_by_coordinates(self, x, y):
# def get_perceel_by_percid(self, percid):
# def __init__(self, **kwargs):
# def set_gateway(self, gateway):
# def clear_gateway(self):
# def check_gateway(self):
# def __str__(self):
# def check_lazy_load_gemeente(f):
# def wrapper(self):
# def __init__(
# self, id, naam=None,
# centroid=None, bounding_box=None,
# shape=None, **kwargs
# ):
# def naam(self):
# def centroid(self):
# def bounding_box(self):
# def afdelingen(self):
# def __unicode__(self):
# def __repr__(self):
# def check_lazy_load_afdeling(f):
# def wrapper(self):
# def __init__(
# self, id, naam=None, gemeente=None,
# centroid=None, bounding_box=None,
# shape=None, **kwargs
# ):
# def set_gateway(self, gateway):
# def clear_gateway(self):
# def naam(self):
# def gemeente(self):
# def centroid(self):
# def bounding_box(self):
# def secties(self):
# def __unicode__(self):
# def __repr__(self):
# def check_lazy_load_sectie(f):
# def wrapper(self):
# def __init__(
# self, id, afdeling,
# centroid=None, bounding_box=None,
# shape=None, **kwargs
# ):
# def set_gateway(self, gateway):
# def clear_gateway(self):
# def centroid(self):
# def bounding_box(self):
# def percelen(self):
# def __unicode__(self):
# def __repr__(self):
# def check_lazy_load_perceel(f):
# def wrapper(self):
# def __init__(
# self, id, sectie, capakey, percid, adres=None,
# capatype=None, cashkey=None,
# centroid=None, bounding_box=None,
# shape=None, **kwargs
# ):
# def set_gateway(self, gateway):
# def clear_gateway(self):
# def get_percid_from_capakey(capakey):
# def get_capakey_from_percid(percid):
# def _split_capakey(self):
# def centroid(self):
# def bounding_box(self):
# def capatype(self):
# def cashkey(self):
# def __unicode__(self):
# def __repr__(self):
# class CapakeyRestGateway:
# class GatewayObject:
# class Gemeente(GatewayObject):
# class Afdeling(GatewayObject):
# class Sectie(GatewayObject):
# class Perceel(GatewayObject):
which might include code, classes, or functions. Output only the next line. | fr'{CAPAKEY_URL}/municipality/[^/]+\?' |
Using the snippet: <|code_start|>
try:
except ImportError: # pragma NO COVER
try:
except:
def connection_error(url, headers={}, params={}):
raise requests.exceptions.ConnectionError
def request_exception(url, headers={}, params={}):
raise requests.exceptions.RequestException
class TestCapakeyRestGateway:
def test_list_gemeenten(self, capakey_rest_gateway,
municipalities_response):
res = capakey_rest_gateway.list_gemeenten()
assert isinstance(res, list)
def test_get_gemeente_by_id(self, capakey_rest_gateway,
municipality_response):
res = capakey_rest_gateway.get_gemeente_by_id(44021)
<|code_end|>
, determine the next line of code. You have imports:
import re
import unittest2 as unittest
import unittest # noqa
import pytest
import requests
from tests.conftest import CAPAKEY_URL
from crabpy.gateway.capakey import (
Gemeente,
Afdeling,
Sectie,
Perceel,
capakey_rest_gateway_request,
GatewayRuntimeException
)
from unittest.mock import MagicMock, patch
from unittest.mock import MagicMock, patch
from crabpy.gateway.exception import GatewayResourceNotFoundException
and context (class names, function names, or code) available:
# Path: tests/conftest.py
# CAPAKEY_URL = 'https://geoservices.informatievlaanderen.be/capakey/api/v2'
#
# Path: crabpy/gateway/capakey.py
# def capakey_rest_gateway_request(url, headers={}, params={}):
# def __init__(self, **kwargs):
# def _parse_centroid(center):
# def _parse_bounding_box(bounding_box):
# def list_gemeenten(self, sort=1):
# def creator():
# def get_gemeente_by_id(self, id):
# def creator():
# def list_kadastrale_afdelingen(self):
# def creator():
# def list_kadastrale_afdelingen_by_gemeente(self, gemeente, sort=1):
# def creator():
# def get_kadastrale_afdeling_by_id(self, aid):
# def creator():
# def list_secties_by_afdeling(self, afdeling):
# def creator():
# def get_sectie_by_id_and_afdeling(self, id, afdeling):
# def creator():
# def parse_percid(self, capakey):
# def parse_capakey(self, percid):
# def list_percelen_by_sectie(self, sectie):
# def creator():
# def get_perceel_by_id_and_sectie(self, id, sectie):
# def creator():
# def _get_perceel_by(self, url, cache_key):
# def creator():
# def get_perceel_by_capakey(self, capakey):
# def get_perceel_by_coordinates(self, x, y):
# def get_perceel_by_percid(self, percid):
# def __init__(self, **kwargs):
# def set_gateway(self, gateway):
# def clear_gateway(self):
# def check_gateway(self):
# def __str__(self):
# def check_lazy_load_gemeente(f):
# def wrapper(self):
# def __init__(
# self, id, naam=None,
# centroid=None, bounding_box=None,
# shape=None, **kwargs
# ):
# def naam(self):
# def centroid(self):
# def bounding_box(self):
# def afdelingen(self):
# def __unicode__(self):
# def __repr__(self):
# def check_lazy_load_afdeling(f):
# def wrapper(self):
# def __init__(
# self, id, naam=None, gemeente=None,
# centroid=None, bounding_box=None,
# shape=None, **kwargs
# ):
# def set_gateway(self, gateway):
# def clear_gateway(self):
# def naam(self):
# def gemeente(self):
# def centroid(self):
# def bounding_box(self):
# def secties(self):
# def __unicode__(self):
# def __repr__(self):
# def check_lazy_load_sectie(f):
# def wrapper(self):
# def __init__(
# self, id, afdeling,
# centroid=None, bounding_box=None,
# shape=None, **kwargs
# ):
# def set_gateway(self, gateway):
# def clear_gateway(self):
# def centroid(self):
# def bounding_box(self):
# def percelen(self):
# def __unicode__(self):
# def __repr__(self):
# def check_lazy_load_perceel(f):
# def wrapper(self):
# def __init__(
# self, id, sectie, capakey, percid, adres=None,
# capatype=None, cashkey=None,
# centroid=None, bounding_box=None,
# shape=None, **kwargs
# ):
# def set_gateway(self, gateway):
# def clear_gateway(self):
# def get_percid_from_capakey(capakey):
# def get_capakey_from_percid(percid):
# def _split_capakey(self):
# def centroid(self):
# def bounding_box(self):
# def capatype(self):
# def cashkey(self):
# def __unicode__(self):
# def __repr__(self):
# class CapakeyRestGateway:
# class GatewayObject:
# class Gemeente(GatewayObject):
# class Afdeling(GatewayObject):
# class Sectie(GatewayObject):
# class Perceel(GatewayObject):
. Output only the next line. | assert isinstance(res, Gemeente) |
Here is a snippet: <|code_start|>"""
This module contains an opionated gateway for the crab webservice.
.. versionadded:: 0.3.0
"""
log = logging.getLogger(__name__)
parent_dir = os.path.dirname(__file__)
data_dir = os.path.join(os.path.dirname(__file__), '..', 'data')
deelgemeenten_json = json.load(
open(os.path.join(data_dir, "deelgemeenten.json"),
encoding='utf-8')
)
def crab_gateway_request(client, method, *args):
"""
Utility function that helps making requests to the CRAB service.
This is a specialised version of :func:`crabpy.client.crab_request` that
allows adding extra functionality for the calls made by the gateway.
:param client: A :class:`suds.client.Client` for the CRAB service.
:param string action: Which method to call, eg. `ListGewesten`
:returns: Result of the SOAP call.
"""
try:
<|code_end|>
. Write the next line using the current file imports:
import json
import logging
import math
import os
from dogpile.cache import make_region
from suds import WebFault
from crabpy.client import crab_request
from crabpy.gateway.exception import GatewayResourceNotFoundException
from crabpy.gateway.exception import GatewayRuntimeException
and context from other files:
# Path: crabpy/client.py
# def crab_request(client, action, *args):
# """
# Utility function that helps making requests to the CRAB service.
#
# :param client: A :class:`suds.client.Client` for the CRAB service.
# :param string action: Which method to call, eg. `ListGewesten`
# :returns: Result of the SOAP call.
#
# .. versionadded:: 0.3.0
# """
# log.debug("Calling %s on CRAB service.", action)
# return getattr(client.service, action)(*args)
#
# Path: crabpy/gateway/exception.py
# class GatewayResourceNotFoundException(GatewayException):
# """
# An exception that signifies that no results where found.
# """
# def __init__(self):
# GatewayException.__init__(self, 'This resource was not found.')
#
# Path: crabpy/gateway/exception.py
# class GatewayRuntimeException(GatewayException):
# """
# An exception that signifies a soap request went wrong.
#
# """
#
# soapfault = None
# """
# The soapfault that was generated by the service.
# """
#
# def __init__(self, message, soapfault):
# GatewayException.__init__(self, message)
# self.soapfault = soapfault
, which may include functions, classes, or code. Output only the next line. | return crab_request(client, method, *args) |
Based on the snippet: <|code_start|> v
)for k, v in tmp.items()
]
if self.caches['permanent'].is_configured:
key = 'ListGewesten#%s' % sort
gewesten = self.caches['permanent'].get_or_create(key, creator)
else:
gewesten = creator()
for g in gewesten:
g.set_gateway(self)
return gewesten
def get_gewest_by_id(self, id):
"""
Get a `gewest` by id.
:param integer id: The id of a `gewest`.
:rtype: A :class:`Gewest`.
"""
def creator():
nl = crab_gateway_request(
self.client, 'GetGewestByGewestIdAndTaalCode', id, 'nl'
)
fr = crab_gateway_request(
self.client, 'GetGewestByGewestIdAndTaalCode', id, 'fr'
)
de = crab_gateway_request(
self.client, 'GetGewestByGewestIdAndTaalCode', id, 'de'
)
if nl == None:
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import logging
import math
import os
from dogpile.cache import make_region
from suds import WebFault
from crabpy.client import crab_request
from crabpy.gateway.exception import GatewayResourceNotFoundException
from crabpy.gateway.exception import GatewayRuntimeException
and context (classes, functions, sometimes code) from other files:
# Path: crabpy/client.py
# def crab_request(client, action, *args):
# """
# Utility function that helps making requests to the CRAB service.
#
# :param client: A :class:`suds.client.Client` for the CRAB service.
# :param string action: Which method to call, eg. `ListGewesten`
# :returns: Result of the SOAP call.
#
# .. versionadded:: 0.3.0
# """
# log.debug("Calling %s on CRAB service.", action)
# return getattr(client.service, action)(*args)
#
# Path: crabpy/gateway/exception.py
# class GatewayResourceNotFoundException(GatewayException):
# """
# An exception that signifies that no results where found.
# """
# def __init__(self):
# GatewayException.__init__(self, 'This resource was not found.')
#
# Path: crabpy/gateway/exception.py
# class GatewayRuntimeException(GatewayException):
# """
# An exception that signifies a soap request went wrong.
#
# """
#
# soapfault = None
# """
# The soapfault that was generated by the service.
# """
#
# def __init__(self, message, soapfault):
# GatewayException.__init__(self, message)
# self.soapfault = soapfault
. Output only the next line. | raise GatewayResourceNotFoundException() |
Next line prediction: <|code_start|>
.. versionadded:: 0.3.0
"""
log = logging.getLogger(__name__)
parent_dir = os.path.dirname(__file__)
data_dir = os.path.join(os.path.dirname(__file__), '..', 'data')
deelgemeenten_json = json.load(
open(os.path.join(data_dir, "deelgemeenten.json"),
encoding='utf-8')
)
def crab_gateway_request(client, method, *args):
"""
Utility function that helps making requests to the CRAB service.
This is a specialised version of :func:`crabpy.client.crab_request` that
allows adding extra functionality for the calls made by the gateway.
:param client: A :class:`suds.client.Client` for the CRAB service.
:param string action: Which method to call, eg. `ListGewesten`
:returns: Result of the SOAP call.
"""
try:
return crab_request(client, method, *args)
except WebFault as wf:
<|code_end|>
. Use current file imports:
(import json
import logging
import math
import os
from dogpile.cache import make_region
from suds import WebFault
from crabpy.client import crab_request
from crabpy.gateway.exception import GatewayResourceNotFoundException
from crabpy.gateway.exception import GatewayRuntimeException)
and context including class names, function names, or small code snippets from other files:
# Path: crabpy/client.py
# def crab_request(client, action, *args):
# """
# Utility function that helps making requests to the CRAB service.
#
# :param client: A :class:`suds.client.Client` for the CRAB service.
# :param string action: Which method to call, eg. `ListGewesten`
# :returns: Result of the SOAP call.
#
# .. versionadded:: 0.3.0
# """
# log.debug("Calling %s on CRAB service.", action)
# return getattr(client.service, action)(*args)
#
# Path: crabpy/gateway/exception.py
# class GatewayResourceNotFoundException(GatewayException):
# """
# An exception that signifies that no results where found.
# """
# def __init__(self):
# GatewayException.__init__(self, 'This resource was not found.')
#
# Path: crabpy/gateway/exception.py
# class GatewayRuntimeException(GatewayException):
# """
# An exception that signifies a soap request went wrong.
#
# """
#
# soapfault = None
# """
# The soapfault that was generated by the service.
# """
#
# def __init__(self, message, soapfault):
# GatewayException.__init__(self, message)
# self.soapfault = soapfault
. Output only the next line. | err = GatewayRuntimeException( |
Predict the next line after this snippet: <|code_start|>This module contains an opionated gateway for the capakey webservice.
.. versionadded:: 0.2.0
"""
log = logging.getLogger(__name__)
def capakey_rest_gateway_request(url, headers={}, params={}):
"""
Utility function that helps making requests to the CAPAKEY REST service.
:param string url: URL to request.
:param dict headers: Headers to send with the URL.
:param dict params: Parameters to send with the URL.
:returns: Result of the call.
"""
try:
res = requests.get(url, headers=headers, params=params)
res.raise_for_status()
return res
except requests.ConnectionError as ce:
raise GatewayRuntimeException(
'Could not execute request due to connection problems:\n%s' % repr(ce),
ce
)
except requests.HTTPError as he:
<|code_end|>
using the current file's imports:
import json
import logging
import requests
import re
import re
import re
import re
import re
from dogpile.cache import make_region
from crabpy.gateway.exception import GatewayResourceNotFoundException
from crabpy.gateway.exception import GatewayRuntimeException
and any relevant context from other files:
# Path: crabpy/gateway/exception.py
# class GatewayResourceNotFoundException(GatewayException):
# """
# An exception that signifies that no results where found.
# """
# def __init__(self):
# GatewayException.__init__(self, 'This resource was not found.')
#
# Path: crabpy/gateway/exception.py
# class GatewayRuntimeException(GatewayException):
# """
# An exception that signifies a soap request went wrong.
#
# """
#
# soapfault = None
# """
# The soapfault that was generated by the service.
# """
#
# def __init__(self, message, soapfault):
# GatewayException.__init__(self, message)
# self.soapfault = soapfault
. Output only the next line. | raise GatewayResourceNotFoundException() |
Predict the next line after this snippet: <|code_start|>"""
This module contains an opionated gateway for the capakey webservice.
.. versionadded:: 0.2.0
"""
log = logging.getLogger(__name__)
def capakey_rest_gateway_request(url, headers={}, params={}):
"""
Utility function that helps making requests to the CAPAKEY REST service.
:param string url: URL to request.
:param dict headers: Headers to send with the URL.
:param dict params: Parameters to send with the URL.
:returns: Result of the call.
"""
try:
res = requests.get(url, headers=headers, params=params)
res.raise_for_status()
return res
except requests.ConnectionError as ce:
<|code_end|>
using the current file's imports:
import json
import logging
import requests
import re
import re
import re
import re
import re
from dogpile.cache import make_region
from crabpy.gateway.exception import GatewayResourceNotFoundException
from crabpy.gateway.exception import GatewayRuntimeException
and any relevant context from other files:
# Path: crabpy/gateway/exception.py
# class GatewayResourceNotFoundException(GatewayException):
# """
# An exception that signifies that no results where found.
# """
# def __init__(self):
# GatewayException.__init__(self, 'This resource was not found.')
#
# Path: crabpy/gateway/exception.py
# class GatewayRuntimeException(GatewayException):
# """
# An exception that signifies a soap request went wrong.
#
# """
#
# soapfault = None
# """
# The soapfault that was generated by the service.
# """
#
# def __init__(self, message, soapfault):
# GatewayException.__init__(self, message)
# self.soapfault = soapfault
. Output only the next line. | raise GatewayRuntimeException( |
Here is a snippet: <|code_start|>
class ElementTests(unittest.TestCase):
def testAction(self):
a = Action('http://soap.test.org/test')
<|code_end|>
. Write the next line using the current file imports:
import unittest
from suds.sax.element import Element
from crabpy.wsa import wsa
from crabpy.wsa import Action
from crabpy.wsa import To
from crabpy.wsa import MessageID
and context from other files:
# Path: crabpy/wsa.py
# class Action(Object):
# class MessageID(Object):
# class To(Object):
# def __init__(self, action):
# def xml(self):
# def xml(self):
# def __init__(self, location):
# def xml(self):
, which may include functions, classes, or code. Output only the next line. | act = Element('Action', ns=wsa) |
Next line prediction: <|code_start|> return self._data
class Viewer:
"""
This class implements the main protein viewer interface and lets you add
new structures to be displayed as well as configure their appearance. The
interface is closely based on the JS pv interface with a few adjustments
to make it more pythonic.
"""
def __init__(self):
self._style = 'phong'
self._width = 500
self._height = 500
self._commands = []
def show(self):
replacements = {
'id': str(uuid.uuid4()),
'width': self._width,
'height': self._height,
'style': self._style
}
begin = _VIEWER_SCAFFOLD_BEGIN % replacements
commands_text = '\n'.join([cmd.to_js() for cmd in self._commands])
complete_text = '\n'.join((begin, commands_text, _VIEWER_SCAFFOLD_END))
return Rendered(complete_text)
def _add_viewer_command(self, command, *args, **kwargs):
<|code_end|>
. Use current file imports:
(import uuid
from .command import Command)
and context including class names, function names, or small code snippets from other files:
# Path: pvviewer/command.py
# class Command:
# """
# Simple object for holding the receiver, method and arguments of a method
# call that can be translated to JS.
# """
#
# def __init__(self, receiver, command, args, kwargs, terminate=False):
# self._receiver = receiver
# self._command = command
# self._args = args
# self._kwargs = kwargs
# self._terminate = terminate
#
# def to_js(self):
# all_args = [', '.join(encode(arg) for arg in self._args)]
# if self._kwargs:
# all_args.append(encode(self._kwargs))
#
# args_string = ', '.join(all_args)
# t = self._terminate and ';' or ''
# if not self._receiver:
# call = self._command
# else:
# call = '%s.%s' % (self._receiver, self._command)
# return '%s(%s)%s' % (call, args_string, t)
. Output only the next line. | self._commands.append(Command('viewer', command, args, kwargs)) |
Next line prediction: <|code_start|>
# BUILDOUT_DIR is for access to the "surrounding" buildout,
# for instance for BUILDOUT_DIR/var/static files to give
# django-staticfiles a proper place to place all collected
# static files.
BUILDOUT_DIR = os.path.abspath(os.path.join(SETTINGS_DIR, '..'))
# Absolute path to the directory that holds user-uploaded
# media.
MEDIA_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'media')
# Absolute path to the directory where django-staticfiles'
# "bin/django build_static" places all collected static
# files from all applications' /media directory.
STATIC_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'static')
# URL that handles the media served from MEDIA_ROOT. Make
# sure to use a trailing slash if there is a path component
# (optional in other cases).
MEDIA_URL = '/media/'
# URL for the per-application /media static files collected
# by django-staticfiles. Use it in templates like "{{
# MEDIA_URL }}mypackage/my.css".
STATIC_URL = '/static_media/'
# URL prefix for admin media -- CSS, JavaScript and
# images. Make sure to use a trailing slash. Uses
# STATIC_URL as django-staticfiles nicely collects admin's
# static media into STATIC_ROOT/admin.
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
STATICFILES_FINDERS = STATICFILES_FINDERS
<|code_end|>
. Use current file imports:
(import os
from lizard_ui.settingshelper import setup_logging
from lizard_ui.settingshelper import STATICFILES_FINDERS)
and context including class names, function names, or small code snippets from other files:
# Path: lizard_ui/settingshelper.py
# def setup_logging(buildout_dir,
# console_level='DEBUG',
# file_level='WARN',
# sentry_level=None,
# sql=False):
# """Return configuration dict for logging.
#
# Some keyword arguments can be used to configure the logging.
#
# - ``console_level='DEBUG'`` sets the console level. None means quiet.
#
# - ``file_level='WARN'`` sets the var/log/django.log level. None means
# quiet.
#
# - ``sentry_level=None`` sets the sentry level. None means sentry logging
# is removed from the logging.
#
# - ``sql=False`` switches sql statement logging on or off.
#
# """
# result = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'formatters': {
# 'verbose': {
# 'format': '%(asctime)s %(name)s %(levelname)s\n%(message)s',
# },
# 'simple': {
# 'format': '%(levelname)s %(message)s'
# },
# },
# 'handlers': {
# 'null': {
# 'level': 'DEBUG',
# 'class': 'django.utils.log.NullHandler',
# },
# 'console': {
# 'level': console_level,
# 'class': 'logging.StreamHandler',
# 'formatter': 'simple'
# },
# 'logfile': {
# 'level': file_level,
# 'class': 'logging.FileHandler',
# 'formatter': 'verbose',
# 'filename': os.path.join(buildout_dir,
# 'var', 'log', 'django.log'),
# },
# 'sentry': {
# 'level': sentry_level,
# 'class': 'raven.contrib.django.handlers.SentryHandler',
# },
# },
# 'loggers': {
# '': {
# 'handlers': [],
# 'propagate': True,
# 'level': 'DEBUG',
# },
# 'django.db.backends': {
# 'handlers': ['null'], # Quiet by default!
# 'propagate': False,
# 'level': 'DEBUG',
# },
# },
# }
# if console_level is not None:
# result['loggers']['']['handlers'].append('console')
# if file_level is not None:
# result['loggers']['']['handlers'].append('logfile')
# if sentry_level is not None:
# result['loggers']['']['handlers'].append('sentry')
# else:
# # When sentry is still in the handlers sentry needs to be installed
# # which gave import errors in Django 1.4.
# del result['handlers']['sentry']
# if sql:
# result['loggers']['django.db.backends']['handlers'] = [
# 'console', 'logfile']
# return result
#
# Path: lizard_ui/settingshelper.py
# STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# # Enable support for django-compressor.
# 'compressor.finders.CompressorFinder',
# )
. Output only the next line. | LOGGING = setup_logging(BUILDOUT_DIR) |
Given snippet: <|code_start|>SETTINGS_DIR = os.path.dirname(os.path.realpath(__file__))
# BUILDOUT_DIR is for access to the "surrounding" buildout,
# for instance for BUILDOUT_DIR/var/static files to give
# django-staticfiles a proper place to place all collected
# static files.
BUILDOUT_DIR = os.path.abspath(os.path.join(SETTINGS_DIR, '..'))
# Absolute path to the directory that holds user-uploaded
# media.
MEDIA_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'media')
# Absolute path to the directory where django-staticfiles'
# "bin/django build_static" places all collected static
# files from all applications' /media directory.
STATIC_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'static')
# URL that handles the media served from MEDIA_ROOT. Make
# sure to use a trailing slash if there is a path component
# (optional in other cases).
MEDIA_URL = '/media/'
# URL for the per-application /media static files collected
# by django-staticfiles. Use it in templates like "{{
# MEDIA_URL }}mypackage/my.css".
STATIC_URL = '/static_media/'
# URL prefix for admin media -- CSS, JavaScript and
# images. Make sure to use a trailing slash. Uses
# STATIC_URL as django-staticfiles nicely collects admin's
# static media into STATIC_ROOT/admin.
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
from lizard_ui.settingshelper import setup_logging
from lizard_ui.settingshelper import STATICFILES_FINDERS
and context:
# Path: lizard_ui/settingshelper.py
# def setup_logging(buildout_dir,
# console_level='DEBUG',
# file_level='WARN',
# sentry_level=None,
# sql=False):
# """Return configuration dict for logging.
#
# Some keyword arguments can be used to configure the logging.
#
# - ``console_level='DEBUG'`` sets the console level. None means quiet.
#
# - ``file_level='WARN'`` sets the var/log/django.log level. None means
# quiet.
#
# - ``sentry_level=None`` sets the sentry level. None means sentry logging
# is removed from the logging.
#
# - ``sql=False`` switches sql statement logging on or off.
#
# """
# result = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'formatters': {
# 'verbose': {
# 'format': '%(asctime)s %(name)s %(levelname)s\n%(message)s',
# },
# 'simple': {
# 'format': '%(levelname)s %(message)s'
# },
# },
# 'handlers': {
# 'null': {
# 'level': 'DEBUG',
# 'class': 'django.utils.log.NullHandler',
# },
# 'console': {
# 'level': console_level,
# 'class': 'logging.StreamHandler',
# 'formatter': 'simple'
# },
# 'logfile': {
# 'level': file_level,
# 'class': 'logging.FileHandler',
# 'formatter': 'verbose',
# 'filename': os.path.join(buildout_dir,
# 'var', 'log', 'django.log'),
# },
# 'sentry': {
# 'level': sentry_level,
# 'class': 'raven.contrib.django.handlers.SentryHandler',
# },
# },
# 'loggers': {
# '': {
# 'handlers': [],
# 'propagate': True,
# 'level': 'DEBUG',
# },
# 'django.db.backends': {
# 'handlers': ['null'], # Quiet by default!
# 'propagate': False,
# 'level': 'DEBUG',
# },
# },
# }
# if console_level is not None:
# result['loggers']['']['handlers'].append('console')
# if file_level is not None:
# result['loggers']['']['handlers'].append('logfile')
# if sentry_level is not None:
# result['loggers']['']['handlers'].append('sentry')
# else:
# # When sentry is still in the handlers sentry needs to be installed
# # which gave import errors in Django 1.4.
# del result['handlers']['sentry']
# if sql:
# result['loggers']['django.db.backends']['handlers'] = [
# 'console', 'logfile']
# return result
#
# Path: lizard_ui/settingshelper.py
# STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# # Enable support for django-compressor.
# 'compressor.finders.CompressorFinder',
# )
which might include code, classes, or functions. Output only the next line. | STATICFILES_FINDERS = STATICFILES_FINDERS |
Based on the snippet: <|code_start|> """View mixin that adds next url redirect parsing.
This can be used for login or logout functionality.
"""
default_redirect = '/'
def next_url(self):
# Used to fill the hidden field in the LoginForm
return self.request.GET.get('next', self.default_redirect)
def check_url(self, next_url=None):
"""Check if the next url is valid."""
if next_url is None:
next_url = self.default_redirect
netloc = urlparse.urlparse(next_url)[1]
# Security check -- don't allow redirection to a different
# host.
if netloc and netloc != self.request.get_host():
return self.default_redirect
return next_url
class LoginView(ViewContextMixin, FormView, ViewNextURLMixin):
"""Logs in the user."""
template_name = 'lizard_ui/login.html'
<|code_end|>
, predict the immediate next line with the help of imports:
from copy import copy
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseServerError
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template import Context, loader
from django.utils.translation import check_for_language
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View
from django.views.generic.edit import FormView
from lizard_ui.forms import LoginForm, ChangeLanguageForm
from lizard_ui.layout import Action
from lizard_ui.models import ApplicationScreen
from lizard_ui.models import ApplicationIcon
from lizard_ui.models import CustomerLogo
from lizard_ui import uisettings
import logging
import urlparse
import urllib
import json
and context (classes, functions, sometimes code) from other files:
# Path: lizard_ui/forms.py
# class LoginForm(AuthenticationForm):
# username = forms.CharField(
# max_length=100,
# label=_('Username'),
# required=True)
# password = forms.CharField(
# max_length=100,
# label=_('Password'),
# widget=forms.PasswordInput(),
# required=True)
# next_url = forms.CharField(
# max_length=100,
# required=False,
# widget=forms.HiddenInput())
#
# class ChangeLanguageForm(forms.Form):
# language = forms.ChoiceField(
# choices=settings.LANGUAGES
# )
#
# Path: lizard_ui/layout.py
# class Action(object):
# """Small wrapper for actions, just to make the attributes explicit.
#
# A dictionary with keys instead of attributes is just as fine for the
# templates, but for documentation purposes this class is handier.
#
# - **name**: text of the link.
#
# - **element_id**: id the element will get in the HTML. The id is prepended
# with 'action-'.
#
# - **icon**: icon class according to
# http://twitter.github.com/bootstrap/base-css.html#icons, optional.
#
# - **url**: where the link points at.
#
# - **description**: optional description for helpful popup.
#
# - **klass**: optional CSS class to give to the link. This allows you to
# tie custom javascript handling to the link.
#
# - **data_attributes**: optional dictionary to set data-xyz attributes on
# the action that can be used by css that reacts on the actions.
#
# """
# def __init__(self, name=None, element_id=None, icon=None, url='#',
# description=None, klass=None, data_attributes=None,
# target=None):
# self.name = name
# if element_id is not None:
# self.element_id = 'action-%s' % element_id
# self.icon = icon
# self.url = url
# self.description = description
# self.klass = klass
# self.data_attributes = data_attributes
# self.target = target # &$%^!, this is for opening in a new tab.
#
# Path: lizard_ui/uisettings.py
# SITE_TITLE = getattr(settings, 'UI_SITE_TITLE', 'Lizard')
# SHOW_LOGIN = getattr(settings, 'UI_SHOW_LOGIN', True)
# SITE_ACTIONS = getattr(settings, 'UI_SITE_ACTIONS', [])
# GAUGES_SITE_ID = getattr(settings, 'UI_GAUGES_SITE_ID', None)
# SHOW_LANGUAGE_PICKER = getattr(settings, 'UI_SHOW_LANGUAGE_PICKER', False)
. Output only the next line. | form_class = LoginForm |
Next line prediction: <|code_start|> 'error_message': errors}),
mimetype='application/json')
return self.form_invalid(form)
class LogoutView(View, ViewNextURLMixin):
"""
Logout for ajax and regualar GET/POSTS.
This View does a logout for the user,
redirects to the next url when it's given.
When the request is done via Ajax an empty response is returned.
"""
def get(self, request, *args, **kwargs):
logout(request)
if request.is_ajax():
return HttpResponse("")
redirect_to = self.check_url(self.next_url())
return HttpResponseRedirect(redirect_to)
def post(self, *args, **kwargs):
return self.get(*args, **kwargs)
class ChangeLanguageView(ViewContextMixin, FormView, ViewNextURLMixin):
"""Shows a change language modal form."""
template_name = 'lizard_ui/change_language.html'
<|code_end|>
. Use current file imports:
(from copy import copy
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseServerError
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template import Context, loader
from django.utils.translation import check_for_language
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View
from django.views.generic.edit import FormView
from lizard_ui.forms import LoginForm, ChangeLanguageForm
from lizard_ui.layout import Action
from lizard_ui.models import ApplicationScreen
from lizard_ui.models import ApplicationIcon
from lizard_ui.models import CustomerLogo
from lizard_ui import uisettings
import logging
import urlparse
import urllib
import json)
and context including class names, function names, or small code snippets from other files:
# Path: lizard_ui/forms.py
# class LoginForm(AuthenticationForm):
# username = forms.CharField(
# max_length=100,
# label=_('Username'),
# required=True)
# password = forms.CharField(
# max_length=100,
# label=_('Password'),
# widget=forms.PasswordInput(),
# required=True)
# next_url = forms.CharField(
# max_length=100,
# required=False,
# widget=forms.HiddenInput())
#
# class ChangeLanguageForm(forms.Form):
# language = forms.ChoiceField(
# choices=settings.LANGUAGES
# )
#
# Path: lizard_ui/layout.py
# class Action(object):
# """Small wrapper for actions, just to make the attributes explicit.
#
# A dictionary with keys instead of attributes is just as fine for the
# templates, but for documentation purposes this class is handier.
#
# - **name**: text of the link.
#
# - **element_id**: id the element will get in the HTML. The id is prepended
# with 'action-'.
#
# - **icon**: icon class according to
# http://twitter.github.com/bootstrap/base-css.html#icons, optional.
#
# - **url**: where the link points at.
#
# - **description**: optional description for helpful popup.
#
# - **klass**: optional CSS class to give to the link. This allows you to
# tie custom javascript handling to the link.
#
# - **data_attributes**: optional dictionary to set data-xyz attributes on
# the action that can be used by css that reacts on the actions.
#
# """
# def __init__(self, name=None, element_id=None, icon=None, url='#',
# description=None, klass=None, data_attributes=None,
# target=None):
# self.name = name
# if element_id is not None:
# self.element_id = 'action-%s' % element_id
# self.icon = icon
# self.url = url
# self.description = description
# self.klass = klass
# self.data_attributes = data_attributes
# self.target = target # &$%^!, this is for opening in a new tab.
#
# Path: lizard_ui/uisettings.py
# SITE_TITLE = getattr(settings, 'UI_SITE_TITLE', 'Lizard')
# SHOW_LOGIN = getattr(settings, 'UI_SHOW_LOGIN', True)
# SITE_ACTIONS = getattr(settings, 'UI_SITE_ACTIONS', [])
# GAUGES_SITE_ID = getattr(settings, 'UI_GAUGES_SITE_ID', None)
# SHOW_LANGUAGE_PICKER = getattr(settings, 'UI_SHOW_LANGUAGE_PICKER', False)
. Output only the next line. | form_class = ChangeLanguageForm |
Here is a snippet: <|code_start|> @property
def title(self):
"""Return title for use in 'head' tag.
By default it uses the ``page_title`` attribute, followed by
``UI_SITE_TITLE`` (which is 'lizard' by default).
"""
return ' - '.join([self.page_title, uisettings.SITE_TITLE])
@property
def site_actions(self):
"""Return site actions.
``UI_SITE_ACTIONS`` are on the left, a login link (if
``UI_SHOW_LOGIN`` is True) on the right.
"""
actions = copy(uisettings.SITE_ACTIONS)
if uisettings.SHOW_LANGUAGE_PICKER:
# Deprecated. It is now a admin-configurable setting
# (``show_language_picker``) in lizard-map
pass
if uisettings.SHOW_LOGIN:
query_string = urllib.urlencode({'next': self.request.path_info})
if self.request.user.is_authenticated():
# Name of the user. TODO: link to profile page.
# The action is just text-with-an-icon right now.
<|code_end|>
. Write the next line using the current file imports:
from copy import copy
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseServerError
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template import Context, loader
from django.utils.translation import check_for_language
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View
from django.views.generic.edit import FormView
from lizard_ui.forms import LoginForm, ChangeLanguageForm
from lizard_ui.layout import Action
from lizard_ui.models import ApplicationScreen
from lizard_ui.models import ApplicationIcon
from lizard_ui.models import CustomerLogo
from lizard_ui import uisettings
import logging
import urlparse
import urllib
import json
and context from other files:
# Path: lizard_ui/forms.py
# class LoginForm(AuthenticationForm):
# username = forms.CharField(
# max_length=100,
# label=_('Username'),
# required=True)
# password = forms.CharField(
# max_length=100,
# label=_('Password'),
# widget=forms.PasswordInput(),
# required=True)
# next_url = forms.CharField(
# max_length=100,
# required=False,
# widget=forms.HiddenInput())
#
# class ChangeLanguageForm(forms.Form):
# language = forms.ChoiceField(
# choices=settings.LANGUAGES
# )
#
# Path: lizard_ui/layout.py
# class Action(object):
# """Small wrapper for actions, just to make the attributes explicit.
#
# A dictionary with keys instead of attributes is just as fine for the
# templates, but for documentation purposes this class is handier.
#
# - **name**: text of the link.
#
# - **element_id**: id the element will get in the HTML. The id is prepended
# with 'action-'.
#
# - **icon**: icon class according to
# http://twitter.github.com/bootstrap/base-css.html#icons, optional.
#
# - **url**: where the link points at.
#
# - **description**: optional description for helpful popup.
#
# - **klass**: optional CSS class to give to the link. This allows you to
# tie custom javascript handling to the link.
#
# - **data_attributes**: optional dictionary to set data-xyz attributes on
# the action that can be used by css that reacts on the actions.
#
# """
# def __init__(self, name=None, element_id=None, icon=None, url='#',
# description=None, klass=None, data_attributes=None,
# target=None):
# self.name = name
# if element_id is not None:
# self.element_id = 'action-%s' % element_id
# self.icon = icon
# self.url = url
# self.description = description
# self.klass = klass
# self.data_attributes = data_attributes
# self.target = target # &$%^!, this is for opening in a new tab.
#
# Path: lizard_ui/uisettings.py
# SITE_TITLE = getattr(settings, 'UI_SITE_TITLE', 'Lizard')
# SHOW_LOGIN = getattr(settings, 'UI_SHOW_LOGIN', True)
# SITE_ACTIONS = getattr(settings, 'UI_SITE_ACTIONS', [])
# GAUGES_SITE_ID = getattr(settings, 'UI_GAUGES_SITE_ID', None)
# SHOW_LANGUAGE_PICKER = getattr(settings, 'UI_SHOW_LANGUAGE_PICKER', False)
, which may include functions, classes, or code. Output only the next line. | action = Action(icon='icon-user') |
Based on the snippet: <|code_start|>
def dispatch(self, request, *args, **kwargs):
if self.required_permission:
if not request.user.has_perm(self.required_permission):
return HttpResponseRedirect(
settings.LOGIN_URL + '?next=%s' % request.path)
# open / close sidebar when requested
def str2bool(something):
if something is None: return None
if something.lower() == 'true': return True
elif something.lower() == 'false': return False
else: return None
sidebar_is_collapsed = str2bool(request.GET.get('sidebar_is_collapsed'))
if sidebar_is_collapsed is not None:
self.sidebar_is_collapsed = sidebar_is_collapsed
rightbar_is_collapsed = str2bool(request.GET.get('rightbar_is_collapsed'))
if rightbar_is_collapsed is not None:
self.rightbar_is_collapsed = rightbar_is_collapsed
secondary_sidebar_is_collapsed = str2bool(request.GET.get('secondary_sidebar_is_collapsed'))
if secondary_sidebar_is_collapsed is not None:
self.secondary_sidebar_is_collapsed = secondary_sidebar_is_collapsed
return super(UiView, self).dispatch(request, *args, **kwargs)
@property
def gauges_site_id(self):
"""Return gaug.es tracking code (unless we're in debug mode)."""
if settings.DEBUG:
return
<|code_end|>
, predict the immediate next line with the help of imports:
from copy import copy
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseServerError
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template import Context, loader
from django.utils.translation import check_for_language
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View
from django.views.generic.edit import FormView
from lizard_ui.forms import LoginForm, ChangeLanguageForm
from lizard_ui.layout import Action
from lizard_ui.models import ApplicationScreen
from lizard_ui.models import ApplicationIcon
from lizard_ui.models import CustomerLogo
from lizard_ui import uisettings
import logging
import urlparse
import urllib
import json
and context (classes, functions, sometimes code) from other files:
# Path: lizard_ui/forms.py
# class LoginForm(AuthenticationForm):
# username = forms.CharField(
# max_length=100,
# label=_('Username'),
# required=True)
# password = forms.CharField(
# max_length=100,
# label=_('Password'),
# widget=forms.PasswordInput(),
# required=True)
# next_url = forms.CharField(
# max_length=100,
# required=False,
# widget=forms.HiddenInput())
#
# class ChangeLanguageForm(forms.Form):
# language = forms.ChoiceField(
# choices=settings.LANGUAGES
# )
#
# Path: lizard_ui/layout.py
# class Action(object):
# """Small wrapper for actions, just to make the attributes explicit.
#
# A dictionary with keys instead of attributes is just as fine for the
# templates, but for documentation purposes this class is handier.
#
# - **name**: text of the link.
#
# - **element_id**: id the element will get in the HTML. The id is prepended
# with 'action-'.
#
# - **icon**: icon class according to
# http://twitter.github.com/bootstrap/base-css.html#icons, optional.
#
# - **url**: where the link points at.
#
# - **description**: optional description for helpful popup.
#
# - **klass**: optional CSS class to give to the link. This allows you to
# tie custom javascript handling to the link.
#
# - **data_attributes**: optional dictionary to set data-xyz attributes on
# the action that can be used by css that reacts on the actions.
#
# """
# def __init__(self, name=None, element_id=None, icon=None, url='#',
# description=None, klass=None, data_attributes=None,
# target=None):
# self.name = name
# if element_id is not None:
# self.element_id = 'action-%s' % element_id
# self.icon = icon
# self.url = url
# self.description = description
# self.klass = klass
# self.data_attributes = data_attributes
# self.target = target # &$%^!, this is for opening in a new tab.
#
# Path: lizard_ui/uisettings.py
# SITE_TITLE = getattr(settings, 'UI_SITE_TITLE', 'Lizard')
# SHOW_LOGIN = getattr(settings, 'UI_SHOW_LOGIN', True)
# SITE_ACTIONS = getattr(settings, 'UI_SITE_ACTIONS', [])
# GAUGES_SITE_ID = getattr(settings, 'UI_GAUGES_SITE_ID', None)
# SHOW_LANGUAGE_PICKER = getattr(settings, 'UI_SHOW_LANGUAGE_PICKER', False)
. Output only the next line. | return uisettings.GAUGES_SITE_ID |
Continue the code snippet: <|code_start|> self.destination_collection_uri = "/buckets/%s/collections/%s" % (
self.destination["bucket"],
self.destination["collection"],
)
def sign_and_update_destination(
self,
request,
source_attributes,
next_source_status=STATUS.SIGNED,
previous_source_status=None,
push_records=True,
):
"""Sign the specified collection.
0. Create the destination bucket / collection
1. Get all the records of the collection
2. Send all records since the last_modified of the destination
3. Compute a hash of these records
4. Ask the signer for a signature
5. Send the signature to the destination.
"""
changes_count = 0
self.create_destination(request)
if push_records:
changes_count = self.push_records_to_destination(request)
records, timestamp = self.get_destination_records(empty_none=False)
<|code_end|>
. Use current file imports:
import datetime
import logging
from enum import Enum
from kinto.core.events import ACTIONS
from kinto.core.storage.exceptions import RecordNotFoundError
from pyramid.authorization import Everyone
from .serializer import canonical_json
from .utils import STATUS, ensure_resource_exists, notify_resource_event, records_diff
and context (classes, functions, or code) from other files:
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/serializer.py
# def canonical_json(records, last_modified):
# records = (r for r in records if not r.get("deleted", False))
# records = sorted(records, key=operator.itemgetter("id"))
#
# payload = {"data": records, "last_modified": "%s" % last_modified}
#
# dump = canonicaljson.dumps(payload)
#
# return dump
#
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/utils.py
# class STATUS(Enum):
# WORK_IN_PROGRESS = "work-in-progress"
# TO_SIGN = "to-sign"
# TO_REFRESH = "to-resign"
# TO_REVIEW = "to-review"
# TO_ROLLBACK = "to-rollback"
# SIGNED = "signed"
#
# def __eq__(self, other):
# if not hasattr(other, "value"):
# return self.value == other
# return super(STATUS, self).__eq__(other)
#
# def __ne__(self, other):
# return not self.__eq__(other)
#
# def ensure_resource_exists(
# request, resource_name, parent_id, obj, permissions, matchdict
# ):
# storage = request.registry.storage
# permission = request.registry.permission
# try:
# created = storage.create(
# resource_name=resource_name, parent_id=parent_id, obj=obj
# )
# object_uri = instance_uri(request, resource_name, **matchdict)
# permission.replace_object_permissions(object_uri, permissions)
# notify_resource_event(
# request,
# {"method": "PUT", "path": object_uri},
# matchdict=matchdict,
# resource_name=resource_name,
# parent_id=parent_id,
# obj=created,
# action=ACTIONS.CREATE,
# )
# except UnicityError:
# pass
#
# def notify_resource_event(
# request, request_options, matchdict, resource_name, parent_id, obj, action, old=None
# ):
# """Helper that triggers resource events as real requests."""
# fakerequest = build_request(request, request_options)
# fakerequest.matchdict = matchdict
# fakerequest.bound_data = request.bound_data
# fakerequest.authn_type, fakerequest.selected_userid = PLUGIN_USERID.split(":")
# fakerequest.current_resource_name = resource_name
#
# # When kinto-signer copies record from one place to another,
# # it simulates a resource event. Since kinto-attachment
# # prevents from updating attachment fields, it throws an error.
# # The following flag will disable the kinto-attachment check.
# # See https://github.com/Kinto/kinto-signer/issues/256
# # and https://bugzilla.mozilla.org/show_bug.cgi?id=1470812
# has_changed_attachment = (
# resource_name == "record"
# and action == ACTIONS.UPDATE
# and "attachment" in old
# and old["attachment"] != obj.get("attachment")
# )
# if has_changed_attachment:
# fakerequest._attachment_auto_save = True
#
# fakerequest.notify_resource_event(
# parent_id=parent_id,
# timestamp=obj[FIELD_LAST_MODIFIED],
# data=obj,
# action=action,
# old=old,
# )
#
# def records_diff(left, right):
# left_by_id = {r["id"]: r for r in left}
# results = []
# for r in right:
# rid = r["id"]
# left_record = left_by_id.pop(rid, None)
# if left_record is None:
# # In right, but not in left (deleted!)
# results.append({**r, "deleted": True})
# elif not records_equal(left_record, r):
# # Differ between left and right
# results.append(left_record)
# # In left, but not in right.
# results.extend(left_by_id.values())
# return results
. Output only the next line. | serialized_records = canonical_json(records, timestamp) |
Based on the snippet: <|code_start|> @property
def source(self):
return self._source
@source.setter
def source(self, source):
self._source = _ensure_resource(source)
self.source_bucket_uri = "/buckets/%s" % source["bucket"]
self.source_collection_uri = "/buckets/%s/collections/%s" % (
source["bucket"],
source["collection"],
)
@property
def destination(self):
return self._destination
@destination.setter
def destination(self, destination):
self._destination = _ensure_resource(destination)
self.destination_bucket_uri = "/buckets/%s" % (self.destination["bucket"])
self.destination_collection_uri = "/buckets/%s/collections/%s" % (
self.destination["bucket"],
self.destination["collection"],
)
def sign_and_update_destination(
self,
request,
source_attributes,
<|code_end|>
, predict the immediate next line with the help of imports:
import datetime
import logging
from enum import Enum
from kinto.core.events import ACTIONS
from kinto.core.storage.exceptions import RecordNotFoundError
from pyramid.authorization import Everyone
from .serializer import canonical_json
from .utils import STATUS, ensure_resource_exists, notify_resource_event, records_diff
and context (classes, functions, sometimes code) from other files:
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/serializer.py
# def canonical_json(records, last_modified):
# records = (r for r in records if not r.get("deleted", False))
# records = sorted(records, key=operator.itemgetter("id"))
#
# payload = {"data": records, "last_modified": "%s" % last_modified}
#
# dump = canonicaljson.dumps(payload)
#
# return dump
#
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/utils.py
# class STATUS(Enum):
# WORK_IN_PROGRESS = "work-in-progress"
# TO_SIGN = "to-sign"
# TO_REFRESH = "to-resign"
# TO_REVIEW = "to-review"
# TO_ROLLBACK = "to-rollback"
# SIGNED = "signed"
#
# def __eq__(self, other):
# if not hasattr(other, "value"):
# return self.value == other
# return super(STATUS, self).__eq__(other)
#
# def __ne__(self, other):
# return not self.__eq__(other)
#
# def ensure_resource_exists(
# request, resource_name, parent_id, obj, permissions, matchdict
# ):
# storage = request.registry.storage
# permission = request.registry.permission
# try:
# created = storage.create(
# resource_name=resource_name, parent_id=parent_id, obj=obj
# )
# object_uri = instance_uri(request, resource_name, **matchdict)
# permission.replace_object_permissions(object_uri, permissions)
# notify_resource_event(
# request,
# {"method": "PUT", "path": object_uri},
# matchdict=matchdict,
# resource_name=resource_name,
# parent_id=parent_id,
# obj=created,
# action=ACTIONS.CREATE,
# )
# except UnicityError:
# pass
#
# def notify_resource_event(
# request, request_options, matchdict, resource_name, parent_id, obj, action, old=None
# ):
# """Helper that triggers resource events as real requests."""
# fakerequest = build_request(request, request_options)
# fakerequest.matchdict = matchdict
# fakerequest.bound_data = request.bound_data
# fakerequest.authn_type, fakerequest.selected_userid = PLUGIN_USERID.split(":")
# fakerequest.current_resource_name = resource_name
#
# # When kinto-signer copies record from one place to another,
# # it simulates a resource event. Since kinto-attachment
# # prevents from updating attachment fields, it throws an error.
# # The following flag will disable the kinto-attachment check.
# # See https://github.com/Kinto/kinto-signer/issues/256
# # and https://bugzilla.mozilla.org/show_bug.cgi?id=1470812
# has_changed_attachment = (
# resource_name == "record"
# and action == ACTIONS.UPDATE
# and "attachment" in old
# and old["attachment"] != obj.get("attachment")
# )
# if has_changed_attachment:
# fakerequest._attachment_auto_save = True
#
# fakerequest.notify_resource_event(
# parent_id=parent_id,
# timestamp=obj[FIELD_LAST_MODIFIED],
# data=obj,
# action=action,
# old=old,
# )
#
# def records_diff(left, right):
# left_by_id = {r["id"]: r for r in left}
# results = []
# for r in right:
# rid = r["id"]
# left_record = left_by_id.pop(rid, None)
# if left_record is None:
# # In right, but not in left (deleted!)
# results.append({**r, "deleted": True})
# elif not records_equal(left_record, r):
# # Differ between left and right
# results.append(left_record)
# # In left, but not in right.
# results.extend(left_by_id.values())
# return results
. Output only the next line. | next_source_status=STATUS.SIGNED, |
Using the snippet: <|code_start|> parent_id=self.source_collection_uri,
obj=impacted,
action=action,
old=record_before,
)
if refresh_last_edit:
current_userid = request.prefixed_userid
current_date = datetime.datetime.now(datetime.timezone.utc).isoformat()
attrs = {
"status": STATUS.SIGNED.value,
"last_editor_comment": "",
"last_reviewer_comment": "",
}
attrs[TRACKING_FIELDS.LAST_EDIT_BY.value] = current_userid
attrs[TRACKING_FIELDS.LAST_EDIT_DATE.value] = current_date
self._update_source_attributes(request, **attrs)
return changed_count
def create_destination(self, request):
"""Create the destination bucket/collection if they don't already exist."""
# With the current implementation, the destination is not writable by
# anyone and readable by everyone.
# https://github.com/Kinto/kinto-signer/issues/55
bucket_name = self.destination["bucket"]
collection_name = self.destination["collection"]
# Destination bucket will be writable by current user.
perms = {"write": [request.prefixed_userid]}
<|code_end|>
, determine the next line of code. You have imports:
import datetime
import logging
from enum import Enum
from kinto.core.events import ACTIONS
from kinto.core.storage.exceptions import RecordNotFoundError
from pyramid.authorization import Everyone
from .serializer import canonical_json
from .utils import STATUS, ensure_resource_exists, notify_resource_event, records_diff
and context (class names, function names, or code) available:
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/serializer.py
# def canonical_json(records, last_modified):
# records = (r for r in records if not r.get("deleted", False))
# records = sorted(records, key=operator.itemgetter("id"))
#
# payload = {"data": records, "last_modified": "%s" % last_modified}
#
# dump = canonicaljson.dumps(payload)
#
# return dump
#
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/utils.py
# class STATUS(Enum):
# WORK_IN_PROGRESS = "work-in-progress"
# TO_SIGN = "to-sign"
# TO_REFRESH = "to-resign"
# TO_REVIEW = "to-review"
# TO_ROLLBACK = "to-rollback"
# SIGNED = "signed"
#
# def __eq__(self, other):
# if not hasattr(other, "value"):
# return self.value == other
# return super(STATUS, self).__eq__(other)
#
# def __ne__(self, other):
# return not self.__eq__(other)
#
# def ensure_resource_exists(
# request, resource_name, parent_id, obj, permissions, matchdict
# ):
# storage = request.registry.storage
# permission = request.registry.permission
# try:
# created = storage.create(
# resource_name=resource_name, parent_id=parent_id, obj=obj
# )
# object_uri = instance_uri(request, resource_name, **matchdict)
# permission.replace_object_permissions(object_uri, permissions)
# notify_resource_event(
# request,
# {"method": "PUT", "path": object_uri},
# matchdict=matchdict,
# resource_name=resource_name,
# parent_id=parent_id,
# obj=created,
# action=ACTIONS.CREATE,
# )
# except UnicityError:
# pass
#
# def notify_resource_event(
# request, request_options, matchdict, resource_name, parent_id, obj, action, old=None
# ):
# """Helper that triggers resource events as real requests."""
# fakerequest = build_request(request, request_options)
# fakerequest.matchdict = matchdict
# fakerequest.bound_data = request.bound_data
# fakerequest.authn_type, fakerequest.selected_userid = PLUGIN_USERID.split(":")
# fakerequest.current_resource_name = resource_name
#
# # When kinto-signer copies record from one place to another,
# # it simulates a resource event. Since kinto-attachment
# # prevents from updating attachment fields, it throws an error.
# # The following flag will disable the kinto-attachment check.
# # See https://github.com/Kinto/kinto-signer/issues/256
# # and https://bugzilla.mozilla.org/show_bug.cgi?id=1470812
# has_changed_attachment = (
# resource_name == "record"
# and action == ACTIONS.UPDATE
# and "attachment" in old
# and old["attachment"] != obj.get("attachment")
# )
# if has_changed_attachment:
# fakerequest._attachment_auto_save = True
#
# fakerequest.notify_resource_event(
# parent_id=parent_id,
# timestamp=obj[FIELD_LAST_MODIFIED],
# data=obj,
# action=action,
# old=old,
# )
#
# def records_diff(left, right):
# left_by_id = {r["id"]: r for r in left}
# results = []
# for r in right:
# rid = r["id"]
# left_record = left_by_id.pop(rid, None)
# if left_record is None:
# # In right, but not in left (deleted!)
# results.append({**r, "deleted": True})
# elif not records_equal(left_record, r):
# # Differ between left and right
# results.append(left_record)
# # In left, but not in right.
# results.extend(left_by_id.values())
# return results
. Output only the next line. | ensure_resource_exists( |
Continue the code snippet: <|code_start|> impacted = tombstone
# In dest_records, but not in source_records. Must be re-created.
elif record.get("deleted"):
self.storage.create(obj=dest_record, **storage_kwargs)
action = ACTIONS.CREATE
record_before = None
impacted = dest_record
# Differ, restore attributes of dest_record in source.
else:
self.storage.update(
object_id=record[FIELD_ID], obj=dest_record, **storage_kwargs
)
action = ACTIONS.UPDATE
record_before = record
impacted = dest_record
if action is not None:
changed_count += 1
# Notify resource event, in order to leave a trace in the history.
matchdict = {
"bucket_id": self.destination["bucket"],
"collection_id": self.destination["collection"],
FIELD_ID: record[FIELD_ID],
}
record_uri = (
"/buckets/{bucket_id}/collections/{collection_id}/records/{id}"
).format(**matchdict)
<|code_end|>
. Use current file imports:
import datetime
import logging
from enum import Enum
from kinto.core.events import ACTIONS
from kinto.core.storage.exceptions import RecordNotFoundError
from pyramid.authorization import Everyone
from .serializer import canonical_json
from .utils import STATUS, ensure_resource_exists, notify_resource_event, records_diff
and context (classes, functions, or code) from other files:
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/serializer.py
# def canonical_json(records, last_modified):
# records = (r for r in records if not r.get("deleted", False))
# records = sorted(records, key=operator.itemgetter("id"))
#
# payload = {"data": records, "last_modified": "%s" % last_modified}
#
# dump = canonicaljson.dumps(payload)
#
# return dump
#
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/utils.py
# class STATUS(Enum):
# WORK_IN_PROGRESS = "work-in-progress"
# TO_SIGN = "to-sign"
# TO_REFRESH = "to-resign"
# TO_REVIEW = "to-review"
# TO_ROLLBACK = "to-rollback"
# SIGNED = "signed"
#
# def __eq__(self, other):
# if not hasattr(other, "value"):
# return self.value == other
# return super(STATUS, self).__eq__(other)
#
# def __ne__(self, other):
# return not self.__eq__(other)
#
# def ensure_resource_exists(
# request, resource_name, parent_id, obj, permissions, matchdict
# ):
# storage = request.registry.storage
# permission = request.registry.permission
# try:
# created = storage.create(
# resource_name=resource_name, parent_id=parent_id, obj=obj
# )
# object_uri = instance_uri(request, resource_name, **matchdict)
# permission.replace_object_permissions(object_uri, permissions)
# notify_resource_event(
# request,
# {"method": "PUT", "path": object_uri},
# matchdict=matchdict,
# resource_name=resource_name,
# parent_id=parent_id,
# obj=created,
# action=ACTIONS.CREATE,
# )
# except UnicityError:
# pass
#
# def notify_resource_event(
# request, request_options, matchdict, resource_name, parent_id, obj, action, old=None
# ):
# """Helper that triggers resource events as real requests."""
# fakerequest = build_request(request, request_options)
# fakerequest.matchdict = matchdict
# fakerequest.bound_data = request.bound_data
# fakerequest.authn_type, fakerequest.selected_userid = PLUGIN_USERID.split(":")
# fakerequest.current_resource_name = resource_name
#
# # When kinto-signer copies record from one place to another,
# # it simulates a resource event. Since kinto-attachment
# # prevents from updating attachment fields, it throws an error.
# # The following flag will disable the kinto-attachment check.
# # See https://github.com/Kinto/kinto-signer/issues/256
# # and https://bugzilla.mozilla.org/show_bug.cgi?id=1470812
# has_changed_attachment = (
# resource_name == "record"
# and action == ACTIONS.UPDATE
# and "attachment" in old
# and old["attachment"] != obj.get("attachment")
# )
# if has_changed_attachment:
# fakerequest._attachment_auto_save = True
#
# fakerequest.notify_resource_event(
# parent_id=parent_id,
# timestamp=obj[FIELD_LAST_MODIFIED],
# data=obj,
# action=action,
# old=old,
# )
#
# def records_diff(left, right):
# left_by_id = {r["id"]: r for r in left}
# results = []
# for r in right:
# rid = r["id"]
# left_record = left_by_id.pop(rid, None)
# if left_record is None:
# # In right, but not in left (deleted!)
# results.append({**r, "deleted": True})
# elif not records_equal(left_record, r):
# # Differ between left and right
# results.append(left_record)
# # In left, but not in right.
# results.extend(left_by_id.values())
# return results
. Output only the next line. | notify_resource_event( |
Predict the next line for this snippet: <|code_start|> )
return changes_count
def refresh_signature(self, request, next_source_status=None):
"""Refresh the signature without moving records."""
records, timestamp = self.get_destination_records(empty_none=False)
serialized_records = canonical_json(records, timestamp)
logger.debug(f"{self.source_collection_uri}:\t'{serialized_records}'")
signature = self.signer.sign(serialized_records)
self.set_destination_signature(signature, request=request, source_attributes={})
if next_source_status is not None:
current_userid = request.prefixed_userid
current_date = datetime.datetime.now(datetime.timezone.utc).isoformat()
attrs = {"status": next_source_status}
attrs[TRACKING_FIELDS.LAST_SIGNATURE_BY.value] = current_userid
attrs[TRACKING_FIELDS.LAST_SIGNATURE_DATE.value] = current_date
self._update_source_attributes(request, **attrs)
def rollback_changes(
self, request, refresh_last_edit=True, refresh_signature=False
):
"""Restore the contents of *destination* to *source* (delete extras, recreate deleted,
and restore changes) (eg. destination -> preview, or preview -> source).
"""
dest_records, _ = self.get_destination_records(empty_none=False)
dest_by_id = {r["id"]: r for r in dest_records}
source_records, _ = self.get_source_records()
<|code_end|>
with the help of current file imports:
import datetime
import logging
from enum import Enum
from kinto.core.events import ACTIONS
from kinto.core.storage.exceptions import RecordNotFoundError
from pyramid.authorization import Everyone
from .serializer import canonical_json
from .utils import STATUS, ensure_resource_exists, notify_resource_event, records_diff
and context from other files:
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/serializer.py
# def canonical_json(records, last_modified):
# records = (r for r in records if not r.get("deleted", False))
# records = sorted(records, key=operator.itemgetter("id"))
#
# payload = {"data": records, "last_modified": "%s" % last_modified}
#
# dump = canonicaljson.dumps(payload)
#
# return dump
#
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/utils.py
# class STATUS(Enum):
# WORK_IN_PROGRESS = "work-in-progress"
# TO_SIGN = "to-sign"
# TO_REFRESH = "to-resign"
# TO_REVIEW = "to-review"
# TO_ROLLBACK = "to-rollback"
# SIGNED = "signed"
#
# def __eq__(self, other):
# if not hasattr(other, "value"):
# return self.value == other
# return super(STATUS, self).__eq__(other)
#
# def __ne__(self, other):
# return not self.__eq__(other)
#
# def ensure_resource_exists(
# request, resource_name, parent_id, obj, permissions, matchdict
# ):
# storage = request.registry.storage
# permission = request.registry.permission
# try:
# created = storage.create(
# resource_name=resource_name, parent_id=parent_id, obj=obj
# )
# object_uri = instance_uri(request, resource_name, **matchdict)
# permission.replace_object_permissions(object_uri, permissions)
# notify_resource_event(
# request,
# {"method": "PUT", "path": object_uri},
# matchdict=matchdict,
# resource_name=resource_name,
# parent_id=parent_id,
# obj=created,
# action=ACTIONS.CREATE,
# )
# except UnicityError:
# pass
#
# def notify_resource_event(
# request, request_options, matchdict, resource_name, parent_id, obj, action, old=None
# ):
# """Helper that triggers resource events as real requests."""
# fakerequest = build_request(request, request_options)
# fakerequest.matchdict = matchdict
# fakerequest.bound_data = request.bound_data
# fakerequest.authn_type, fakerequest.selected_userid = PLUGIN_USERID.split(":")
# fakerequest.current_resource_name = resource_name
#
# # When kinto-signer copies record from one place to another,
# # it simulates a resource event. Since kinto-attachment
# # prevents from updating attachment fields, it throws an error.
# # The following flag will disable the kinto-attachment check.
# # See https://github.com/Kinto/kinto-signer/issues/256
# # and https://bugzilla.mozilla.org/show_bug.cgi?id=1470812
# has_changed_attachment = (
# resource_name == "record"
# and action == ACTIONS.UPDATE
# and "attachment" in old
# and old["attachment"] != obj.get("attachment")
# )
# if has_changed_attachment:
# fakerequest._attachment_auto_save = True
#
# fakerequest.notify_resource_event(
# parent_id=parent_id,
# timestamp=obj[FIELD_LAST_MODIFIED],
# data=obj,
# action=action,
# old=old,
# )
#
# def records_diff(left, right):
# left_by_id = {r["id"]: r for r in left}
# results = []
# for r in right:
# rid = r["id"]
# left_record = left_by_id.pop(rid, None)
# if left_record is None:
# # In right, but not in left (deleted!)
# results.append({**r, "deleted": True})
# elif not records_equal(left_record, r):
# # Differ between left and right
# results.append(left_record)
# # In left, but not in right.
# results.extend(left_by_id.values())
# return results
, which may contain function names, class names, or code. Output only the next line. | changes_since_approval = records_diff(source_records, dest_records) |
Given snippet: <|code_start|>
RE_ISO8601 = re.compile(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}\+00:00")
class SignerAttachmentsTest(BaseWebTest, unittest.TestCase):
def setUp(self):
super().setUp()
# Patch calls to Autograph.
patch = mock.patch("kinto_remote_settings.signer.backends.autograph.requests")
self.addCleanup(patch.stop)
self.mocked_autograph = patch.start()
def fake_sign():
fake_signature = "".join(random.sample(string.ascii_lowercase, 10))
return [
{
"signature": "",
"hash_algorithm": "",
"signature_encoding": "",
"content-signature": fake_signature,
"x5u": "",
"ref": "",
}
]
self.mocked_autograph.post.return_value.json.side_effect = fake_sign
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import random
import re
import string
import unittest
from unittest import mock
from .support import BaseWebTest, get_user_headers
and context:
# Path: kinto-remote-settings/tests/signer/support.py
# class BaseWebTest(CoreWebTest):
# def __init__(self, *args, **kwargs):
# def get_app_settings(cls, extras=None):
which might include code, classes, or functions. Output only the next line. | self.headers = get_user_headers("tarte:en-pion") |
Using the snippet: <|code_start|> payload = SIGN_PREFIX + payload
signature = signature_bundle["signature"]
if isinstance(signature, str): # pragma: nocover
signature = signature.encode("utf-8")
signature_bytes = base64.urlsafe_b64decode(signature)
public_key = self.load_public_key()
try:
public_key.verify(
signature_bytes,
payload,
hashfunc=hashlib.sha384,
sigdecode=ecdsa.util.sigdecode_string,
)
except Exception as e:
raise BadSignatureError(e)
def load_from_settings(settings, prefix="", *, prefixes=None):
if prefixes is None:
prefixes = [prefix]
if prefix != "":
message = (
"signer.load_from_settings `prefix` parameter is deprecated, please "
"use `prefixes` instead."
)
warnings.warn(message, DeprecationWarning)
<|code_end|>
, determine the next line of code. You have imports:
import base64
import hashlib
import warnings
import ecdsa
from ecdsa import NIST384p, SigningKey, VerifyingKey
from ..utils import get_first_matching_setting
from .base import SignerBase
from .exceptions import BadSignatureError
and context (class names, function names, or code) available:
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/utils.py
# def get_first_matching_setting(setting_name, settings, prefixes, default=None):
# for prefix in prefixes:
# prefixed_setting_name = prefix + setting_name
# if prefixed_setting_name in settings:
# return settings[prefixed_setting_name]
# return default
#
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/backends/base.py
# class SignerBase(object):
# def sign(self, payload):
# """
# Signs the specified `payload` and returns the signature metadata.
#
# :returns: A mapping with every attributes about the signature
# (e.g. "signature", "hash_algorithm", "signature_encoding"...)
# :rtype: dict
# """
# raise NotImplementedError
#
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/backends/exceptions.py
# class BadSignatureError(Exception):
# pass
. Output only the next line. | private_key = get_first_matching_setting("ecdsa.private_key", settings, prefixes) |
Using the snippet: <|code_start|>
payload = SIGN_PREFIX + payload
private_key = self.load_private_key()
signature = private_key.sign(
payload, hashfunc=hashlib.sha384, sigencode=ecdsa.util.sigencode_string
)
x5u = ""
enc_signature = base64.urlsafe_b64encode(signature).decode("utf-8")
return {"signature": enc_signature, "x5u": x5u, "mode": "p384ecdsa"}
def verify(self, payload, signature_bundle):
if isinstance(payload, str): # pragma: nocover
payload = payload.encode("utf-8")
payload = SIGN_PREFIX + payload
signature = signature_bundle["signature"]
if isinstance(signature, str): # pragma: nocover
signature = signature.encode("utf-8")
signature_bytes = base64.urlsafe_b64decode(signature)
public_key = self.load_public_key()
try:
public_key.verify(
signature_bytes,
payload,
hashfunc=hashlib.sha384,
sigdecode=ecdsa.util.sigdecode_string,
)
except Exception as e:
<|code_end|>
, determine the next line of code. You have imports:
import base64
import hashlib
import warnings
import ecdsa
from ecdsa import NIST384p, SigningKey, VerifyingKey
from ..utils import get_first_matching_setting
from .base import SignerBase
from .exceptions import BadSignatureError
and context (class names, function names, or code) available:
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/utils.py
# def get_first_matching_setting(setting_name, settings, prefixes, default=None):
# for prefix in prefixes:
# prefixed_setting_name = prefix + setting_name
# if prefixed_setting_name in settings:
# return settings[prefixed_setting_name]
# return default
#
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/backends/base.py
# class SignerBase(object):
# def sign(self, payload):
# """
# Signs the specified `payload` and returns the signature metadata.
#
# :returns: A mapping with every attributes about the signature
# (e.g. "signature", "hash_algorithm", "signature_encoding"...)
# :rtype: dict
# """
# raise NotImplementedError
#
# Path: kinto-remote-settings/src/kinto_remote_settings/signer/backends/exceptions.py
# class BadSignatureError(Exception):
# pass
. Output only the next line. | raise BadSignatureError(e) |
Predict the next line for this snippet: <|code_start|>
pytestmark = pytest.mark.asyncio
async def test_review_signoff(
base_url: str,
selenium: WebDriver,
make_client: ClientFactory,
<|code_end|>
with the help of current file imports:
import pytest
from kinto_http.patch_type import JSONPatch
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from .conftest import Auth, ClientFactory
and context from other files:
# Path: tests/conftest.py
# DEFAULT_SERVER = "http://localhost:8888/v1"
# DEFAULT_AUTH = "user:pass"
# DEFAULT_EDITOR_AUTH = "editor:pass"
# DEFAULT_REVIEWER_AUTH = "reviewer:pass"
# DEFAULT_BUCKET = "main-workspace"
# DEFAULT_COLLECTION = "product-integrity"
# def pytest_addoption(parser):
# def server(request) -> str:
# def auth(request) -> Auth:
# def editor_auth(request) -> Auth:
# def reviewer_auth(request) -> Auth:
# def source_bucket(request) -> str:
# def source_collection(request) -> str:
# def keep_existing(request) -> bool:
# def make_client(
# server: str, source_bucket: str, source_collection: str
# ) -> ClientFactory:
# def _make_client(auth: Auth) -> AsyncClient:
# async def flush_default_collection(
# make_client: ClientFactory,
# auth: Auth,
# source_bucket: str,
# source_collection: str,
# ):
# def verify_url(request: FixtureRequest, base_url: str):
# def firefox_options(firefox_options: Options) -> Options:
# def selenium(selenium: WebDriver) -> WebDriver:
# def create_user(request_session: requests.Session, server: str, auth: Auth):
, which may contain function names, class names, or code. Output only the next line. | auth: Auth, |
Predict the next line for this snippet: <|code_start|>
pytestmark = pytest.mark.asyncio
async def test_review_signoff(
base_url: str,
selenium: WebDriver,
<|code_end|>
with the help of current file imports:
import pytest
from kinto_http.patch_type import JSONPatch
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from .conftest import Auth, ClientFactory
and context from other files:
# Path: tests/conftest.py
# DEFAULT_SERVER = "http://localhost:8888/v1"
# DEFAULT_AUTH = "user:pass"
# DEFAULT_EDITOR_AUTH = "editor:pass"
# DEFAULT_REVIEWER_AUTH = "reviewer:pass"
# DEFAULT_BUCKET = "main-workspace"
# DEFAULT_COLLECTION = "product-integrity"
# def pytest_addoption(parser):
# def server(request) -> str:
# def auth(request) -> Auth:
# def editor_auth(request) -> Auth:
# def reviewer_auth(request) -> Auth:
# def source_bucket(request) -> str:
# def source_collection(request) -> str:
# def keep_existing(request) -> bool:
# def make_client(
# server: str, source_bucket: str, source_collection: str
# ) -> ClientFactory:
# def _make_client(auth: Auth) -> AsyncClient:
# async def flush_default_collection(
# make_client: ClientFactory,
# auth: Auth,
# source_bucket: str,
# source_collection: str,
# ):
# def verify_url(request: FixtureRequest, base_url: str):
# def firefox_options(firefox_options: Options) -> Options:
# def selenium(selenium: WebDriver) -> WebDriver:
# def create_user(request_session: requests.Session, server: str, auth: Auth):
, which may contain function names, class names, or code. Output only the next line. | make_client: ClientFactory, |
Next line prediction: <|code_start|> "x5u": "",
"ref": "",
}
]
self.mocked_autograph.post.return_value.json.side_effect = fake_sign
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
settings["storage_backend"] = "kinto.core.storage.postgresql"
db = "postgresql://postgres:postgres@localhost/testdb"
settings["storage_url"] = db
settings["permission_backend"] = "kinto.core.permission.postgresql"
settings["permission_url"] = db
settings["cache_backend"] = "kinto.core.cache.memory"
settings["statsd_url"] = "udp://127.0.0.1:8125"
settings["kinto.signer.resources"] = "%s -> %s" % (
cls.source_collection,
cls.destination_collection,
)
return settings
class SignoffWebTest(PostgresWebTest):
def setUp(self):
super(SignoffWebTest, self).setUp()
<|code_end|>
. Use current file imports:
(import random
import re
import string
import unittest
from unittest import mock
from kinto.core.errors import ERRORS
from kinto.core.testing import FormattedErrorMixin
from .support import BaseWebTest, get_user_headers)
and context including class names, function names, or small code snippets from other files:
# Path: kinto-remote-settings/tests/signer/support.py
# class BaseWebTest(CoreWebTest):
# def __init__(self, *args, **kwargs):
# def get_app_settings(cls, extras=None):
. Output only the next line. | self.headers = get_user_headers("tarte:en-pion") |
Predict the next line after this snippet: <|code_start|>
def test_heartbeat(server: str):
resp = requests.get(f"{server}/__heartbeat__")
resp.raise_for_status()
<|code_end|>
using the current file's imports:
import os
import random
import pytest
import requests
from string import hexdigits
from typing import Dict, List
from kinto_http import AsyncClient, KintoException
from kinto_http.patch_type import JSONPatch
from kinto_remote_settings.signer.backends.local_ecdsa import ECDSASigner
from kinto_remote_settings.signer.serializer import canonical_json
from .conftest import Auth, ClientFactory
and any relevant context from other files:
# Path: tests/conftest.py
# DEFAULT_SERVER = "http://localhost:8888/v1"
# DEFAULT_AUTH = "user:pass"
# DEFAULT_EDITOR_AUTH = "editor:pass"
# DEFAULT_REVIEWER_AUTH = "reviewer:pass"
# DEFAULT_BUCKET = "main-workspace"
# DEFAULT_COLLECTION = "product-integrity"
# def pytest_addoption(parser):
# def server(request) -> str:
# def auth(request) -> Auth:
# def editor_auth(request) -> Auth:
# def reviewer_auth(request) -> Auth:
# def source_bucket(request) -> str:
# def source_collection(request) -> str:
# def keep_existing(request) -> bool:
# def make_client(
# server: str, source_bucket: str, source_collection: str
# ) -> ClientFactory:
# def _make_client(auth: Auth) -> AsyncClient:
# async def flush_default_collection(
# make_client: ClientFactory,
# auth: Auth,
# source_bucket: str,
# source_collection: str,
# ):
# def verify_url(request: FixtureRequest, base_url: str):
# def firefox_options(firefox_options: Options) -> Options:
# def selenium(selenium: WebDriver) -> WebDriver:
# def create_user(request_session: requests.Session, server: str, auth: Auth):
. Output only the next line. | async def test_history_plugin(make_client: ClientFactory, auth: Auth): |
Next line prediction: <|code_start|>
def test_heartbeat(server: str):
resp = requests.get(f"{server}/__heartbeat__")
resp.raise_for_status()
<|code_end|>
. Use current file imports:
(import os
import random
import pytest
import requests
from string import hexdigits
from typing import Dict, List
from kinto_http import AsyncClient, KintoException
from kinto_http.patch_type import JSONPatch
from kinto_remote_settings.signer.backends.local_ecdsa import ECDSASigner
from kinto_remote_settings.signer.serializer import canonical_json
from .conftest import Auth, ClientFactory)
and context including class names, function names, or small code snippets from other files:
# Path: tests/conftest.py
# DEFAULT_SERVER = "http://localhost:8888/v1"
# DEFAULT_AUTH = "user:pass"
# DEFAULT_EDITOR_AUTH = "editor:pass"
# DEFAULT_REVIEWER_AUTH = "reviewer:pass"
# DEFAULT_BUCKET = "main-workspace"
# DEFAULT_COLLECTION = "product-integrity"
# def pytest_addoption(parser):
# def server(request) -> str:
# def auth(request) -> Auth:
# def editor_auth(request) -> Auth:
# def reviewer_auth(request) -> Auth:
# def source_bucket(request) -> str:
# def source_collection(request) -> str:
# def keep_existing(request) -> bool:
# def make_client(
# server: str, source_bucket: str, source_collection: str
# ) -> ClientFactory:
# def _make_client(auth: Auth) -> AsyncClient:
# async def flush_default_collection(
# make_client: ClientFactory,
# auth: Auth,
# source_bucket: str,
# source_collection: str,
# ):
# def verify_url(request: FixtureRequest, base_url: str):
# def firefox_options(firefox_options: Options) -> Options:
# def selenium(selenium: WebDriver) -> WebDriver:
# def create_user(request_session: requests.Session, server: str, auth: Auth):
. Output only the next line. | async def test_history_plugin(make_client: ClientFactory, auth: Auth): |
Continue the code snippet: <|code_start|> self.updater.get_source_records()
self.storage.list_all.assert_called_with(
resource_name="record",
parent_id="/buckets/sourcebucket/collections/sourcecollection",
)
def test_get_destination_records(self):
# We want to test get_destination_records with some records.
records = [
{"id": idx, "foo": "bar %s" % idx, "last_modified": 42 - idx}
for idx in range(1, 4)
]
self.storage.list_all.return_value = records
self.updater.get_destination_records()
self.storage.resource_timestamp.assert_called_with(
resource_name="record",
parent_id="/buckets/destbucket/collections/destcollection",
)
self.storage.list_all.assert_called_with(
resource_name="record",
parent_id="/buckets/destbucket/collections/destcollection",
)
def test_push_records_to_destination(self):
self.patch(self.updater, "get_destination_records", return_value=([], 1324))
records = [
{"id": idx, "foo": "bar %s" % idx, "last_modified": 42 - idx}
for idx in range(1, 4)
]
self.patch(self.updater, "get_source_records", return_value=(records, 1325))
<|code_end|>
. Use current file imports:
import datetime
import unittest
import pytest
from unittest import mock
from kinto.core.storage.exceptions import RecordNotFoundError
from kinto_remote_settings.signer.updater import LocalUpdater
from kinto_remote_settings.signer.utils import STATUS
from .support import DummyRequest
and context (classes, functions, or code) from other files:
# Path: kinto-remote-settings/tests/signer/support.py
# class BaseWebTest(CoreWebTest):
# def __init__(self, *args, **kwargs):
# def get_app_settings(cls, extras=None):
. Output only the next line. | self.updater.push_records_to_destination(DummyRequest()) |
Here is a snippet: <|code_start|>
# =========================== plot for main data ============================
def plotInOneFigure(time, data, result, options):
"""
Plot the dlm results in one figure
Args:
time: the time label
data: the original data
result: the fitted result from dlm class
options: options for the plot, for details please refer to @dlm
"""
# plot the original data
plotData(time=time, data=data,
showDataPoint=options.showDataPoint, color=options.dataColor,
label='time series')
# plot fitered results if needed
if options.plotFilteredData:
start = result.filteredSteps[0]
end = result.filteredSteps[1] + 1
plotData(time=time[start:end],
data=to1dArray(result.filteredObs[start:end]),
showDataPoint=options.showFittedPoint,
color=options.filteredColor,
label='filtered series')
if options.showConfidenceInterval:
<|code_end|>
. Write the next line using the current file imports:
import matplotlib.pyplot as plt
from pydlm.base.tools import getInterval
and context from other files:
# Path: pydlm/base/tools.py
# def getInterval(means, var, p):
# alpha = abs(normal_CDF_inverse(min(1 - p, p) / 2))
# upper = [0] * len(means)
# lower = [0] * len(means)
# for i in range(0, len(means)):
# upper[i] = means[i] + alpha * math.sqrt(var[i])
# lower[i] = means[i] - alpha * math.sqrt(var[i])
#
# return (upper, lower)
, which may include functions, classes, or code. Output only the next line. | upper, lower = getInterval(result.filteredObs[start:end], |
Predict the next line after this snippet: <|code_start|>
class testAutoReg(unittest.TestCase):
def setUp(self):
self.data = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
<|code_end|>
using the current file's imports:
import unittest
from pydlm.modeler.autoReg import autoReg
and any relevant context from other files:
# Path: pydlm/modeler/autoReg.py
# class autoReg(component):
# """ The autoReg class allows user to add an autoregressive component to the dlm.
# This code implements the autoregressive component as a child class of
# component. Different from the dynamic component, the features in the
# autoReg is generated from the data, and updated according to the data.
#
# The latent states of autoReg are aligned in the order of
# [today - degree, today - degree + 1, ..., today - 2, today - 1]. Thus,
# when fetching the latents from autoReg component, use this order to
# correctly align the coefficients.
#
# Args:
# data (deprecated): Users get a warning if this argument is used.
# degree: the order of the autoregressive component
# discount: the discount factor
# name: the name of the trend component
# w: the value to set the prior covariance. Default to a diagonal
# matrix with 1e7 on the diagonal.
# padding: either 0 or None. The number to be padded for the first degree
# days, as no previous data is observed to form the feature
# matrix
# Examples:
# >>> # create a auto regression component:
# >>> autoReg8 = autoReg(degree=8, name='autoreg8', discount = 0.99)
# >>> # change the autoReg8 to have covariance with diagonals are 2 and state 1
# >>> autoReg8.createCovPrior(cov = 2)
# >>> autoReg8.createMeanPrior(mean = 1)
#
# Attributes:
# d: the degree of autoregressive, i.e., how many days to look back
# data (deprecatd): Users get a warning if this argument is used.
# discount factor: the discounting factor
# name: the name of the component
# padding: either 0 or None. The number to be padded for the first degree
# days, as no previous data is observed to form the feature
# matrix
# evaluation: the evaluation matrix for this component
# transition: the transition matrix for this component
# covPrior: the prior guess of the covariance matrix of the latent states
# meanPrior: the prior guess of the latent states
#
# """
#
#
# def __init__(self,
# data=None, # DEPRECATED
# degree=2,
# discount=0.99,
# name='ar2',
# w=100,
# padding=0):
#
# if data is not None:
# warn('The data argument in autoReg is deprecated. Please avoid using it.')
#
# self.componentType = 'autoReg'
# self.d = degree
# self.name = name
# self.discount = np.ones(self.d) * discount
# self.padding = padding
#
# # Initialize all basic quantities
# self.evaluation = None
# self.transition = None
# self.covPrior = None
# self.meanPrior = None
#
# # create all basic quantities
# self.createTransition()
# self.createCovPrior(scale=w)
# self.createMeanPrior()
#
# # record current step in case of lost
# self.step = 0
#
#
# def createEvaluation(self, step, data):
# """ The evaluation matrix for auto regressor.
#
# """
# if step > len(data):
# raise NameError("There is no sufficient data for creating autoregressor.")
# # We pad numbers if the step is too early
# self.evaluation = matrix([[self.padding] * (self.d - step) +
# list(data[max(0, (step - self.d)) : step])])
#
#
# def createTransition(self):
# """ Create the transition matrix.
#
# For the auto regressor component, the transition matrix is just the identity matrix
#
# """
# self.transition = np.matrix(np.eye(self.d))
#
#
# def createCovPrior(self, cov = None, scale = 1e6):
# """ Create the prior covariance matrix for the latent states
#
# """
# if cov is None:
# self.covPrior = np.matrix(np.eye(self.d)) * scale
# else:
# self.covPrior = cov * scale
#
#
# def createMeanPrior(self, mean = None, scale = 1):
# """ Create the prior latent state
#
# """
# if mean is None:
# self.meanPrior = np.matrix(np.zeros((self.d, 1))) * scale
# else:
# self.meanPrior = mean * scale
#
#
# def checkDimensions(self):
# """ if user supplies their own covPrior and meanPrior, this can
# be used to check if the dimension matches
#
# """
# tl.checker.checkVectorDimension(self.meanPrior, self.covPrior)
# print('The dimesnion looks good!')
#
#
# def updateEvaluation(self, date, data):
# self.createEvaluation(step=date, data=data)
#
#
# def appendNewData(self, data):
# """ AutoReg append new data automatically with the main time series. Nothing
# needs to be done here.
#
# """
# return
. Output only the next line. | self.ar4 = autoReg(degree=4, name='ar4', padding=0, w=1.0) |
Given the code snippet: <|code_start|>"""
=========================================================================
Code for the seasonality component
=========================================================================
This piece of code provide one building block for the dynamic linear model.
It decribes a latent seasonality trending in the time series data. The user
can use this class to construct any periodicy component to the model, for
instance, the hourly, weekly or monthly behavior. Different from the Fourier
series, the seasonality components are nonparametric, i.e., there is no sin
or cos relationship between each state. They can be arbitrarily valued.
"""
# create seasonality component
# We create the seasonality using the component class
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
import pydlm.base.tools as tl
from .component import component
and context (functions, classes, or occasionally code) from other files:
# Path: pydlm/modeler/component.py
# class component:
# """ The abstract class provides the basic structure for all model components
#
# Methods:
# createEvaluation: create the initial evaluation matrix
# createTransition: create the initial transition matrix
# createCovPrior: create a simple prior covariance matrix
# createMeanPrior: create a simple prior latent state
# checkDimensions: if user supplies their own covPrior and meanPrior, this can
# be used to check if the dimension matches
#
# """
# __metaclass__ = ABCMeta
#
# def __init__(self):
# """All members that need to be initialized."""
# self.d = None
# self.name = None
# self.componentType = None
# self.discount = None
# self.evaluation = None
# self.transition = None
# self.covPrior = None
# self.meanPrior = None
#
# def __eq__(self, other):
# """Define equal method used for == comparison"""
# if not isinstance(other, component):
# return NotImplemented
# else:
# return (self.equalOrNone(self.d, other.d) and
# self.equalOrNone(self.name, other.name) and
# self.equalOrNone(self.componentType, other.componentType) and
# self.npEqualOrNone(self.discount, other.discount) and
# self.npEqualOrNone(self.evaluation, other.evaluation) and
# self.npEqualOrNone(self.transition, other.transition) and
# self.npEqualOrNone(self.covPrior, other.covPrior) and
# self.npEqualOrNone(self.meanPrior, other.meanPrior))
#
#
# # define the evaluation matrix for the component
# @abstractmethod
# def createEvaluation(self): pass
# """ Create the evaluation matrix
#
# """
#
#
# # define the transition matrix for the component
# @abstractmethod
# def createTransition(self): pass
# """ Create the transition matrix
#
# """
#
#
# # define the prior distribution for the covariance for the component
# @abstractmethod
# def createCovPrior(self): pass
# """ Create the prior covariance matrix for the latent states
#
# """
#
#
# # define the prior distribution for the mean vector for the component
# @abstractmethod
# def createMeanPrior(self): pass
# """ Create the prior latent state
#
# """
#
#
# # check the matrix dimensions in case user supplied matrices are wrong
# @abstractmethod
# def checkDimensions(self): pass
# """ Check the dimensionality of the state and covariance
#
# """
#
# def equalOrNone(self, a, b):
# """Check if a and b are equal or both are None"""
# return (a is None and b is None) or a == b
#
# def npEqualOrNone(self, a, b):
# """Check if a and b are equal or both are None for NP arrays"""
# return (a is None and b is None) or (a == b).all()
. Output only the next line. | class seasonality(component): |
Here is a snippet: <|code_start|> transition = mt.matrixAddInDiag(transition, comp.transition)
evaluation = mt.matrixAddByCol(evaluation,
comp.evaluation)
state = mt.matrixAddByRow(state, comp.meanPrior)
sysVar = mt.matrixAddInDiag(sysVar, comp.covPrior)
self.discount = np.concatenate((self.discount, comp.discount))
self.componentIndex[i] = (currentIndex,
currentIndex + comp.d - 1)
currentIndex += comp.d
# if the model contains the automatic dynamic part, we add
# them to the builder
if len(self.automaticComponents) > 0:
self.automaticEvaluation = None
for i in self.automaticComponents:
comp = self.automaticComponents[i]
comp.updateEvaluation(0, data)
transition = mt.matrixAddInDiag(transition, comp.transition)
evaluation = mt.matrixAddByCol(evaluation,
comp.evaluation)
state = mt.matrixAddByRow(state, comp.meanPrior)
sysVar = mt.matrixAddInDiag(sysVar, comp.covPrior)
self.discount = np.concatenate((self.discount, comp.discount))
self.componentIndex[i] = (currentIndex,
currentIndex + comp.d - 1)
currentIndex += comp.d
self.statePrior = state
self.sysVarPrior = sysVar
self.noiseVar = np.matrix(noise)
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
from pydlm.base.baseModel import baseModel
from copy import deepcopy
from pydlm.modeler.matrixTools import matrixTools as mt
and context from other files:
# Path: pydlm/base/baseModel.py
# class baseModel:
# """ The baseModel class that provides the basic model structure for dlm.
#
# Attributes:
# transition: the transition matrix G
# evaluation: the evaluation F
# noiseVar: the variance of the observation noise
# sysVar: the covariance of the underlying states
# innovation: the incremnent of the latent covariance W
# state: the latent states
# df: the degree of freedom (= number of data points)
# obs: the expectation of the observation
# obsVar: the variance of the observation
#
# Methods:
# initializeObservation: initialize the obs and obsVar
# validation: validate the matrix dimensions are consistent.
# """
#
#
# # define the components of a baseModel
# def __init__(self, transition = None, evaluation = None, noiseVar = None, \
# sysVar = None, innovation = None, state = None, df = None):
# self.transition = transition
# self.evaluation = evaluation
# self.noiseVar = noiseVar
# self.sysVar = sysVar
# self.innovation = innovation
# self.state = state
# self.df = df
# self.obs = None
# self.obsVar = None
#
# # a hidden data field used only for model prediction
# self.prediction = __model__()
#
#
# # initialize the observation mean and variance
# def initializeObservation(self):
# """ Initialize the value of obs and obsVar
#
# """
# self.validation()
# self.obs = np.dot(self.evaluation, self.state)
# self.obsVar = np.dot(np.dot(self.evaluation, self.sysVar), self.evaluation.T) \
# + self.noiseVar
#
#
# # checking if the dimension matches with each other
# def validation(self):
# """ Validate the model components are consistent
#
# """
# # check symmetric
# tl.checker.checkSymmetry(self.transition)
# tl.checker.checkSymmetry(self.sysVar)
# if self.innovation is not None:
# tl.checker.checkSymmetry(self.innovation)
#
# # check wether dimension match
# tl.checker.checkMatrixDimension(self.transition, self.sysVar)
# if self.innovation is not None:
# tl.checker.checkMatrixDimension(self.transition, self.innovation)
# tl.checker.checkVectorDimension(self.evaluation, self.transition)
# tl.checker.checkVectorDimension(self.state, self.transition)
#
# Path: pydlm/modeler/matrixTools.py
# class matrixTools:
# @staticmethod
# def matrixAddInDiag(A, B):
# if A is None:
# return np.matrix(B)
# elif B is None:
# return np.matrix(A)
# else:
# (An, Ap) = A.shape
# (Bn, Bp) = B.shape
#
# newMatrixA = np.concatenate((A, np.matrix(np.zeros((An, Bp)))), 1)
# newMatrixB = np.concatenate((np.matrix(np.zeros((Bn, Ap))), B), 1)
# return np.concatenate((newMatrixA, newMatrixB), 0)
#
#
# # A + B = (A; B)
# @staticmethod
# def matrixAddByRow(A, B):
# if A is None:
# return B
# elif B is None:
# return A
# else:
# return np.concatenate((A, B), 0)
#
#
# # A + B = (A B)
# @staticmethod
# def matrixAddByCol(A, B):
# if A is None:
# return np.matrix(B)
# elif B is None:
# return np.matrix(A)
# else:
# return np.concatenate((A, B), 1)
#
#
# @staticmethod
# def AddTwoVectors(a, b):
# if a is None:
# return np.array(b)
# elif b is None:
# return np.array(a)
# else:
# return np.concatenate((a, b))
, which may include functions, classes, or code. Output only the next line. | self.model = baseModel(transition=transition, |
Continue the code snippet: <|code_start|> # data is used by auto regressor.
def initialize(self, data=[], noise=1):
""" Initialize the model. It construct the baseModel by assembling all
quantities from the components.
Args:
noise: the initial guess of the variance of the observation noise.
"""
if len(self.staticComponents) == 0 and \
len(self.dynamicComponents) == 0 and \
len(self.automaticComponents) == 0:
raise NameError('The model must contain at least' +
' one component')
# construct transition, evaluation, prior state, prior covariance
if self._printInfo:
print('Initializing models...')
transition = None
evaluation = None
state = None
sysVar = None
self.discount = np.array([])
# first construct for the static components
# the evaluation will be treated separately for static or dynamic
# as the latter one will change over time
currentIndex = 0 # used for compute the index
for i in self.staticComponents:
comp = self.staticComponents[i]
<|code_end|>
. Use current file imports:
import numpy as np
from pydlm.base.baseModel import baseModel
from copy import deepcopy
from pydlm.modeler.matrixTools import matrixTools as mt
and context (classes, functions, or code) from other files:
# Path: pydlm/base/baseModel.py
# class baseModel:
# """ The baseModel class that provides the basic model structure for dlm.
#
# Attributes:
# transition: the transition matrix G
# evaluation: the evaluation F
# noiseVar: the variance of the observation noise
# sysVar: the covariance of the underlying states
# innovation: the incremnent of the latent covariance W
# state: the latent states
# df: the degree of freedom (= number of data points)
# obs: the expectation of the observation
# obsVar: the variance of the observation
#
# Methods:
# initializeObservation: initialize the obs and obsVar
# validation: validate the matrix dimensions are consistent.
# """
#
#
# # define the components of a baseModel
# def __init__(self, transition = None, evaluation = None, noiseVar = None, \
# sysVar = None, innovation = None, state = None, df = None):
# self.transition = transition
# self.evaluation = evaluation
# self.noiseVar = noiseVar
# self.sysVar = sysVar
# self.innovation = innovation
# self.state = state
# self.df = df
# self.obs = None
# self.obsVar = None
#
# # a hidden data field used only for model prediction
# self.prediction = __model__()
#
#
# # initialize the observation mean and variance
# def initializeObservation(self):
# """ Initialize the value of obs and obsVar
#
# """
# self.validation()
# self.obs = np.dot(self.evaluation, self.state)
# self.obsVar = np.dot(np.dot(self.evaluation, self.sysVar), self.evaluation.T) \
# + self.noiseVar
#
#
# # checking if the dimension matches with each other
# def validation(self):
# """ Validate the model components are consistent
#
# """
# # check symmetric
# tl.checker.checkSymmetry(self.transition)
# tl.checker.checkSymmetry(self.sysVar)
# if self.innovation is not None:
# tl.checker.checkSymmetry(self.innovation)
#
# # check wether dimension match
# tl.checker.checkMatrixDimension(self.transition, self.sysVar)
# if self.innovation is not None:
# tl.checker.checkMatrixDimension(self.transition, self.innovation)
# tl.checker.checkVectorDimension(self.evaluation, self.transition)
# tl.checker.checkVectorDimension(self.state, self.transition)
#
# Path: pydlm/modeler/matrixTools.py
# class matrixTools:
# @staticmethod
# def matrixAddInDiag(A, B):
# if A is None:
# return np.matrix(B)
# elif B is None:
# return np.matrix(A)
# else:
# (An, Ap) = A.shape
# (Bn, Bp) = B.shape
#
# newMatrixA = np.concatenate((A, np.matrix(np.zeros((An, Bp)))), 1)
# newMatrixB = np.concatenate((np.matrix(np.zeros((Bn, Ap))), B), 1)
# return np.concatenate((newMatrixA, newMatrixB), 0)
#
#
# # A + B = (A; B)
# @staticmethod
# def matrixAddByRow(A, B):
# if A is None:
# return B
# elif B is None:
# return A
# else:
# return np.concatenate((A, B), 0)
#
#
# # A + B = (A B)
# @staticmethod
# def matrixAddByCol(A, B):
# if A is None:
# return np.matrix(B)
# elif B is None:
# return np.matrix(A)
# else:
# return np.concatenate((A, B), 1)
#
#
# @staticmethod
# def AddTwoVectors(a, b):
# if a is None:
# return np.array(b)
# elif b is None:
# return np.array(a)
# else:
# return np.concatenate((a, b))
. Output only the next line. | transition = mt.matrixAddInDiag(transition, comp.transition) |
Given the following code snippet before the placeholder: <|code_start|>"""
=========================================================================
Code for the dynamic component
=========================================================================
This piece of code provide one building block for the dynamic linear model.
It decribes a dynamic component in the time series data. It basically allows
user to supply covariate or controlled variable into the dlm,
and the coefficients of the features will be trained as the latent states.
Examples are holiday indicators, other observed variables and so on.
The name dynamic means that the features are changing over time.
"""
# create trend component
# We create the trend using the component class
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import pydlm.base.tools as tl
from collections import MutableSequence
from copy import deepcopy
from .component import component
and context including class names, function names, and sometimes code from other files:
# Path: pydlm/modeler/component.py
# class component:
# """ The abstract class provides the basic structure for all model components
#
# Methods:
# createEvaluation: create the initial evaluation matrix
# createTransition: create the initial transition matrix
# createCovPrior: create a simple prior covariance matrix
# createMeanPrior: create a simple prior latent state
# checkDimensions: if user supplies their own covPrior and meanPrior, this can
# be used to check if the dimension matches
#
# """
# __metaclass__ = ABCMeta
#
# def __init__(self):
# """All members that need to be initialized."""
# self.d = None
# self.name = None
# self.componentType = None
# self.discount = None
# self.evaluation = None
# self.transition = None
# self.covPrior = None
# self.meanPrior = None
#
# def __eq__(self, other):
# """Define equal method used for == comparison"""
# if not isinstance(other, component):
# return NotImplemented
# else:
# return (self.equalOrNone(self.d, other.d) and
# self.equalOrNone(self.name, other.name) and
# self.equalOrNone(self.componentType, other.componentType) and
# self.npEqualOrNone(self.discount, other.discount) and
# self.npEqualOrNone(self.evaluation, other.evaluation) and
# self.npEqualOrNone(self.transition, other.transition) and
# self.npEqualOrNone(self.covPrior, other.covPrior) and
# self.npEqualOrNone(self.meanPrior, other.meanPrior))
#
#
# # define the evaluation matrix for the component
# @abstractmethod
# def createEvaluation(self): pass
# """ Create the evaluation matrix
#
# """
#
#
# # define the transition matrix for the component
# @abstractmethod
# def createTransition(self): pass
# """ Create the transition matrix
#
# """
#
#
# # define the prior distribution for the covariance for the component
# @abstractmethod
# def createCovPrior(self): pass
# """ Create the prior covariance matrix for the latent states
#
# """
#
#
# # define the prior distribution for the mean vector for the component
# @abstractmethod
# def createMeanPrior(self): pass
# """ Create the prior latent state
#
# """
#
#
# # check the matrix dimensions in case user supplied matrices are wrong
# @abstractmethod
# def checkDimensions(self): pass
# """ Check the dimensionality of the state and covariance
#
# """
#
# def equalOrNone(self, a, b):
# """Check if a and b are equal or both are None"""
# return (a is None and b is None) or a == b
#
# def npEqualOrNone(self, a, b):
# """Check if a and b are equal or both are None for NP arrays"""
# return (a is None and b is None) or (a == b).all()
. Output only the next line. | class dynamic(component): |
Given snippet: <|code_start|>"""
===========================================================================
The code for autoregressive components
===========================================================================
This code implements the autoregressive component as a sub-class of dynamic.
Different from the dynamic component, the features in the autoReg is generated
from the data, and updated according to the data. All other features are
similar to @dynamic.
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from numpy import matrix
from warnings import warn
from .component import component
import numpy as np
import pydlm.base.tools as tl
and context:
# Path: pydlm/modeler/component.py
# class component:
# """ The abstract class provides the basic structure for all model components
#
# Methods:
# createEvaluation: create the initial evaluation matrix
# createTransition: create the initial transition matrix
# createCovPrior: create a simple prior covariance matrix
# createMeanPrior: create a simple prior latent state
# checkDimensions: if user supplies their own covPrior and meanPrior, this can
# be used to check if the dimension matches
#
# """
# __metaclass__ = ABCMeta
#
# def __init__(self):
# """All members that need to be initialized."""
# self.d = None
# self.name = None
# self.componentType = None
# self.discount = None
# self.evaluation = None
# self.transition = None
# self.covPrior = None
# self.meanPrior = None
#
# def __eq__(self, other):
# """Define equal method used for == comparison"""
# if not isinstance(other, component):
# return NotImplemented
# else:
# return (self.equalOrNone(self.d, other.d) and
# self.equalOrNone(self.name, other.name) and
# self.equalOrNone(self.componentType, other.componentType) and
# self.npEqualOrNone(self.discount, other.discount) and
# self.npEqualOrNone(self.evaluation, other.evaluation) and
# self.npEqualOrNone(self.transition, other.transition) and
# self.npEqualOrNone(self.covPrior, other.covPrior) and
# self.npEqualOrNone(self.meanPrior, other.meanPrior))
#
#
# # define the evaluation matrix for the component
# @abstractmethod
# def createEvaluation(self): pass
# """ Create the evaluation matrix
#
# """
#
#
# # define the transition matrix for the component
# @abstractmethod
# def createTransition(self): pass
# """ Create the transition matrix
#
# """
#
#
# # define the prior distribution for the covariance for the component
# @abstractmethod
# def createCovPrior(self): pass
# """ Create the prior covariance matrix for the latent states
#
# """
#
#
# # define the prior distribution for the mean vector for the component
# @abstractmethod
# def createMeanPrior(self): pass
# """ Create the prior latent state
#
# """
#
#
# # check the matrix dimensions in case user supplied matrices are wrong
# @abstractmethod
# def checkDimensions(self): pass
# """ Check the dimensionality of the state and covariance
#
# """
#
# def equalOrNone(self, a, b):
# """Check if a and b are equal or both are None"""
# return (a is None and b is None) or a == b
#
# def npEqualOrNone(self, a, b):
# """Check if a and b are equal or both are None for NP arrays"""
# return (a is None and b is None) or (a == b).all()
which might include code, classes, or functions. Output only the next line. | class autoReg(component): |
Based on the snippet: <|code_start|>"""
=========================================================================
Code for the trend component
=========================================================================
This piece of code provide one building block for the dynamic linear model.
It decribes a latent polynomial trending in the time series data.
"""
# create trend component
# We create the trend using the component class
<|code_end|>
, predict the immediate next line with the help of imports:
import numpy as np
import pydlm.base.tools as tl
from .component import component
and context (classes, functions, sometimes code) from other files:
# Path: pydlm/modeler/component.py
# class component:
# """ The abstract class provides the basic structure for all model components
#
# Methods:
# createEvaluation: create the initial evaluation matrix
# createTransition: create the initial transition matrix
# createCovPrior: create a simple prior covariance matrix
# createMeanPrior: create a simple prior latent state
# checkDimensions: if user supplies their own covPrior and meanPrior, this can
# be used to check if the dimension matches
#
# """
# __metaclass__ = ABCMeta
#
# def __init__(self):
# """All members that need to be initialized."""
# self.d = None
# self.name = None
# self.componentType = None
# self.discount = None
# self.evaluation = None
# self.transition = None
# self.covPrior = None
# self.meanPrior = None
#
# def __eq__(self, other):
# """Define equal method used for == comparison"""
# if not isinstance(other, component):
# return NotImplemented
# else:
# return (self.equalOrNone(self.d, other.d) and
# self.equalOrNone(self.name, other.name) and
# self.equalOrNone(self.componentType, other.componentType) and
# self.npEqualOrNone(self.discount, other.discount) and
# self.npEqualOrNone(self.evaluation, other.evaluation) and
# self.npEqualOrNone(self.transition, other.transition) and
# self.npEqualOrNone(self.covPrior, other.covPrior) and
# self.npEqualOrNone(self.meanPrior, other.meanPrior))
#
#
# # define the evaluation matrix for the component
# @abstractmethod
# def createEvaluation(self): pass
# """ Create the evaluation matrix
#
# """
#
#
# # define the transition matrix for the component
# @abstractmethod
# def createTransition(self): pass
# """ Create the transition matrix
#
# """
#
#
# # define the prior distribution for the covariance for the component
# @abstractmethod
# def createCovPrior(self): pass
# """ Create the prior covariance matrix for the latent states
#
# """
#
#
# # define the prior distribution for the mean vector for the component
# @abstractmethod
# def createMeanPrior(self): pass
# """ Create the prior latent state
#
# """
#
#
# # check the matrix dimensions in case user supplied matrices are wrong
# @abstractmethod
# def checkDimensions(self): pass
# """ Check the dimensionality of the state and covariance
#
# """
#
# def equalOrNone(self, a, b):
# """Check if a and b are equal or both are None"""
# return (a is None and b is None) or a == b
#
# def npEqualOrNone(self, a, b):
# """Check if a and b are equal or both are None for NP arrays"""
# return (a is None and b is None) or (a == b).all()
. Output only the next line. | class trend(component): |
Here is a snippet: <|code_start|>#!usr/env/python
# filename: test_integration.py
def test_api_integration_bcr_1k():
test_data = os.path.abspath('abstar/test_data/test_1k.fasta')
with open(test_data, 'r') as f:
test_seqs = [Sequence(s) for s in SeqIO.parse(f, 'fasta')]
<|code_end|>
. Write the next line using the current file imports:
import os
import sys
from Bio import SeqIO
from abutils.core.sequence import Sequence
from ..core import abstar
and context from other files:
# Path: abstar/core/abstar.py
# STR_TYPES = [str, ]
# STR_TYPES = [str, unicode]
# def parse_arguments(print_help=False):
# def __init__(self, project_dir=None, input=None, output=None, log=None, temp=None,
# sequences=None, chunksize=500, output_type=['json', ], assigner='blastn',
# merge=False, pandaseq_algo='simple_bayesian', use_test_data=False,
# parquet=False, nextseq=False, uid=0, isotype=False, pretty=False, num_cores=0,
# basespace=False, cluster=False, padding=True, raw=False, json_keys=None,
# debug=False, germ_db='human', receptor='bcr', gzip=False):
# def validate_args(args):
# def make_directories(args):
# def _make_direc(d, cluster):
# def make_merge_dir(args):
# def setup_logging(log_dir, debug):
# def log_options(input_dir, output_dir, temp_dir, args):
# def list_files(d, log=False):
# def build_output_base(output_types):
# def concat_outputs(input_file, temp_output_file_dicts, output_dir, args):
# def concat_logs(input_file, logs, log_dir, log_type):
# def clear_temp_files(temp_files):
# def download_files(input_dir):
# def merge_reads(input_dir, args):
# def format_check(input_list):
# def _get_format(in_file):
# def split_file(f, fmt, temp_dir, args):
# def print_input_file_info(f, fmt):
# def log_job_stats(total_seqs, good_seq_counts, start_time, end_time):
# def print_job_stats(total_seqs, good_seq_counts, start_time, end_time):
# def run_abstar(seq_file, output_dir, log_dir, file_format, arg_dict):
# def process_sequences(sequences, args):
# def run_jobs(files, output_dir, log_dir, file_format, args):
# def _run_jobs_singlethreaded(files, output_dir, log_dir, file_format, args):
# def _run_jobs_via_multiprocessing(files, output_dir, log_dir, file_format, args):
# def monitor_mp_jobs(results):
# def _run_jobs_via_celery(files, output_dir, log_dir, file_format, args):
# def monitor_celery_jobs(results):
# def update_progress(finished, jobs, failed=None):
# def run(*args, **kwargs):
# def run_standalone(args):
# def main(args):
# class Args(object):
, which may include functions, classes, or code. Output only the next line. | seqs = abstar.run(*test_seqs) |
Predict the next line after this snippet: <|code_start|> ('cdr2_aa', self.antibody.v.regions.aa_seqs['CDR2']),
('cdr3_nt', self.antibody.junction.cdr3_nt),
('cdr3_aa', self.antibody.junction.cdr3_aa),
('v_start', str(self.antibody.v.germline_start)),
('vdj_nt', self.antibody.vdj_nt),
('vdj_aa', self.antibody.vdj_aa),
('var_muts_nt', '|'.join([m.abstar_formatted for m in self.antibody.v.nt_mutations])),
('var_muts_aa', '|'.join([m.abstar_formatted for m in self.antibody.v.aa_mutations])),
('var_identity_nt', str(self.antibody.v.nt_identity)),
('var_identity_aa', str(self.antibody.v.aa_identity)),
('var_mut_count_nt', str(self.antibody.v.nt_mutations.count)),
('var_mut_count_aa', str(self.antibody.v.aa_mutations.count)),
('var_ins', '|'.join([i.abstar_formatted for i in self.antibody.v.insertions])),
('var_del', '|'.join([d.abstar_formatted for d in self.antibody.v.deletions])),
('isotype', isotype),
('species', self.antibody.species),
('raw_input', self.antibody.raw_input.sequence),
])
return ','.join(output.values())
def _build_airr_output(self):
# format specification: https://docs.airr-community.org/en/latest/datarep/rearrangements.html
try:
c_call = self.antibody.isotype.isotype
except AttributeError:
c_call = ''
if self.antibody.d:
d_call = self.antibody.d.full
d_gene = self.antibody.d.gene
<|code_end|>
using the current file's imports:
import collections
import json
import os
import traceback
import uuid
from abutils.utils import log
from .cigar import make_cigar
and any relevant context from other files:
# Path: abstar/utils/cigar.py
# def make_cigar(germline_segment):
# cigar = ''
# if germline_segment.query_start > 0:
# cigar += '{}S'.format(germline_segment.query_start)
# if germline_segment.germline_start > 0:
# cigar += '{}N'.format(germline_segment.germline_start)
# cigar += make_alignment_cigar(germline_segment.realignment.aligned_query,
# germline_segment.realignment.aligned_target)
# return cigar
. Output only the next line. | d_cigar = make_cigar(self.antibody.d) |
Based on the snippet: <|code_start|>
paired_reads (bool): If ``True``, reads will be processed as paired reads. If ``False``,
each read will be processed separately. It is not advisable to process paired
reads with ``paired_reads`` set to ``False`` because if paired read files are
processed separately and one read passes filters while the paired read doesn't,
this may cause problems with downstream processes (like read merging).
allow_5prime_trimming (bool): If ``True``, quality trimming will be performed
on the 5' end of the reads as well as the 3' end. Default is ``False``.
Returns:
str: Path to the output directory
'''
if input_directory is None and any([file_pairs is None, output_directory is None]):
err = '\nERROR: Either an input_directory must be provided or '
err += 'both file_pairs and an output_directory must be provided.\n'
print(err)
sys.exit(1)
if file_pairs:
files = file_pairs
else:
input_directory = os.path.normpath(input_directory)
if output_directory is None:
oparent = os.path.dirname(input_directory)
output_directory = os.path.join(oparent, 'quality_trimmed')
make_dir(output_directory)
if paired_reads:
files = list_files(input_directory)
<|code_end|>
, predict the immediate next line with the help of imports:
from multiprocessing import cpu_count
from subprocess import Popen, PIPE
from Bio import SeqIO
from .utils.pandaseq import pair_files
from abutils.utils.log import get_logger
from abutils.utils.pipeline import list_files, make_dir
import os
import sys
and context (classes, functions, sometimes code) from other files:
# Path: abstar/utils/pandaseq.py
# def pair_files(files, nextseq):
# pairs = {}
# for f in files:
# if nextseq:
# f_prefix = '_'.join(os.path.basename(f).split('_')[:-2])
# else:
# f_prefix = '_'.join(os.path.basename(f).split('_')[:-3])
# if f_prefix in pairs:
# pairs[f_prefix].append(f)
# else:
# pairs[f_prefix] = [f, ]
# return pairs
. Output only the next line. | file_pairs = pair_files(files, nextseq) |
Predict the next line for this snippet: <|code_start|>
@property
def count(self):
return len(self.mutations)
def add(self, mutation):
'''
Add a single mutation.
Args:
mutation (Mutation): the Mutation object to be added
'''
self.mutations.append(mutation)
def add_many(self, mutations):
'''
Adds multiple mutations.
Args:
mutation (list): an iterable of Mutation objects to be added
'''
self.mutations += mutations
def in_region(self, region):
region_mutations = []
<|code_end|>
with the help of current file imports:
import math
import re
import traceback
from abutils.utils import log
from abutils.utils.codons import codon_lookup as codons
from .regions import IMGT_REGION_START_POSITIONS_AA, IMGT_REGION_END_POSITIONS_AA
and context from other files:
# Path: abstar/utils/regions.py
# IMGT_REGION_START_POSITIONS_AA = {'FR1': 1, 'CDR1': 27, 'FR2': 39, 'CDR2': 56,
# 'FR3': 66, 'CDR3': 105, 'FR4': 118}
#
# IMGT_REGION_END_POSITIONS_AA = {'FR1': 26, 'CDR1': 38, 'FR2': 55, 'CDR2': 65,
# 'FR3': 104, 'CDR3': 117, 'FR4': 129}
, which may contain function names, class names, or code. Output only the next line. | start = IMGT_REGION_START_POSITIONS_AA[region] |
Next line prediction: <|code_start|> @property
def count(self):
return len(self.mutations)
def add(self, mutation):
'''
Add a single mutation.
Args:
mutation (Mutation): the Mutation object to be added
'''
self.mutations.append(mutation)
def add_many(self, mutations):
'''
Adds multiple mutations.
Args:
mutation (list): an iterable of Mutation objects to be added
'''
self.mutations += mutations
def in_region(self, region):
region_mutations = []
start = IMGT_REGION_START_POSITIONS_AA[region]
<|code_end|>
. Use current file imports:
(import math
import re
import traceback
from abutils.utils import log
from abutils.utils.codons import codon_lookup as codons
from .regions import IMGT_REGION_START_POSITIONS_AA, IMGT_REGION_END_POSITIONS_AA)
and context including class names, function names, or small code snippets from other files:
# Path: abstar/utils/regions.py
# IMGT_REGION_START_POSITIONS_AA = {'FR1': 1, 'CDR1': 27, 'FR2': 39, 'CDR2': 56,
# 'FR3': 66, 'CDR3': 105, 'FR4': 118}
#
# IMGT_REGION_END_POSITIONS_AA = {'FR1': 26, 'CDR1': 38, 'FR2': 55, 'CDR2': 65,
# 'FR3': 104, 'CDR3': 117, 'FR4': 129}
. Output only the next line. | end = IMGT_REGION_END_POSITIONS_AA[region] |
Predict the next line after this snippet: <|code_start|> vdj.log('{}-ASSIGNMENT ERROR:'.format(segment),
'Score ({}) is too low'.format(germs[0].score))
return None
others = [GermlineSegment(germ.name, self.db_name, score=germ.score) for germ in germs[1:6]]
return GermlineSegment(germs[0].name, self.db_name, score=germs[0].score, others=others)
"""
__metaclass__ = abc.ABCMeta
def __init__(self, db_name, receptor):
super(BaseAssigner, self).__init__()
self.name = self.__class__.__name__.lower()
self.species = db_name
self.db_name = db_name
self.receptor = receptor.lower()
self._assigned = None
self._unassigned = None
self._germline_directory = None
self._binary_directory = None
@abc.abstractmethod
def __call__(self, sequence_file, file_format):
pass
@property
def germline_directory(self):
if self._germline_directory is None:
<|code_end|>
using the current file's imports:
import abc
import os
from ..core.germline import get_germline_database_directory
and any relevant context from other files:
# Path: abstar/core/germline.py
# def get_germline_database_directory(species, receptor='bcr'):
# species = species.lower()
# receptor = receptor.lower()
# addon_dir = os.path.expanduser(f'~/.abstar/germline_dbs/{receptor}')
# if os.path.isdir(addon_dir):
# if species.lower() in [os.path.basename(d[0]) for d in os.walk(addon_dir)]:
# return os.path.join(addon_dir, species.lower())
# mod_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# return os.path.join(mod_dir, f'assigners/germline_dbs/{receptor}/{species}')
. Output only the next line. | self._germline_directory = get_germline_database_directory(self.db_name, |
Predict the next line for this snippet: <|code_start|>#
# Copyright (c) 2015 Bryan Briney
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import absolute_import, division, print_function, unicode_literals
def get_isotype(antibody):
try:
<|code_end|>
with the help of current file imports:
import logging
import os
import traceback
from Bio import SeqIO
from Bio.Seq import Seq
from abutils.core.sequence import Sequence
from abutils.utils import log
from abutils.utils.alignment import local_alignment
from abutils.utils.decorators import lazy_property
from ..core.germline import get_germline_database_directory
and context from other files:
# Path: abstar/core/germline.py
# def get_germline_database_directory(species, receptor='bcr'):
# species = species.lower()
# receptor = receptor.lower()
# addon_dir = os.path.expanduser(f'~/.abstar/germline_dbs/{receptor}')
# if os.path.isdir(addon_dir):
# if species.lower() in [os.path.basename(d[0]) for d in os.walk(addon_dir)]:
# return os.path.join(addon_dir, species.lower())
# mod_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# return os.path.join(mod_dir, f'assigners/germline_dbs/{receptor}/{species}')
, which may contain function names, class names, or code. Output only the next line. | germ_dir = get_germline_database_directory(antibody.germ_db) |
Next line prediction: <|code_start|> return coding_region
def _get_aa_sequence(self):
return Seq(self.coding_region).translate()
def _fix_ambigs(self, antibody):
'''
Fixes ambiguous nucleotides by replacing them with the germline nucleotide.
'''
self.query_alignment = ''.join([q if q.upper() != 'N' else g for q, g in zip(self.query_alignment,
self.germline_alignment)])
# don't forget to also correct ambigs in the oriented_input sequence
antibody.oriented_input.sequence = antibody.oriented_input.sequence[:self.query_start] + \
self.query_alignment.replace('-', '') + \
antibody.oriented_input.sequence[self.query_end + 1:]
def _indel_check(self):
if any(['-' in self.query_alignment, '-' in self.germline_alignment]):
return True
return False
def _find_indels(self, antibody):
'''
Identifies and annotates indels in the query sequence.
'''
if self._indel_check():
<|code_end|>
. Use current file imports:
(import math
import os
import traceback
from Bio import SeqIO
from Bio.Seq import Seq
from abutils.core.sequence import Sequence
from abutils.utils.alignment import global_alignment, local_alignment
from abutils.utils.codons import codon_lookup
from abutils.utils.decorators import lazy_property
from ..utils import indels
from ..utils.mixins import LoggingMixin)
and context including class names, function names, or small code snippets from other files:
# Path: abstar/utils/indels.py
# class Indel(object):
# class Insertion(Indel):
# class Deletion(Indel):
# def __init__(self, indel):
# def __contains__(self, item):
# def __getitem__(self, key):
# def _get_frame(self):
# def __init__(self, indel):
# def imgt_formatted(self):
# def abstar_formatted(self):
# def json_formatted(self):
# def __init__(self, indel):
# def imgt_formatted(self):
# def abstar_formatted(self):
# def json_formatted(self):
# def find_insertions(antibody, segment):
# def _annotate_insertion(segment, start, length, sequence, fixed=False):
# def _fix_frameshift_insertion(antibody, segment, s, e):
# def find_deletions(antibody, segment):
# def _annotate_deletion(segment, start, length, sequence, fixed=False):
# def _fix_frameshift_deletion(antibody, segment, s, e):
#
# Path: abstar/utils/mixins.py
# class LoggingMixin(object):
# """docstring for LoggingMixin"""
# def __init__(self):
# self._log = None
# self._exceptions = None
#
#
# @property
# def logs(self):
# if self._log is not None:
# return self._log
# return []
#
#
# @property
# def exceptions(self):
# if self._exceptions is not None:
# return self._exceptions
# return []
#
#
# def log(self, *args, **kwargs):
# '''
# Records a log message
# '''
# sep = kwargs.get('sep', ' ')
# lstring = sep.join([str(a) for a in args])
# if self._log is None:
# self._log = [lstring, ]
# else:
# self._log.append(lstring)
#
#
# def exception(self, *args, **kwargs):
# '''
# Records an exception.
# '''
# sep = kwargs.get('sep', '\n')
# estring = sep.join([str(a) for a in args])
# if self._exceptions is None:
# self._exceptions = [estring, ]
# else:
# self._exceptions.append(estring)
#
#
# def format_log(self):
# '''
# Formats the log, including exceptions.
#
# Log formatting will only be performed on sequences that had an
# error during annotation, unless AbStar is run in debug
# mode. In debug mode, all sequences will be logged.
#
# Returns:
# --------
#
# str: Formatted log string.
# '''
# output = ''
# output += '\n'.join(self.logs)
# if self._check_for_exceptions():
# output += '\n\n'
# output += self._format_exceptions()
# output += '\n\n'
# return output
#
#
# def _check_for_exceptions(self):
# if self.exceptions:
# return True
# if self.v is not None:
# if self.v._exceptions:
# return True
# if self.d is not None:
# if self.d._exceptions:
# return True
# if self.j is not None:
# if self.j._exceptions:
# return True
# return False
#
#
# def _format_exceptions(self):
# exceptions = []
# exceptions += self.exceptions
# estring = '\n\nEXCEPTIONS\n'
# estring += '----------\n\n'
# if self.v is not None:
# exceptions += self.v.exceptions
# if self.d is not None:
# exceptions += self.d.exceptions
# if self.j is not None:
# exceptions += self.j.exceptions
# estring += '\n\n'.join([e for e in exceptions])
# return estring
. Output only the next line. | self.insertions = indels.find_insertions(antibody, self) |
Predict the next line after this snippet: <|code_start|>
@celery.task
def run_abstar(sequence_file, output_directory, args):
'''
Wrapper function to multiprocess (or not) the assignment of V, D and J
germline genes. Also writes the JSON-formatted output to file.
Input is a a FASTA-formatted file of antibody sequences and the output directory.
Optional input items include the species (supported species: 'human'); length of
the unique antibody identifier (UAID); and debug mode (which forces single-threading
and prints more verbose errors.)
Output is the number of functional antibody sequences identified in the input file.
'''
try:
# setup logging
global logger
logger = log.get_logger(__name__)
assigned_log = ''
unassigned_log = ''
# identify output file
output_filename = os.path.basename(seq_file)
if args.output_type == 'json':
output_file = os.path.join(output_dir, output_filename + '.json')
elif args.output_type in ['imgt', 'hadoop']:
output_file = os.path.join(output_dir, output_filename + '.txt')
# start assignment
<|code_end|>
using the current file's imports:
from multiprocessing import Pool
from ..assigners.registry import ASSIGNERS
from ..utils.output import get_abstar_results, write_output
from ..utils.queue.celery import celery
from abutils.utils import log
and any relevant context from other files:
# Path: abstar/assigners/registry.py
# ASSIGNERS = {'blastn': Blastn}
#
# Path: abstar/utils/output.py
# def get_abstar_results(antibodies, pretty=False, padding=True, raw=False, keys=None):
# return [AbstarResult(ab, pretty, padding, raw, keys) for ab in antibodies]
#
# def write_output(output_dict, output_dir, output_prefix):
# output_file_dict = {}
# for fmt in output_dict.keys():
# subdir = os.path.join(output_dir, fmt)
# output_name = output_prefix + get_output_suffix(fmt)
# output_file = os.path.join(subdir, output_name)
# with open(output_file, 'w') as f:
# f.write('\n'.join(output_dict[fmt]))
# output_file_dict[fmt] = output_file
# return output_file_dict
#
# Path: abstar/utils/queue/celery.py
. Output only the next line. | assigner = ASSIGNERS[args.assigner] |
Given the following code snippet before the placeholder: <|code_start|> germline genes. Also writes the JSON-formatted output to file.
Input is a a FASTA-formatted file of antibody sequences and the output directory.
Optional input items include the species (supported species: 'human'); length of
the unique antibody identifier (UAID); and debug mode (which forces single-threading
and prints more verbose errors.)
Output is the number of functional antibody sequences identified in the input file.
'''
try:
# setup logging
global logger
logger = log.get_logger(__name__)
assigned_log = ''
unassigned_log = ''
# identify output file
output_filename = os.path.basename(seq_file)
if args.output_type == 'json':
output_file = os.path.join(output_dir, output_filename + '.json')
elif args.output_type in ['imgt', 'hadoop']:
output_file = os.path.join(output_dir, output_filename + '.txt')
# start assignment
assigner = ASSIGNERS[args.assigner]
assigner(sequence_file, args.species)
# process all of the successfully assigned sequences
assigned = [Antibody(vdj, args.species) for vdj in assigner.assigned]
for ab in assigned:
ab.annotate()
if args.debug:
assigned_log += ab.format_log()
<|code_end|>
, predict the next line using imports from the current file:
from multiprocessing import Pool
from ..assigners.registry import ASSIGNERS
from ..utils.output import get_abstar_results, write_output
from ..utils.queue.celery import celery
from abutils.utils import log
and context including class names, function names, and sometimes code from other files:
# Path: abstar/assigners/registry.py
# ASSIGNERS = {'blastn': Blastn}
#
# Path: abstar/utils/output.py
# def get_abstar_results(antibodies, pretty=False, padding=True, raw=False, keys=None):
# return [AbstarResult(ab, pretty, padding, raw, keys) for ab in antibodies]
#
# def write_output(output_dict, output_dir, output_prefix):
# output_file_dict = {}
# for fmt in output_dict.keys():
# subdir = os.path.join(output_dir, fmt)
# output_name = output_prefix + get_output_suffix(fmt)
# output_file = os.path.join(subdir, output_name)
# with open(output_file, 'w') as f:
# f.write('\n'.join(output_dict[fmt]))
# output_file_dict[fmt] = output_file
# return output_file_dict
#
# Path: abstar/utils/queue/celery.py
. Output only the next line. | results = get_abstar_results(assigned, pretty=args.pretty, padding=args.padding, raw=args.raw) |
Based on the snippet: <|code_start|>
Input is a a FASTA-formatted file of antibody sequences and the output directory.
Optional input items include the species (supported species: 'human'); length of
the unique antibody identifier (UAID); and debug mode (which forces single-threading
and prints more verbose errors.)
Output is the number of functional antibody sequences identified in the input file.
'''
try:
# setup logging
global logger
logger = log.get_logger(__name__)
assigned_log = ''
unassigned_log = ''
# identify output file
output_filename = os.path.basename(seq_file)
if args.output_type == 'json':
output_file = os.path.join(output_dir, output_filename + '.json')
elif args.output_type in ['imgt', 'hadoop']:
output_file = os.path.join(output_dir, output_filename + '.txt')
# start assignment
assigner = ASSIGNERS[args.assigner]
assigner(sequence_file, args.species)
# process all of the successfully assigned sequences
assigned = [Antibody(vdj, args.species) for vdj in assigner.assigned]
for ab in assigned:
ab.annotate()
if args.debug:
assigned_log += ab.format_log()
results = get_abstar_results(assigned, pretty=args.pretty, padding=args.padding, raw=args.raw)
<|code_end|>
, predict the immediate next line with the help of imports:
from multiprocessing import Pool
from ..assigners.registry import ASSIGNERS
from ..utils.output import get_abstar_results, write_output
from ..utils.queue.celery import celery
from abutils.utils import log
and context (classes, functions, sometimes code) from other files:
# Path: abstar/assigners/registry.py
# ASSIGNERS = {'blastn': Blastn}
#
# Path: abstar/utils/output.py
# def get_abstar_results(antibodies, pretty=False, padding=True, raw=False, keys=None):
# return [AbstarResult(ab, pretty, padding, raw, keys) for ab in antibodies]
#
# def write_output(output_dict, output_dir, output_prefix):
# output_file_dict = {}
# for fmt in output_dict.keys():
# subdir = os.path.join(output_dir, fmt)
# output_name = output_prefix + get_output_suffix(fmt)
# output_file = os.path.join(subdir, output_name)
# with open(output_file, 'w') as f:
# f.write('\n'.join(output_dict[fmt]))
# output_file_dict[fmt] = output_file
# return output_file_dict
#
# Path: abstar/utils/queue/celery.py
. Output only the next line. | write_output(results, output_file, args.output_type) |
Given the code snippet: <|code_start|> The unpacked list with all widths in order
Examples
--------
Time stepping for time-domain codes can be represented in condensed form, e.g.:
>>> from discretize.utils import unpack_widths
>>> dt = [ (1e-5, 10), (1e-4, 4), 1e-3 ]
The above means to take 10 steps at a step width of 1e-5 s and then
4 more at 1e-4 s, and then one step of 1e-3 s. When unpacked, the output is
of length 15 and is given by:
>>> unpack_widths(dt)
array([1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05, 1.e-05,
1.e-05, 1.e-05, 1.e-04, 1.e-04, 1.e-04, 1.e-04, 1.e-03])
Each axis of a tensor mesh can also be defined as a condensed list of floats
and/or tuples. When a third number is defined in any tuple, the width value
is successively expanded by that factor, e.g.:
>>> dt = [ 6., 8., (10.0, 3), (8.0, 4, 2.) ]
>>> unpack_widths(dt)
array([ 6., 8., 10., 10., 10., 16., 32., 64., 128.])
"""
if type(value) is not list:
raise Exception("unpack_widths must be a list of scalars and tuples.")
proposed = []
for v in value:
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
import scipy.ndimage as ndi
import scipy.sparse as sp
import discretize
import warnings
from discretize.utils.code_utils import is_scalar
from scipy.spatial import cKDTree, Delaunay
from scipy import interpolate
from discretize.utils.code_utils import deprecate_function
and context (functions, classes, or occasionally code) from other files:
# Path: discretize/utils/code_utils.py
# def is_scalar(f):
# """Determine if the input argument is a scalar.
#
# The function **is_scalar** returns *True* if the input is an integer,
# float or complex number. The function returns *False* otherwise.
#
# Parameters
# ----------
# f :
# Any input quantity
#
# Returns
# -------
# bool :
#
# - *True* if the input argument is an integer, float or complex number
# - *False* otherwise
#
#
# """
#
# if isinstance(f, SCALARTYPES):
# return True
# elif isinstance(f, np.ndarray) and f.size == 1 and isinstance(f[0], SCALARTYPES):
# return True
# return False
#
# Path: discretize/utils/code_utils.py
# def deprecate_function(new_function, old_name, removal_version=None, future_warn=False):
# if future_warn:
# Warning = FutureWarning
# else:
# Warning = DeprecationWarning
# new_name = new_function.__name__
# if removal_version is not None:
# tag = f" It will be removed in version {removal_version} of discretize."
# else:
# tag = " It will be removed in a future version of discretize."
#
# def dep_function(*args, **kwargs):
# warnings.warn(
# f"{old_name} has been deprecated, please use {new_name}." + tag,
# Warning,
# )
# return new_function(*args, **kwargs)
#
# doc = f"""
# `{old_name}` has been deprecated. See `{new_name}` for documentation
#
# See Also
# --------
# {new_name}
# """
# dep_function.__doc__ = doc
# return dep_function
. Output only the next line. | if is_scalar(v): |
Predict the next line for this snippet: <|code_start|> ]
)
elif mesh.dim == 2:
locations = np.vstack(
[
gridN[:-1, 1:].reshape((-1, mesh.dim), order="F"),
gridN[1:, 1:].reshape((-1, mesh.dim), order="F"),
]
)
# Interpolate z values on CC or N
z_xyz = z_interpolate(locations[:, :-1]).squeeze()
# Apply nearest neighbour if in extrapolation
ind_nan = np.isnan(z_xyz)
if any(ind_nan):
tree = cKDTree(xyz)
_, ind = tree.query(locations[ind_nan, :])
z_xyz[ind_nan] = xyz[ind, dim]
# Create an active bool of all True
active = np.all(
(locations[:, dim] < z_xyz).reshape((mesh.nC, -1), order="F"), axis=1
)
return active.ravel()
<|code_end|>
with the help of current file imports:
import numpy as np
import scipy.ndimage as ndi
import scipy.sparse as sp
import discretize
import warnings
from discretize.utils.code_utils import is_scalar
from scipy.spatial import cKDTree, Delaunay
from scipy import interpolate
from discretize.utils.code_utils import deprecate_function
and context from other files:
# Path: discretize/utils/code_utils.py
# def is_scalar(f):
# """Determine if the input argument is a scalar.
#
# The function **is_scalar** returns *True* if the input is an integer,
# float or complex number. The function returns *False* otherwise.
#
# Parameters
# ----------
# f :
# Any input quantity
#
# Returns
# -------
# bool :
#
# - *True* if the input argument is an integer, float or complex number
# - *False* otherwise
#
#
# """
#
# if isinstance(f, SCALARTYPES):
# return True
# elif isinstance(f, np.ndarray) and f.size == 1 and isinstance(f[0], SCALARTYPES):
# return True
# return False
#
# Path: discretize/utils/code_utils.py
# def deprecate_function(new_function, old_name, removal_version=None, future_warn=False):
# if future_warn:
# Warning = FutureWarning
# else:
# Warning = DeprecationWarning
# new_name = new_function.__name__
# if removal_version is not None:
# tag = f" It will be removed in version {removal_version} of discretize."
# else:
# tag = " It will be removed in a future version of discretize."
#
# def dep_function(*args, **kwargs):
# warnings.warn(
# f"{old_name} has been deprecated, please use {new_name}." + tag,
# Warning,
# )
# return new_function(*args, **kwargs)
#
# doc = f"""
# `{old_name}` has been deprecated. See `{new_name}` for documentation
#
# See Also
# --------
# {new_name}
# """
# dep_function.__doc__ = doc
# return dep_function
, which may contain function names, class names, or code. Output only the next line. | meshTensor = deprecate_function(unpack_widths, "meshTensor", removal_version="1.0.0", future_warn=False) |
Given the following code snippet before the placeholder: <|code_start|>
class TestVolumeAverage(unittest.TestCase):
def test_tensor_to_tensor(self):
h1 = np.random.rand(16)
h1 /= h1.sum()
h2 = np.random.rand(16)
h2 /= h2.sum()
h1s = []
h2s = []
for i in range(3):
print(f"Tensor to Tensor {i+1}D: ", end="")
h1s.append(h1)
h2s.append(h2)
mesh1 = discretize.TensorMesh(h1s)
mesh2 = discretize.TensorMesh(h2s)
in_put = np.random.rand(mesh1.nC)
out_put = np.empty(mesh2.nC)
# test the three ways of calling...
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import unittest
import discretize
from discretize.utils import volume_average
from numpy.testing import assert_array_equal, assert_allclose
and context including class names, function names, and sometimes code from other files:
# Path: discretize/utils/interpolation_utils.py
# def volume_average(mesh_in, mesh_out, values=None, output=None):
# """Volume averaging interpolation between meshes.
#
# This volume averaging function looks for overlapping cells in each mesh,
# and weights the output values by the partial volume ratio of the overlapping
# input cells. The volume average operation should result in an output such that
# ``np.sum(mesh_in.cell_volumes*values)`` = ``np.sum(mesh_out.cell_volumes*output)``,
# when the input and output meshes have the exact same extent. When the output mesh
# extent goes beyond the input mesh, it is assumed to have constant values in that
# direction. When the output mesh extent is smaller than the input mesh, only the
# overlapping extent of the input mesh contributes to the output.
#
# This function operates in three different modes. If only *mesh_in* and
# *mesh_out* are given, the returned value is a ``scipy.sparse.csr_matrix``
# that represents this operation (so it could potentially be applied repeatedly).
# If *values* is given, the volume averaging is performed right away (without
# internally forming the matrix) and the returned value is the result of this.
# If *output* is given as well, it will be filled with the values of the
# operation and then returned (assuming it has the correct ``dtype``).
#
# Parameters
# ----------
# mesh_in : ~discretize.TensorMesh or ~discretize.TreeMesh
# Input mesh (the mesh you are interpolating from)
# mesh_out : ~discretize.TensorMesh or ~discretize.TreeMesh
# Output mesh (the mesh you are interpolating to)
# values : (mesh_in.n_cells) numpy.ndarray, optional
# Array with values defined at the cells of ``mesh_in``
# output : (mesh_out.n_cells) numpy.ndarray of float, optional
# Output array to be overwritten
#
# Returns
# -------
# (mesh_out.n_cells, mesh_in.n_cells) scipy.sparse.csr_matrix or (mesh_out.n_cells) numpy.ndarray
# If *values* = *None* , the returned value is a matrix representing this
# operation, otherwise it is a :class:`numpy.ndarray` of the result of the
# operation.
#
# Examples
# --------
# Create two meshes with the same extent, but different divisions (the meshes
# do not have to be the same extent).
#
# >>> import numpy as np
# >>> from discretize import TensorMesh
# >>> h1 = np.ones(32)
# >>> h2 = np.ones(16)*2
# >>> mesh_in = TensorMesh([h1, h1])
# >>> mesh_out = TensorMesh([h2, h2])
#
# Create a random model defined on the input mesh, and use volume averaging to
# interpolate it to the output mesh.
#
# >>> from discretize.utils import volume_average
# >>> model1 = np.random.rand(mesh_in.nC)
# >>> model2 = volume_average(mesh_in, mesh_out, model1)
#
# Because these two meshes' cells are perfectly aligned, but the output mesh
# has 1 cell for each 4 of the input cells, this operation should effectively
# look like averaging each of those cells values
#
# >>> import matplotlib.pyplot as plt
# >>> plt.figure(figsize=(6, 3))
# >>> ax1 = plt.subplot(121)
# >>> mesh_in.plot_image(model1, ax=ax1)
# >>> ax2 = plt.subplot(122)
# >>> mesh_out.plot_image(model2, ax=ax2)
# >>> plt.show()
#
# """
# try:
# in_type = mesh_in._meshType
# out_type = mesh_out._meshType
# except AttributeError:
# raise TypeError("Both input and output mesh must be valid discetize meshes")
#
# valid_meshs = ["TENSOR", "TREE"]
# if in_type not in valid_meshs or out_type not in valid_meshs:
# raise NotImplementedError(
# f"Volume averaging is only implemented for TensorMesh and TreeMesh, "
# f"not {type(mesh_in).__name__} and/or {type(mesh_out).__name__}"
# )
#
# if mesh_in.dim != mesh_out.dim:
# raise ValueError("Both meshes must have the same dimension")
#
# if values is not None and len(values) != mesh_in.nC:
# raise ValueError(
# "Input array does not have the same length as the number of cells in input mesh"
# )
# if output is not None and len(output) != mesh_out.nC:
# raise ValueError(
# "Output array does not have the same length as the number of cells in output mesh"
# )
#
# if values is not None:
# values = np.asarray(values, dtype=np.float64)
# if output is not None:
# output = np.asarray(output, dtype=np.float64)
#
# if in_type == "TENSOR":
# if out_type == "TENSOR":
# return _vol_interp(mesh_in, mesh_out, values, output)
# elif out_type == "TREE":
# return mesh_out._vol_avg_from_tens(mesh_in, values, output)
# elif in_type == "TREE":
# if out_type == "TENSOR":
# return mesh_in._vol_avg_to_tens(mesh_out, values, output)
# elif out_type == "TREE":
# return mesh_out._vol_avg_from_tree(mesh_in, values, output)
# else:
# raise TypeError("Unsupported mesh types")
. Output only the next line. | out1 = volume_average(mesh1, mesh2, in_put, out_put) |
Using the snippet: <|code_start|> print(
" Analytic: {analytic}, Numeric: {numeric}, "
"ratio (num/ana): {ratio}".format(
analytic=ans, numeric=numeric_ans, ratio=float(numeric_ans) / ans
)
)
assert np.abs(ans - numeric_ans) < TOL
def test_EdgeInnerProductDiagAnisotropic(self):
# Here we will make up some j vectors that vary in space
# h = [h_t] - to test edge inner products
fcts = EdgeInnerProductFunctionsDiagAnisotropic()
sig, hv = fcts.vectors(self.mesh)
MeSig = self.mesh.getEdgeInnerProduct(sig)
numeric_ans = hv.T.dot(MeSig.dot(hv))
ans = fcts.sol()
print("------ Testing Edge Inner Product Anisotropic -----------")
print(
" Analytic: {analytic}, Numeric: {numeric}, "
"ratio (num/ana): {ratio}".format(
analytic=ans, numeric=numeric_ans, ratio=float(numeric_ans) / ans
)
)
assert np.abs(ans - numeric_ans) < TOL
<|code_end|>
, determine the next line of code. You have imports:
import discretize
import numpy as np
import sympy
import unittest
import scipy.sparse as sp
from discretize import tests
from sympy.abc import r, t, z
and context (class names, function names, or code) available:
# Path: discretize/tests.py
# def setup_mesh(mesh_type, nC, nDim):
# def function(cell):
# def setupMesh(self, nC):
# def getError(self):
# def orderTest(self):
# def rosenbrock(x, return_g=True, return_H=True):
# def check_derivative(
# fctn,
# x0,
# num=7,
# plotIt=True,
# dx=None,
# expectedOrder=2,
# tolerance=0.85,
# eps=1e-10,
# ax=None,
# ):
# def l2norm(x):
# def plot_it(ax):
# def get_quadratic(A, b, c=0):
# def Quadratic(x, return_g=True, return_H=True):
# X, Y = example_curvilinear_grid([nC, nC], kwrd)
# X, Y, Z = example_curvilinear_grid([nC, nC, nC], kwrd)
# H = sp.csr_matrix(
# np.array(
# [[-400 * x[1] + 1200 * x[0] ** 2 + 2, -400 * x[0]], [-400 * x[0], 200]]
# )
# )
# E0 = np.ones(h.shape)
# E1 = np.ones(h.shape)
# H = A
# class OrderTest(unittest.TestCase):
. Output only the next line. | class TestCylFaceInnerProducts_Order(tests.OrderTest): |
Predict the next line for this snippet: <|code_start|> \vec{j} = \Sigma \vec{e} \;\;\; where \;\;\;
\Sigma = \begin{bmatrix}
\sigma_{xx} & \sigma_{xy} & \sigma_{xz} \\
\sigma_{xy} & \sigma_{yy} & \sigma_{yz} \\
\sigma_{xz} & \sigma_{yz} & \sigma_{zz}
\end{bmatrix}
In 3D, the tensor is defined by 6 independent element (3 independent elements in
2D). When using the input argument *tensor* to define the consitutive relationship
for every cell in the *mesh*, there are 4 classifications recognized by discretize:
- **Scalar:** :math:`\vec{j} = \sigma \vec{e}`, where :math:`\sigma` a constant.
Thus the input argument *tensor* is a float.
- **Isotropic:** :math:`\vec{j} = \sigma \vec{e}`, where :math:`\sigma` varies
spatially. Thus the input argument *tensor* is a 1D array that provides a
:math:`\sigma` value for every cell in the mesh.
- **Anisotropic:** :math:`\vec{j} = \Sigma \vec{e}`, where the off-diagonal elements
are zero. That is, :math:`\Sigma` is diagonal. In this case, the input argument
*tensor* defining the physical properties in each cell is a :class:`numpy.ndarray`
of shape (*nCells*, *dim*).
- **Tensor:** :math:`\vec{j} = \Sigma \vec{e}`, where off-diagonal elements are
non-zero and :math:`\Sigma` is a full tensor. In this case, the input argument
*tensor* defining the physical properties in each cell is a :class:`numpy.ndarray`
of shape (*nCells*, *nParam*). In 2D, *nParam* = 3 and in 3D, *nParam* = 6.
"""
def __init__(self, mesh, tensor):
if tensor is None: # default is ones
self._tt = -1
self._tts = "none"
<|code_end|>
with the help of current file imports:
import numpy as np
import scipy.sparse as sp
import warnings
from discretize.utils.code_utils import is_scalar, deprecate_function
and context from other files:
# Path: discretize/utils/code_utils.py
# def is_scalar(f):
# """Determine if the input argument is a scalar.
#
# The function **is_scalar** returns *True* if the input is an integer,
# float or complex number. The function returns *False* otherwise.
#
# Parameters
# ----------
# f :
# Any input quantity
#
# Returns
# -------
# bool :
#
# - *True* if the input argument is an integer, float or complex number
# - *False* otherwise
#
#
# """
#
# if isinstance(f, SCALARTYPES):
# return True
# elif isinstance(f, np.ndarray) and f.size == 1 and isinstance(f[0], SCALARTYPES):
# return True
# return False
#
# def deprecate_function(new_function, old_name, removal_version=None, future_warn=False):
# if future_warn:
# Warning = FutureWarning
# else:
# Warning = DeprecationWarning
# new_name = new_function.__name__
# if removal_version is not None:
# tag = f" It will be removed in version {removal_version} of discretize."
# else:
# tag = " It will be removed in a future version of discretize."
#
# def dep_function(*args, **kwargs):
# warnings.warn(
# f"{old_name} has been deprecated, please use {new_name}." + tag,
# Warning,
# )
# return new_function(*args, **kwargs)
#
# doc = f"""
# `{old_name}` has been deprecated. See `{new_name}` for documentation
#
# See Also
# --------
# {new_name}
# """
# dep_function.__doc__ = doc
# return dep_function
, which may contain function names, class names, or code. Output only the next line. | elif is_scalar(tensor): |
Predict the next line for this snippet: <|code_start|> """Returns the *Identity* class as an operator"""
return self
def transpose(self):
"""Returns the transpose of the *Identity* class, i.e. itself"""
return self
class _inftup(tuple):
"""An infinitely long tuple of a value repeated infinitely"""
def __init__(self, val=None):
self._val = val
def __getitem__(self, key):
if isinstance(key, slice):
return _inftup(self._val)
return self._val
def __len__(self):
return 0
def __repr__(self):
return f"({self._val}, {self._val}, ...)"
################################################
# DEPRECATED FUNCTIONS
################################################
<|code_end|>
with the help of current file imports:
import numpy as np
import scipy.sparse as sp
import warnings
from discretize.utils.code_utils import is_scalar, deprecate_function
and context from other files:
# Path: discretize/utils/code_utils.py
# def is_scalar(f):
# """Determine if the input argument is a scalar.
#
# The function **is_scalar** returns *True* if the input is an integer,
# float or complex number. The function returns *False* otherwise.
#
# Parameters
# ----------
# f :
# Any input quantity
#
# Returns
# -------
# bool :
#
# - *True* if the input argument is an integer, float or complex number
# - *False* otherwise
#
#
# """
#
# if isinstance(f, SCALARTYPES):
# return True
# elif isinstance(f, np.ndarray) and f.size == 1 and isinstance(f[0], SCALARTYPES):
# return True
# return False
#
# def deprecate_function(new_function, old_name, removal_version=None, future_warn=False):
# if future_warn:
# Warning = FutureWarning
# else:
# Warning = DeprecationWarning
# new_name = new_function.__name__
# if removal_version is not None:
# tag = f" It will be removed in version {removal_version} of discretize."
# else:
# tag = " It will be removed in a future version of discretize."
#
# def dep_function(*args, **kwargs):
# warnings.warn(
# f"{old_name} has been deprecated, please use {new_name}." + tag,
# Warning,
# )
# return new_function(*args, **kwargs)
#
# doc = f"""
# `{old_name}` has been deprecated. See `{new_name}` for documentation
#
# See Also
# --------
# {new_name}
# """
# dep_function.__doc__ = doc
# return dep_function
, which may contain function names, class names, or code. Output only the next line. | sdInv = deprecate_function(sdinv, "sdInv", removal_version="1.0.0", future_warn=False) |
Based on the snippet: <|code_start|> )
)
# class TestCellGradx3D(tests.OrderTest):
# name = "CellGradx"
# MESHTYPES = MESHTYPES
# meshDimension = 3
# meshSizes = [8, 16, 32, 64]
# def getError(self):
# fun = lambda r, t, z: (
# np.sin(2.*np.pi*r) + np.sin(t) + np.sin(2*np.pi*z)
# )
# solR = lambda r, t, z: 2.*np.pi*np.cos(2.*np.pi*r)
# phi = call3(fun, self.M.gridCC)
# phix_num = self.M.cellGradx * phi
# phix_ana = call3(solR, self.M.gridFx)
# err = np.linalg.norm(phix_num - phix_ana, np.inf)
# return err
# def test_order(self):
# self.orderTest()
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
import numpy as np
import sympy
import discretize
from sympy.abc import r, t, z
from discretize import tests
and context (classes, functions, sometimes code) from other files:
# Path: discretize/tests.py
# def setup_mesh(mesh_type, nC, nDim):
# def function(cell):
# def setupMesh(self, nC):
# def getError(self):
# def orderTest(self):
# def rosenbrock(x, return_g=True, return_H=True):
# def check_derivative(
# fctn,
# x0,
# num=7,
# plotIt=True,
# dx=None,
# expectedOrder=2,
# tolerance=0.85,
# eps=1e-10,
# ax=None,
# ):
# def l2norm(x):
# def plot_it(ax):
# def get_quadratic(A, b, c=0):
# def Quadratic(x, return_g=True, return_H=True):
# X, Y = example_curvilinear_grid([nC, nC], kwrd)
# X, Y, Z = example_curvilinear_grid([nC, nC, nC], kwrd)
# H = sp.csr_matrix(
# np.array(
# [[-400 * x[1] + 1200 * x[0] ** 2 + 2, -400 * x[0]], [-400 * x[0], 200]]
# )
# )
# E0 = np.ones(h.shape)
# E1 = np.ones(h.shape)
# H = A
# class OrderTest(unittest.TestCase):
. Output only the next line. | class TestFaceDiv3D(tests.OrderTest): |
Here is a snippet: <|code_start|> if not len(model) == mesh.nC:
raise Exception(
"""Something is not right, expected size is {:d}
but unwrap vector is size {:d}""".format(
mesh.nC, len(model)
)
)
return model.reshape(mesh.vnC, order="F")[:, ::-1].reshape(-1, order="F")
def _readModelUBC_3D(mesh, file_name):
"""Read UBC-GIF formatted model file for 3D tensor mesh.
Parameters
----------
file_name : str or file name
full path to the UBC-GIF formatted model file
Returns
-------
(n_cells) numpy.ndarray
The model defined on the 3D tensor mesh
"""
f = open(file_name, "r")
model = np.array(list(map(float, f.readlines())))
f.close()
nCx, nCy, nCz = mesh.shape_cells
model = np.reshape(model, (nCz, nCx, nCy), order="F")
model = model[::-1, :, :]
model = np.transpose(model, (1, 2, 0))
<|code_end|>
. Write the next line using the current file's imports:
import os
import numpy as np
import warnings
from discretize.utils import mkvc
from discretize.utils.code_utils import deprecate_method
from discretize.mixins.vtk_mod import InterfaceTensorread_vtk
and context from other files:
# Path: discretize/utils/matrix_utils.py
# def mkvc(x, n_dims=1, **kwargs):
# """Creates a vector with specified dimensionality.
#
# This function converts a :class:`numpy.ndarray` to a vector. In general,
# the output vector has a dimension of 1. However, the dimensionality
# can be specified if the user intends to carry out a dot product with
# a higher order array.
#
# Parameters
# ----------
# x : array_like
# An array that will be reorganized and output as a vector. The input array
# will be flattened on input in Fortran order.
# n_dims : int
# The dimension of the output vector. :data:`numpy.newaxis` are appened to the
# output array until it has this many axes.
#
# Returns
# -------
# numpy.ndarray
# The output vector, with at least ``n_dims`` axes.
#
# Examples
# --------
# Here, we reorganize a simple 2D array as a vector and demonstrate the
# impact of the *n_dim* argument.
#
# >>> from discretize.utils import mkvc
# >>> import numpy as np
#
# >>> a = np.random.rand(3, 2)
# >>> a
# array([[0.33534155, 0.25334363],
# [0.07147884, 0.81080958],
# [0.85892774, 0.74357806]])
#
# >>> v = mkvc(a)
# >>> v
# array([0.33534155, 0.07147884, 0.85892774, 0.25334363, 0.81080958,
# 0.74357806])
#
# In Higher dimensions:
#
# >>> for ii in range(1, 4):
# ... v = mkvc(a, ii)
# ... print('Shape of output with n_dim =', ii, ': ', v.shape)
# Shape of output with n_dim = 1 : (6,)
# Shape of output with n_dim = 2 : (6, 1)
# Shape of output with n_dim = 3 : (6, 1, 1)
# """
# if "numDims" in kwargs:
# warnings.warn(
# "The numDims keyword argument has been deprecated, please use n_dims. "
# "This will be removed in discretize 1.0.0",
# DeprecationWarning,
# )
# n_dims = kwargs["numDims"]
# if type(x) == np.matrix:
# x = np.array(x)
#
# if hasattr(x, "tovec"):
# x = x.tovec()
#
# if isinstance(x, Zero):
# return x
#
# if not isinstance(x, np.ndarray):
# raise TypeError("Vector must be a numpy array")
#
# if n_dims == 1:
# return x.flatten(order="F")
# elif n_dims == 2:
# return x.flatten(order="F")[:, np.newaxis]
# elif n_dims == 3:
# return x.flatten(order="F")[:, np.newaxis, np.newaxis]
#
# Path: discretize/utils/code_utils.py
# def deprecate_method(new_name, old_name, removal_version=None, future_warn=False):
# if future_warn:
# Warning = FutureWarning
# else:
# Warning = DeprecationWarning
# if removal_version is not None:
# tag = f" It will be removed in version {removal_version} of discretize."
# else:
# tag = " It will be removed in a future version of discretize."
#
# def new_method(self, *args, **kwargs):
# class_name = type(self).__name__
# warnings.warn(
# f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
# + tag,
# Warning,
# )
# return getattr(self, new_name)(*args, **kwargs)
#
# doc = f"""
# `{old_name}` has been deprecated. See `{new_name}` for documentation
#
# See Also
# --------
# {new_name}
# """
# new_method.__doc__ = doc
# return new_method
, which may include functions, classes, or code. Output only the next line. | model = mkvc(model) |
Here is a snippet: <|code_start|> fname = os.path.join(directory, file_name)
if mesh.dim == 3:
mesh._writeUBC_3DMesh(fname, comment_lines=comment_lines)
elif mesh.dim == 2:
mesh._writeUBC_2DMesh(fname, comment_lines=comment_lines)
else:
raise Exception("mesh must be a Tensor Mesh 2D or 3D")
if models is None:
return
if not isinstance(models, dict):
raise TypeError("models must be a dict")
for key in models:
if not isinstance(key, str):
raise TypeError(
"The dict key must be a string representing the file name"
)
mesh.write_model_UBC(key, models[key], directory=directory)
# DEPRECATED
@classmethod
def readUBC(TensorMesh, file_name, directory=""):
"""*readUBC* has been deprecated and replaced by *read_UBC*"""
warnings.warn(
"TensorMesh.readUBC has been deprecated and will be removed in"
"discretize 1.0.0. please use TensorMesh.read_UBC",
DeprecationWarning,
)
return TensorMesh.read_UBC(file_name, directory)
<|code_end|>
. Write the next line using the current file's imports:
import os
import numpy as np
import warnings
from discretize.utils import mkvc
from discretize.utils.code_utils import deprecate_method
from discretize.mixins.vtk_mod import InterfaceTensorread_vtk
and context from other files:
# Path: discretize/utils/matrix_utils.py
# def mkvc(x, n_dims=1, **kwargs):
# """Creates a vector with specified dimensionality.
#
# This function converts a :class:`numpy.ndarray` to a vector. In general,
# the output vector has a dimension of 1. However, the dimensionality
# can be specified if the user intends to carry out a dot product with
# a higher order array.
#
# Parameters
# ----------
# x : array_like
# An array that will be reorganized and output as a vector. The input array
# will be flattened on input in Fortran order.
# n_dims : int
# The dimension of the output vector. :data:`numpy.newaxis` are appened to the
# output array until it has this many axes.
#
# Returns
# -------
# numpy.ndarray
# The output vector, with at least ``n_dims`` axes.
#
# Examples
# --------
# Here, we reorganize a simple 2D array as a vector and demonstrate the
# impact of the *n_dim* argument.
#
# >>> from discretize.utils import mkvc
# >>> import numpy as np
#
# >>> a = np.random.rand(3, 2)
# >>> a
# array([[0.33534155, 0.25334363],
# [0.07147884, 0.81080958],
# [0.85892774, 0.74357806]])
#
# >>> v = mkvc(a)
# >>> v
# array([0.33534155, 0.07147884, 0.85892774, 0.25334363, 0.81080958,
# 0.74357806])
#
# In Higher dimensions:
#
# >>> for ii in range(1, 4):
# ... v = mkvc(a, ii)
# ... print('Shape of output with n_dim =', ii, ': ', v.shape)
# Shape of output with n_dim = 1 : (6,)
# Shape of output with n_dim = 2 : (6, 1)
# Shape of output with n_dim = 3 : (6, 1, 1)
# """
# if "numDims" in kwargs:
# warnings.warn(
# "The numDims keyword argument has been deprecated, please use n_dims. "
# "This will be removed in discretize 1.0.0",
# DeprecationWarning,
# )
# n_dims = kwargs["numDims"]
# if type(x) == np.matrix:
# x = np.array(x)
#
# if hasattr(x, "tovec"):
# x = x.tovec()
#
# if isinstance(x, Zero):
# return x
#
# if not isinstance(x, np.ndarray):
# raise TypeError("Vector must be a numpy array")
#
# if n_dims == 1:
# return x.flatten(order="F")
# elif n_dims == 2:
# return x.flatten(order="F")[:, np.newaxis]
# elif n_dims == 3:
# return x.flatten(order="F")[:, np.newaxis, np.newaxis]
#
# Path: discretize/utils/code_utils.py
# def deprecate_method(new_name, old_name, removal_version=None, future_warn=False):
# if future_warn:
# Warning = FutureWarning
# else:
# Warning = DeprecationWarning
# if removal_version is not None:
# tag = f" It will be removed in version {removal_version} of discretize."
# else:
# tag = " It will be removed in a future version of discretize."
#
# def new_method(self, *args, **kwargs):
# class_name = type(self).__name__
# warnings.warn(
# f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
# + tag,
# Warning,
# )
# return getattr(self, new_name)(*args, **kwargs)
#
# doc = f"""
# `{old_name}` has been deprecated. See `{new_name}` for documentation
#
# See Also
# --------
# {new_name}
# """
# new_method.__doc__ = doc
# return new_method
, which may include functions, classes, or code. Output only the next line. | readModelUBC = deprecate_method( |
Next line prediction: <|code_start|>
class HTTPConnectSetupClient(http.HTTPClient):
"""HTTPClient protocol to send a CONNECT message for proxies and read the response.
Args:
host (bytes): The hostname to send in the CONNECT message
port (int): The port to send in the CONNECT message
proxy_auth (tuple): None or tuple of (username, pasword) for HTTP basic proxy
authentication
"""
def __init__(self, host: bytes, port: int, proxy_auth: Optional[Tuple[str, str]]):
self.host = host
self.port = port
self._proxy_auth = proxy_auth
self.on_connected: defer.Deferred = defer.Deferred()
def connectionMade(self):
logger.debug("Connected to proxy, sending CONNECT")
self.sendCommand(b"CONNECT", b"%s:%d" % (self.host, self.port))
if self._proxy_auth is not None:
username, password = self._proxy_auth
# a credential pair is a urlsafe-base64-encoded pair separated by colon
encoded_credentials = urlsafe_b64encode(f"{username}:{password}".encode())
self.sendHeader(b"Proxy-Authorization", b"basic " + encoded_credentials)
self.endHeaders()
def handleStatus(self, version, status, message):
logger.debug("Got Status: %s %s %s", status, message, version)
if status != b"200":
<|code_end|>
. Use the current file's imports:
(import logging
from base64 import urlsafe_b64encode
from typing import Optional, Tuple
from twisted.internet import defer, protocol
from twisted.internet.base import ReactorBase
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IProtocolFactory, IStreamClientEndpoint
from twisted.internet.protocol import Protocol, connectionDone
from twisted.web import http
from zope.interface import implementer
from sygnal.exceptions import ProxyConnectError)
and context including class names, function names, or small code snippets from other files:
# Path: sygnal/exceptions.py
# class ProxyConnectError(ConnectError):
# """
# Exception raised when we are unable to start a connection using a HTTP proxy
# This indicates an issue with the HTTP Proxy in use rather than the final
# endpoint we wanted to contact.
# """
#
# pass
. Output only the next line. | raise ProxyConnectError("Unexpected status on CONNECT: %s" % status) |
Next line prediction: <|code_start|> if len(found_pushkins) == 0:
log.warning("Got notification for unknown app ID %s", appid)
rejected.append(d.pushkey)
continue
if len(found_pushkins) > 1:
log.warning("Got notification for an ambiguous app ID %s", appid)
rejected.append(d.pushkey)
continue
pushkin = found_pushkins[0]
log.debug(
"Sending push to pushkin %s for app ID %s", pushkin.name, appid
)
NOTIFS_BY_PUSHKIN.labels(pushkin.name).inc()
result = await pushkin.dispatch_notification(notif, d, context)
if not isinstance(result, list):
raise TypeError("Pushkin should return list.")
rejected += result
request.write(json.dumps({"rejected": rejected}).encode())
if rejected:
log.info(
"Successfully delivered notifications with %d rejected pushkeys",
len(rejected),
)
<|code_end|>
. Use the current file's imports:
(import json
import logging
import sys
import time
import traceback
from typing import TYPE_CHECKING, Callable, List, Union
from uuid import uuid4
from opentracing import Format, Span, logs, tags
from prometheus_client import Counter, Gauge, Histogram
from twisted.internet.defer import ensureDeferred
from twisted.web import server
from twisted.web.http import (
Request,
combinedLogFormatter,
datetimeToLogString,
proxiedLogFormatter,
)
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from sygnal.exceptions import (
InvalidNotificationException,
NotificationDispatchException,
)
from sygnal.notifications import Notification, NotificationContext, Pushkin
from sygnal.utils import NotificationLoggerAdapter, json_decoder
from sygnal.sygnal import Sygnal)
and context including class names, function names, or small code snippets from other files:
# Path: sygnal/exceptions.py
# class InvalidNotificationException(Exception):
# pass
#
# class NotificationDispatchException(Exception):
# pass
#
# Path: sygnal/notifications.py
# class Notification:
# def __init__(self, notif):
# # optional attributes
# self.room_name: Optional[str] = notif.get("room_name")
# self.room_alias: Optional[str] = notif.get("room_alias")
# self.prio: Optional[str] = notif.get("prio")
# self.membership: Optional[str] = notif.get("membership")
# self.sender_display_name: Optional[str] = notif.get("sender_display_name")
# self.content: Optional[Dict[str, Any]] = notif.get("content")
# self.event_id: Optional[str] = notif.get("event_id")
# self.room_id: Optional[str] = notif.get("room_id")
# self.user_is_target: Optional[bool] = notif.get("user_is_target")
# self.type: Optional[str] = notif.get("type")
# self.sender: Optional[str] = notif.get("sender")
#
# if "devices" not in notif or not isinstance(notif["devices"], list):
# raise InvalidNotificationException("Expected list in 'devices' key")
#
# if "counts" in notif:
# self.counts = Counts(notif["counts"])
# else:
# self.counts = Counts({})
#
# self.devices = [Device(d) for d in notif["devices"]]
#
# class NotificationContext(object):
# def __init__(self, request_id: str, opentracing_span: Span, start_time: float):
# """
# Args:
# request_id: An ID for the request, or None to have it
# generated automatically.
# opentracing_span: The span for the API request triggering
# the notification.
# start_time: Start timer value, `time.perf_counter()`
# """
# self.request_id = request_id
# self.opentracing_span = opentracing_span
# self.start_time = start_time
#
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
#
# Path: sygnal/utils.py
# async def twisted_sleep(delay: float, twisted_reactor: "SygnalReactor") -> None:
# def process(
# self, msg: str, kwargs: MutableMapping[str, Any]
# ) -> Tuple[str, MutableMapping[str, Any]]:
# def _reject_invalid_json(val: Any) -> None:
# class NotificationLoggerAdapter(LoggerAdapter):
. Output only the next line. | except NotificationDispatchException: |
Based on the snippet: <|code_start|> Actually handle the request.
Args:
request: The request, corresponding to a POST request.
Returns:
Either a str instance or NOT_DONE_YET.
"""
request_id = self._make_request_id()
header_dict = {
k.decode(): v[0].decode()
for k, v in request.requestHeaders.getAllRawHeaders()
}
# extract OpenTracing scope from the HTTP headers
span_ctx = self.sygnal.tracer.extract(Format.HTTP_HEADERS, header_dict)
span_tags = {
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
"request_id": request_id,
}
root_span = self.sygnal.tracer.start_span(
"pushgateway_v1_notify", child_of=span_ctx, tags=span_tags
)
# if this is True, we will not close the root_span at the end of this
# function.
root_span_accounted_for = False
try:
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import logging
import sys
import time
import traceback
from typing import TYPE_CHECKING, Callable, List, Union
from uuid import uuid4
from opentracing import Format, Span, logs, tags
from prometheus_client import Counter, Gauge, Histogram
from twisted.internet.defer import ensureDeferred
from twisted.web import server
from twisted.web.http import (
Request,
combinedLogFormatter,
datetimeToLogString,
proxiedLogFormatter,
)
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from sygnal.exceptions import (
InvalidNotificationException,
NotificationDispatchException,
)
from sygnal.notifications import Notification, NotificationContext, Pushkin
from sygnal.utils import NotificationLoggerAdapter, json_decoder
from sygnal.sygnal import Sygnal
and context (classes, functions, sometimes code) from other files:
# Path: sygnal/exceptions.py
# class InvalidNotificationException(Exception):
# pass
#
# class NotificationDispatchException(Exception):
# pass
#
# Path: sygnal/notifications.py
# class Notification:
# def __init__(self, notif):
# # optional attributes
# self.room_name: Optional[str] = notif.get("room_name")
# self.room_alias: Optional[str] = notif.get("room_alias")
# self.prio: Optional[str] = notif.get("prio")
# self.membership: Optional[str] = notif.get("membership")
# self.sender_display_name: Optional[str] = notif.get("sender_display_name")
# self.content: Optional[Dict[str, Any]] = notif.get("content")
# self.event_id: Optional[str] = notif.get("event_id")
# self.room_id: Optional[str] = notif.get("room_id")
# self.user_is_target: Optional[bool] = notif.get("user_is_target")
# self.type: Optional[str] = notif.get("type")
# self.sender: Optional[str] = notif.get("sender")
#
# if "devices" not in notif or not isinstance(notif["devices"], list):
# raise InvalidNotificationException("Expected list in 'devices' key")
#
# if "counts" in notif:
# self.counts = Counts(notif["counts"])
# else:
# self.counts = Counts({})
#
# self.devices = [Device(d) for d in notif["devices"]]
#
# class NotificationContext(object):
# def __init__(self, request_id: str, opentracing_span: Span, start_time: float):
# """
# Args:
# request_id: An ID for the request, or None to have it
# generated automatically.
# opentracing_span: The span for the API request triggering
# the notification.
# start_time: Start timer value, `time.perf_counter()`
# """
# self.request_id = request_id
# self.opentracing_span = opentracing_span
# self.start_time = start_time
#
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
#
# Path: sygnal/utils.py
# async def twisted_sleep(delay: float, twisted_reactor: "SygnalReactor") -> None:
# def process(
# self, msg: str, kwargs: MutableMapping[str, Any]
# ) -> Tuple[str, MutableMapping[str, Any]]:
# def _reject_invalid_json(val: Any) -> None:
# class NotificationLoggerAdapter(LoggerAdapter):
. Output only the next line. | context = NotificationContext(request_id, root_span, time.perf_counter()) |
Given snippet: <|code_start|> async def cb():
with REQUESTS_IN_FLIGHT_GUAGE.labels(
self.__class__.__name__
).track_inprogress():
await self._handle_dispatch(root_span, request, log, notif, context)
ensureDeferred(cb())
# we have to try and send the notifications first,
# so we can find out which ones to reject
return NOT_DONE_YET
except Exception as exc_val:
root_span.set_tag(tags.ERROR, True)
# [2] corresponds to the traceback
trace = traceback.format_tb(sys.exc_info()[2])
root_span.log_kv(
{
logs.EVENT: tags.ERROR,
logs.MESSAGE: str(exc_val),
logs.ERROR_OBJECT: exc_val,
logs.ERROR_KIND: type(exc_val),
logs.STACK: trace,
}
)
raise
finally:
if not root_span_accounted_for:
root_span.finish()
<|code_end|>
, continue by predicting the next line. Consider the current file's imports:
import json
import logging
import sys
import time
import traceback
from typing import TYPE_CHECKING, Callable, List, Union
from uuid import uuid4
from opentracing import Format, Span, logs, tags
from prometheus_client import Counter, Gauge, Histogram
from twisted.internet.defer import ensureDeferred
from twisted.web import server
from twisted.web.http import (
Request,
combinedLogFormatter,
datetimeToLogString,
proxiedLogFormatter,
)
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from sygnal.exceptions import (
InvalidNotificationException,
NotificationDispatchException,
)
from sygnal.notifications import Notification, NotificationContext, Pushkin
from sygnal.utils import NotificationLoggerAdapter, json_decoder
from sygnal.sygnal import Sygnal
and context:
# Path: sygnal/exceptions.py
# class InvalidNotificationException(Exception):
# pass
#
# class NotificationDispatchException(Exception):
# pass
#
# Path: sygnal/notifications.py
# class Notification:
# def __init__(self, notif):
# # optional attributes
# self.room_name: Optional[str] = notif.get("room_name")
# self.room_alias: Optional[str] = notif.get("room_alias")
# self.prio: Optional[str] = notif.get("prio")
# self.membership: Optional[str] = notif.get("membership")
# self.sender_display_name: Optional[str] = notif.get("sender_display_name")
# self.content: Optional[Dict[str, Any]] = notif.get("content")
# self.event_id: Optional[str] = notif.get("event_id")
# self.room_id: Optional[str] = notif.get("room_id")
# self.user_is_target: Optional[bool] = notif.get("user_is_target")
# self.type: Optional[str] = notif.get("type")
# self.sender: Optional[str] = notif.get("sender")
#
# if "devices" not in notif or not isinstance(notif["devices"], list):
# raise InvalidNotificationException("Expected list in 'devices' key")
#
# if "counts" in notif:
# self.counts = Counts(notif["counts"])
# else:
# self.counts = Counts({})
#
# self.devices = [Device(d) for d in notif["devices"]]
#
# class NotificationContext(object):
# def __init__(self, request_id: str, opentracing_span: Span, start_time: float):
# """
# Args:
# request_id: An ID for the request, or None to have it
# generated automatically.
# opentracing_span: The span for the API request triggering
# the notification.
# start_time: Start timer value, `time.perf_counter()`
# """
# self.request_id = request_id
# self.opentracing_span = opentracing_span
# self.start_time = start_time
#
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
#
# Path: sygnal/utils.py
# async def twisted_sleep(delay: float, twisted_reactor: "SygnalReactor") -> None:
# def process(
# self, msg: str, kwargs: MutableMapping[str, Any]
# ) -> Tuple[str, MutableMapping[str, Any]]:
# def _reject_invalid_json(val: Any) -> None:
# class NotificationLoggerAdapter(LoggerAdapter):
which might include code, classes, or functions. Output only the next line. | def find_pushkins(self, appid: str) -> List[Pushkin]: |
Predict the next line for this snippet: <|code_start|> request: The request, corresponding to a POST request.
Returns:
Either a str instance or NOT_DONE_YET.
"""
request_id = self._make_request_id()
header_dict = {
k.decode(): v[0].decode()
for k, v in request.requestHeaders.getAllRawHeaders()
}
# extract OpenTracing scope from the HTTP headers
span_ctx = self.sygnal.tracer.extract(Format.HTTP_HEADERS, header_dict)
span_tags = {
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
"request_id": request_id,
}
root_span = self.sygnal.tracer.start_span(
"pushgateway_v1_notify", child_of=span_ctx, tags=span_tags
)
# if this is True, we will not close the root_span at the end of this
# function.
root_span_accounted_for = False
try:
context = NotificationContext(request_id, root_span, time.perf_counter())
<|code_end|>
with the help of the current file's imports:
import json
import logging
import sys
import time
import traceback
from typing import TYPE_CHECKING, Callable, List, Union
from uuid import uuid4
from opentracing import Format, Span, logs, tags
from prometheus_client import Counter, Gauge, Histogram
from twisted.internet.defer import ensureDeferred
from twisted.web import server
from twisted.web.http import (
Request,
combinedLogFormatter,
datetimeToLogString,
proxiedLogFormatter,
)
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from sygnal.exceptions import (
InvalidNotificationException,
NotificationDispatchException,
)
from sygnal.notifications import Notification, NotificationContext, Pushkin
from sygnal.utils import NotificationLoggerAdapter, json_decoder
from sygnal.sygnal import Sygnal
and context from other files:
# Path: sygnal/exceptions.py
# class InvalidNotificationException(Exception):
# pass
#
# class NotificationDispatchException(Exception):
# pass
#
# Path: sygnal/notifications.py
# class Notification:
# def __init__(self, notif):
# # optional attributes
# self.room_name: Optional[str] = notif.get("room_name")
# self.room_alias: Optional[str] = notif.get("room_alias")
# self.prio: Optional[str] = notif.get("prio")
# self.membership: Optional[str] = notif.get("membership")
# self.sender_display_name: Optional[str] = notif.get("sender_display_name")
# self.content: Optional[Dict[str, Any]] = notif.get("content")
# self.event_id: Optional[str] = notif.get("event_id")
# self.room_id: Optional[str] = notif.get("room_id")
# self.user_is_target: Optional[bool] = notif.get("user_is_target")
# self.type: Optional[str] = notif.get("type")
# self.sender: Optional[str] = notif.get("sender")
#
# if "devices" not in notif or not isinstance(notif["devices"], list):
# raise InvalidNotificationException("Expected list in 'devices' key")
#
# if "counts" in notif:
# self.counts = Counts(notif["counts"])
# else:
# self.counts = Counts({})
#
# self.devices = [Device(d) for d in notif["devices"]]
#
# class NotificationContext(object):
# def __init__(self, request_id: str, opentracing_span: Span, start_time: float):
# """
# Args:
# request_id: An ID for the request, or None to have it
# generated automatically.
# opentracing_span: The span for the API request triggering
# the notification.
# start_time: Start timer value, `time.perf_counter()`
# """
# self.request_id = request_id
# self.opentracing_span = opentracing_span
# self.start_time = start_time
#
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
#
# Path: sygnal/utils.py
# async def twisted_sleep(delay: float, twisted_reactor: "SygnalReactor") -> None:
# def process(
# self, msg: str, kwargs: MutableMapping[str, Any]
# ) -> Tuple[str, MutableMapping[str, Any]]:
# def _reject_invalid_json(val: Any) -> None:
# class NotificationLoggerAdapter(LoggerAdapter):
, which may contain function names, class names, or code. Output only the next line. | log = NotificationLoggerAdapter(logger, {"request_id": request_id}) |
Predict the next line after this snippet: <|code_start|> Either a str instance or NOT_DONE_YET.
"""
request_id = self._make_request_id()
header_dict = {
k.decode(): v[0].decode()
for k, v in request.requestHeaders.getAllRawHeaders()
}
# extract OpenTracing scope from the HTTP headers
span_ctx = self.sygnal.tracer.extract(Format.HTTP_HEADERS, header_dict)
span_tags = {
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
"request_id": request_id,
}
root_span = self.sygnal.tracer.start_span(
"pushgateway_v1_notify", child_of=span_ctx, tags=span_tags
)
# if this is True, we will not close the root_span at the end of this
# function.
root_span_accounted_for = False
try:
context = NotificationContext(request_id, root_span, time.perf_counter())
log = NotificationLoggerAdapter(logger, {"request_id": request_id})
try:
<|code_end|>
using the current file's imports:
import json
import logging
import sys
import time
import traceback
from typing import TYPE_CHECKING, Callable, List, Union
from uuid import uuid4
from opentracing import Format, Span, logs, tags
from prometheus_client import Counter, Gauge, Histogram
from twisted.internet.defer import ensureDeferred
from twisted.web import server
from twisted.web.http import (
Request,
combinedLogFormatter,
datetimeToLogString,
proxiedLogFormatter,
)
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from sygnal.exceptions import (
InvalidNotificationException,
NotificationDispatchException,
)
from sygnal.notifications import Notification, NotificationContext, Pushkin
from sygnal.utils import NotificationLoggerAdapter, json_decoder
from sygnal.sygnal import Sygnal
and any relevant context from other files:
# Path: sygnal/exceptions.py
# class InvalidNotificationException(Exception):
# pass
#
# class NotificationDispatchException(Exception):
# pass
#
# Path: sygnal/notifications.py
# class Notification:
# def __init__(self, notif):
# # optional attributes
# self.room_name: Optional[str] = notif.get("room_name")
# self.room_alias: Optional[str] = notif.get("room_alias")
# self.prio: Optional[str] = notif.get("prio")
# self.membership: Optional[str] = notif.get("membership")
# self.sender_display_name: Optional[str] = notif.get("sender_display_name")
# self.content: Optional[Dict[str, Any]] = notif.get("content")
# self.event_id: Optional[str] = notif.get("event_id")
# self.room_id: Optional[str] = notif.get("room_id")
# self.user_is_target: Optional[bool] = notif.get("user_is_target")
# self.type: Optional[str] = notif.get("type")
# self.sender: Optional[str] = notif.get("sender")
#
# if "devices" not in notif or not isinstance(notif["devices"], list):
# raise InvalidNotificationException("Expected list in 'devices' key")
#
# if "counts" in notif:
# self.counts = Counts(notif["counts"])
# else:
# self.counts = Counts({})
#
# self.devices = [Device(d) for d in notif["devices"]]
#
# class NotificationContext(object):
# def __init__(self, request_id: str, opentracing_span: Span, start_time: float):
# """
# Args:
# request_id: An ID for the request, or None to have it
# generated automatically.
# opentracing_span: The span for the API request triggering
# the notification.
# start_time: Start timer value, `time.perf_counter()`
# """
# self.request_id = request_id
# self.opentracing_span = opentracing_span
# self.start_time = start_time
#
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
#
# Path: sygnal/utils.py
# async def twisted_sleep(delay: float, twisted_reactor: "SygnalReactor") -> None:
# def process(
# self, msg: str, kwargs: MutableMapping[str, Any]
# ) -> Tuple[str, MutableMapping[str, Any]]:
# def _reject_invalid_json(val: Any) -> None:
# class NotificationLoggerAdapter(LoggerAdapter):
. Output only the next line. | body = json_decoder.decode(request.content.read().decode("utf-8")) |
Here is a snippet: <|code_start|># distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if TYPE_CHECKING:
T = TypeVar("T")
@overload
def get_key(raw: Dict[str, Any], key: str, type_: Type[T], default: T) -> T:
...
@overload
def get_key(
raw: Dict[str, Any], key: str, type_: Type[T], default: None = None
) -> Optional[T]:
...
def get_key(
raw: Dict[str, Any], key: str, type_: Type[T], default: Optional[T] = None
) -> Optional[T]:
if key not in raw:
return default
if not isinstance(raw[key], type_):
<|code_end|>
. Write the next line using the current file imports:
import abc
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, TypeVar, overload
from matrix_common.regex import glob_to_regex
from opentracing import Span
from prometheus_client import Counter
from sygnal.exceptions import (
InvalidNotificationException,
NotificationDispatchException,
PushkinSetupException,
)
from sygnal.sygnal import Sygnal
and context from other files:
# Path: sygnal/exceptions.py
# class InvalidNotificationException(Exception):
# pass
#
# class NotificationDispatchException(Exception):
# pass
#
# class PushkinSetupException(Exception):
# pass
, which may include functions, classes, or code. Output only the next line. | raise InvalidNotificationException(f"{key} is of invalid type") |
Here is a snippet: <|code_start|> DEFAULT_CONCURRENCY_LIMIT = 512
UNDERSTOOD_CONFIG_FIELDS = {"inflight_request_limit"}
RATELIMITING_DROPPED_REQUESTS = Counter(
"sygnal_inflight_request_limit_drop",
"Number of notifications dropped because the number of inflight requests"
" exceeded the configured inflight_request_limit.",
labelnames=["pushkin"],
)
def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
super().__init__(name, sygnal, config)
self._concurrent_limit = config.get(
"inflight_request_limit",
ConcurrencyLimitedPushkin.DEFAULT_CONCURRENCY_LIMIT,
)
self._concurrent_now = 0
# Grab an instance of the dropped request counter given our pushkin name.
# Note this ensures the counter appears in metrics even if it hasn't yet
# been incremented.
dropped_requests = ConcurrencyLimitedPushkin.RATELIMITING_DROPPED_REQUESTS
self.dropped_requests_counter = dropped_requests.labels(pushkin=name)
async def dispatch_notification(
self, n: Notification, device: Device, context: "NotificationContext"
) -> List[str]:
if self._concurrent_now >= self._concurrent_limit:
self.dropped_requests_counter.inc()
<|code_end|>
. Write the next line using the current file imports:
import abc
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, TypeVar, overload
from matrix_common.regex import glob_to_regex
from opentracing import Span
from prometheus_client import Counter
from sygnal.exceptions import (
InvalidNotificationException,
NotificationDispatchException,
PushkinSetupException,
)
from sygnal.sygnal import Sygnal
and context from other files:
# Path: sygnal/exceptions.py
# class InvalidNotificationException(Exception):
# pass
#
# class NotificationDispatchException(Exception):
# pass
#
# class PushkinSetupException(Exception):
# pass
, which may include functions, classes, or code. Output only the next line. | raise NotificationDispatchException( |
Given snippet: <|code_start|>
if "counts" in notif:
self.counts = Counts(notif["counts"])
else:
self.counts = Counts({})
self.devices = [Device(d) for d in notif["devices"]]
class Pushkin(abc.ABC):
def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
self.name = name
self.appid_pattern = glob_to_regex(name, ignore_case=False)
self.cfg = config
self.sygnal = sygnal
@overload
def get_config(self, key: str, type_: Type[T], default: T) -> T:
...
@overload
def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
...
def get_config(
self, key: str, type_: Type[T], default: Optional[T] = None
) -> Optional[T]:
if key not in self.cfg:
return default
if not isinstance(self.cfg[key], type_):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import abc
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, TypeVar, overload
from matrix_common.regex import glob_to_regex
from opentracing import Span
from prometheus_client import Counter
from sygnal.exceptions import (
InvalidNotificationException,
NotificationDispatchException,
PushkinSetupException,
)
from sygnal.sygnal import Sygnal
and context:
# Path: sygnal/exceptions.py
# class InvalidNotificationException(Exception):
# pass
#
# class NotificationDispatchException(Exception):
# pass
#
# class PushkinSetupException(Exception):
# pass
which might include code, classes, or functions. Output only the next line. | raise PushkinSetupException( |
Given snippet: <|code_start|> offset: Offset of the string
Returns:
A string formed of weird and wonderful UTF-8 emoji characters.
"""
chars = ["\U0001F430", "\U0001F431", "\U0001F432", "\U0001F433"]
return "".join([chars[(i + offset) % len(chars)] for i in range(length)])
def payload_for_aps(aps):
"""
Returns the APNS payload for an 'aps' dictionary.
"""
return {"aps": aps}
class TruncateTestCase(unittest.TestCase):
def test_dont_truncate(self):
"""
Tests that truncation is not performed if unnecessary.
"""
# This shouldn't need to be truncated
txt = simplestring(20)
aps = {"alert": txt}
self.assertEqual(txt, truncate(payload_for_aps(aps), 256)["aps"]["alert"])
def test_truncate_alert(self):
"""
Tests that the 'alert' string field will be truncated when needed.
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import string
import unittest
from sygnal.apnstruncate import json_encode, truncate
and context:
# Path: sygnal/apnstruncate.py
# def json_encode(payload) -> bytes:
# return json.dumps(payload, ensure_ascii=False).encode()
#
# def truncate(payload: Dict[str, Any], max_length: int = 2048) -> Dict[str, Any]:
# """
# Truncate APNs fields to make the payload fit within the max length
# specified.
# Only truncates fields that are safe to do so.
#
# Args:
# payload: nested dict that will be passed to APNs
# max_length: Maximum length, in bytes, that the payload should occupy
# when JSON-encoded.
#
# Returns:
# Nested dict which should comply with the maximum length restriction.
#
# """
# payload = payload.copy()
# if "aps" not in payload:
# if is_too_long(payload, max_length):
# raise BodyTooLongException()
# else:
# return payload
# aps = payload["aps"]
#
# # first ensure all our choppables are str objects.
# # We need them to be for truncating to work and this
# # makes more sense than checking every time.
# for c in _choppables_for_aps(aps):
# val = _choppable_get(aps, c)
# if isinstance(val, bytes):
# _choppable_put(aps, c, val.decode())
#
# # chop off whole unicode characters until it fits (or we run out of chars)
# while is_too_long(payload, max_length):
# longest = _longest_choppable(aps)
# if longest is None:
# raise BodyTooLongException()
#
# txt = _choppable_get(aps, longest)
# # Note that python's support for this is actually broken on some OSes
# # (see test_apnstruncate.py)
# txt = txt[:-1]
# _choppable_put(aps, longest, txt)
# payload["aps"] = aps
#
# return payload
which might include code, classes, or functions. Output only the next line. | overhead = len(json_encode(payload_for_aps({"alert": ""}))) |
Given snippet: <|code_start|>
def sillystring(length, offset=0):
"""
Deterministically generates a string
Args:
length: Length of the string
offset: Offset of the string
Returns:
A string formed of weird and wonderful UTF-8 emoji characters.
"""
chars = ["\U0001F430", "\U0001F431", "\U0001F432", "\U0001F433"]
return "".join([chars[(i + offset) % len(chars)] for i in range(length)])
def payload_for_aps(aps):
"""
Returns the APNS payload for an 'aps' dictionary.
"""
return {"aps": aps}
class TruncateTestCase(unittest.TestCase):
def test_dont_truncate(self):
"""
Tests that truncation is not performed if unnecessary.
"""
# This shouldn't need to be truncated
txt = simplestring(20)
aps = {"alert": txt}
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import string
import unittest
from sygnal.apnstruncate import json_encode, truncate
and context:
# Path: sygnal/apnstruncate.py
# def json_encode(payload) -> bytes:
# return json.dumps(payload, ensure_ascii=False).encode()
#
# def truncate(payload: Dict[str, Any], max_length: int = 2048) -> Dict[str, Any]:
# """
# Truncate APNs fields to make the payload fit within the max length
# specified.
# Only truncates fields that are safe to do so.
#
# Args:
# payload: nested dict that will be passed to APNs
# max_length: Maximum length, in bytes, that the payload should occupy
# when JSON-encoded.
#
# Returns:
# Nested dict which should comply with the maximum length restriction.
#
# """
# payload = payload.copy()
# if "aps" not in payload:
# if is_too_long(payload, max_length):
# raise BodyTooLongException()
# else:
# return payload
# aps = payload["aps"]
#
# # first ensure all our choppables are str objects.
# # We need them to be for truncating to work and this
# # makes more sense than checking every time.
# for c in _choppables_for_aps(aps):
# val = _choppable_get(aps, c)
# if isinstance(val, bytes):
# _choppable_put(aps, c, val.decode())
#
# # chop off whole unicode characters until it fits (or we run out of chars)
# while is_too_long(payload, max_length):
# longest = _longest_choppable(aps)
# if longest is None:
# raise BodyTooLongException()
#
# txt = _choppable_get(aps, longest)
# # Note that python's support for this is actually broken on some OSes
# # (see test_apnstruncate.py)
# txt = txt[:-1]
# _choppable_put(aps, longest, txt)
# payload["aps"] = aps
#
# return payload
which might include code, classes, or functions. Output only the next line. | self.assertEqual(txt, truncate(payload_for_aps(aps), 256)["aps"]["alert"]) |
Next line prediction: <|code_start|> kind_split = app_type.rsplit(".", 1)
to_import = kind_split[0]
to_construct = kind_split[1]
else:
to_import = f"sygnal.{app_type}pushkin"
to_construct = f"{app_type.capitalize()}Pushkin"
logger.info("Importing pushkin module: %s", to_import)
pushkin_module = importlib.import_module(to_import)
logger.info("Creating pushkin: %s", to_construct)
clarse = getattr(pushkin_module, to_construct)
return await clarse.create(app_name, self, app_config)
async def make_pushkins_then_start(self) -> None:
for app_id, app_cfg in self.config["apps"].items():
try:
self.pushkins[app_id] = await self._make_pushkin(app_id, app_cfg)
except Exception:
logger.error(
"Failed to load and create pushkin for kind '%s'" % app_cfg["type"]
)
raise
if len(self.pushkins) == 0:
raise RuntimeError(
"No app IDs are configured. Edit sygnal.yaml to define some."
)
logger.info("Configured with app IDs: %r", self.pushkins.keys())
<|code_end|>
. Use current file imports:
(import copy
import importlib
import logging
import logging.config
import os
import sys
import opentracing
import prometheus_client
import yaml
import sentry_sdk
import jaeger_client
from typing import Any, Dict, Set, cast
from opentracing import Tracer
from opentracing.scope_managers.asyncio import AsyncioScopeManager
from twisted.internet import asyncioreactor, defer
from twisted.internet.defer import ensureDeferred
from twisted.internet.interfaces import (
IReactorCore,
IReactorFDSet,
IReactorPluggableNameResolver,
IReactorTCP,
IReactorTime,
)
from twisted.python import log as twisted_log
from twisted.python.failure import Failure
from zope.interface import Interface
from sygnal.http import PushGatewayApiServer
from sygnal.notifications import Pushkin)
and context including class names, function names, or small code snippets from other files:
# Path: sygnal/http.py
# class PushGatewayApiServer(object):
# def __init__(self, sygnal: "Sygnal"):
# """
# Initialises the /_matrix/push/* (Push Gateway API) server.
# Args:
# sygnal (Sygnal): the Sygnal object
# """
# root = Resource()
# matrix = Resource()
# push = Resource()
# v1 = Resource()
#
# # Note that using plain strings here will lead to silent failure
# root.putChild(b"_matrix", matrix)
# matrix.putChild(b"push", push)
# push.putChild(b"v1", v1)
# v1.putChild(b"notify", V1NotifyHandler(sygnal))
#
# # add health
# root.putChild(b"health", HealthHandler())
#
# use_x_forwarded_for = sygnal.config["log"]["access"]["x_forwarded_for"]
#
# log_formatter = (
# proxiedLogFormatter if use_x_forwarded_for else combinedLogFormatter
# )
#
# self.site = SygnalLoggedSite(
# root,
# reactor=sygnal.reactor,
# log_formatter=log_formatter,
# requestFactory=SizeLimitingRequest,
# )
#
# Path: sygnal/notifications.py
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
. Output only the next line. | pushgateway_api = PushGatewayApiServer(self) |
Using the snippet: <|code_start|>}
class SygnalReactor(
IReactorFDSet,
IReactorPluggableNameResolver,
IReactorTCP,
IReactorCore,
IReactorTime,
Interface,
):
pass
class Sygnal:
def __init__(
self,
config: Dict[str, Any],
custom_reactor: SygnalReactor,
tracer: Tracer = opentracing.tracer,
):
"""
Object that holds state for the entirety of a Sygnal instance.
Args:
config: Configuration for this Sygnal
custom_reactor: a Twisted Reactor to use.
tracer (optional): an OpenTracing tracer. The default is the no-op tracer.
"""
self.config = config
self.reactor = custom_reactor
<|code_end|>
, determine the next line of code. You have imports:
import copy
import importlib
import logging
import logging.config
import os
import sys
import opentracing
import prometheus_client
import yaml
import sentry_sdk
import jaeger_client
from typing import Any, Dict, Set, cast
from opentracing import Tracer
from opentracing.scope_managers.asyncio import AsyncioScopeManager
from twisted.internet import asyncioreactor, defer
from twisted.internet.defer import ensureDeferred
from twisted.internet.interfaces import (
IReactorCore,
IReactorFDSet,
IReactorPluggableNameResolver,
IReactorTCP,
IReactorTime,
)
from twisted.python import log as twisted_log
from twisted.python.failure import Failure
from zope.interface import Interface
from sygnal.http import PushGatewayApiServer
from sygnal.notifications import Pushkin
and context (class names, function names, or code) available:
# Path: sygnal/http.py
# class PushGatewayApiServer(object):
# def __init__(self, sygnal: "Sygnal"):
# """
# Initialises the /_matrix/push/* (Push Gateway API) server.
# Args:
# sygnal (Sygnal): the Sygnal object
# """
# root = Resource()
# matrix = Resource()
# push = Resource()
# v1 = Resource()
#
# # Note that using plain strings here will lead to silent failure
# root.putChild(b"_matrix", matrix)
# matrix.putChild(b"push", push)
# push.putChild(b"v1", v1)
# v1.putChild(b"notify", V1NotifyHandler(sygnal))
#
# # add health
# root.putChild(b"health", HealthHandler())
#
# use_x_forwarded_for = sygnal.config["log"]["access"]["x_forwarded_for"]
#
# log_formatter = (
# proxiedLogFormatter if use_x_forwarded_for else combinedLogFormatter
# )
#
# self.site = SygnalLoggedSite(
# root,
# reactor=sygnal.reactor,
# log_formatter=log_formatter,
# requestFactory=SizeLimitingRequest,
# )
#
# Path: sygnal/notifications.py
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
. Output only the next line. | self.pushkins: Dict[str, Pushkin] = {} |
Predict the next line for this snippet: <|code_start|>}
DEVICE_TEMPORARY_REMOTE_ERROR = {
"app_id": "com.example.spqr",
"pushkey": "temporary_remote_error",
"pushkey_ts": 1234,
}
DEVICE_REJECTED = {
"app_id": "com.example.spqr",
"pushkey": "reject",
"pushkey_ts": 1234,
}
DEVICE_ACCEPTED = {
"app_id": "com.example.spqr",
"pushkey": "accept",
"pushkey_ts": 1234,
}
class TestPushkin(Pushkin):
"""
A synthetic Pushkin with simple rules.
"""
async def dispatch_notification(self, n, device, context):
if device.pushkey == "raise_exception":
raise Exception("Bad things have occurred!")
elif device.pushkey == "remote_error":
<|code_end|>
with the help of current file imports:
from twisted.internet.address import IPv6Address
from twisted.internet.testing import StringTransport
from sygnal.exceptions import (
NotificationDispatchException,
TemporaryNotificationDispatchException,
)
from sygnal.notifications import Pushkin
from tests import testutils
and context from other files:
# Path: sygnal/exceptions.py
# class NotificationDispatchException(Exception):
# pass
#
# class TemporaryNotificationDispatchException(Exception):
# """
# To be used by pushkins for errors that are not our fault and are
# hopefully temporary, so the request should possibly be retried soon.
# """
#
# def __init__(self, *args: object, custom_retry_delay: Optional[int] = None) -> None:
# super().__init__(*args)
# self.custom_retry_delay = custom_retry_delay
#
# Path: sygnal/notifications.py
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
#
# Path: tests/testutils.py
# REQ_PATH = b"/_matrix/push/v1/notify"
# class TestCase(unittest.TestCase):
# class ExtendedMemoryReactorClock(MemoryReactorClock):
# class FakeResolver:
# class DummyResponse:
# class HTTPResult:
# class FakeChannel:
# def config_setup(self, config):
# def setUp(self):
# def _make_dummy_notification(self, devices):
# def _make_dummy_notification_event_id_only(self, devices):
# def _make_dummy_notification_badge_only(self, devices):
# def _request(self, payload: Union[str, dict]) -> Union[dict, int]:
# def _multi_requests(
# self, payloads: List[Union[str, dict]]
# ) -> List[Union[dict, int]]:
# def dump_if_needed(payload):
# def all_channels_done():
# def channel_result(channel):
# def __init__(self):
# def getHostByName(name, timeout=None):
# def installNameResolver(self, resolver):
# def callFromThread(self, function, *args):
# def callLater(self, when, what, *a, **kw):
# def wait_for_work(self, early_stop=lambda: False):
# def __init__(self, code):
# def make_async_magic_mock(ret_val):
# async def dummy(*_args, **_kwargs):
# def code(self):
# def writeHeaders(self, version, code, reason, headers):
# def write(self, content):
# def requestDone(self, _self):
# def getPeer(self):
# def getHost(self):
# def transport(self):
# def process_request(self, method: bytes, request_path: bytes, content: BinaryIO):
, which may contain function names, class names, or code. Output only the next line. | raise NotificationDispatchException("Synthetic failure") |
Here is a snippet: <|code_start|>DEVICE_TEMPORARY_REMOTE_ERROR = {
"app_id": "com.example.spqr",
"pushkey": "temporary_remote_error",
"pushkey_ts": 1234,
}
DEVICE_REJECTED = {
"app_id": "com.example.spqr",
"pushkey": "reject",
"pushkey_ts": 1234,
}
DEVICE_ACCEPTED = {
"app_id": "com.example.spqr",
"pushkey": "accept",
"pushkey_ts": 1234,
}
class TestPushkin(Pushkin):
"""
A synthetic Pushkin with simple rules.
"""
async def dispatch_notification(self, n, device, context):
if device.pushkey == "raise_exception":
raise Exception("Bad things have occurred!")
elif device.pushkey == "remote_error":
raise NotificationDispatchException("Synthetic failure")
elif device.pushkey == "temporary_remote_error":
<|code_end|>
. Write the next line using the current file imports:
from twisted.internet.address import IPv6Address
from twisted.internet.testing import StringTransport
from sygnal.exceptions import (
NotificationDispatchException,
TemporaryNotificationDispatchException,
)
from sygnal.notifications import Pushkin
from tests import testutils
and context from other files:
# Path: sygnal/exceptions.py
# class NotificationDispatchException(Exception):
# pass
#
# class TemporaryNotificationDispatchException(Exception):
# """
# To be used by pushkins for errors that are not our fault and are
# hopefully temporary, so the request should possibly be retried soon.
# """
#
# def __init__(self, *args: object, custom_retry_delay: Optional[int] = None) -> None:
# super().__init__(*args)
# self.custom_retry_delay = custom_retry_delay
#
# Path: sygnal/notifications.py
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
#
# Path: tests/testutils.py
# REQ_PATH = b"/_matrix/push/v1/notify"
# class TestCase(unittest.TestCase):
# class ExtendedMemoryReactorClock(MemoryReactorClock):
# class FakeResolver:
# class DummyResponse:
# class HTTPResult:
# class FakeChannel:
# def config_setup(self, config):
# def setUp(self):
# def _make_dummy_notification(self, devices):
# def _make_dummy_notification_event_id_only(self, devices):
# def _make_dummy_notification_badge_only(self, devices):
# def _request(self, payload: Union[str, dict]) -> Union[dict, int]:
# def _multi_requests(
# self, payloads: List[Union[str, dict]]
# ) -> List[Union[dict, int]]:
# def dump_if_needed(payload):
# def all_channels_done():
# def channel_result(channel):
# def __init__(self):
# def getHostByName(name, timeout=None):
# def installNameResolver(self, resolver):
# def callFromThread(self, function, *args):
# def callLater(self, when, what, *a, **kw):
# def wait_for_work(self, early_stop=lambda: False):
# def __init__(self, code):
# def make_async_magic_mock(ret_val):
# async def dummy(*_args, **_kwargs):
# def code(self):
# def writeHeaders(self, version, code, reason, headers):
# def write(self, content):
# def requestDone(self, _self):
# def getPeer(self):
# def getHost(self):
# def transport(self):
# def process_request(self, method: bytes, request_path: bytes, content: BinaryIO):
, which may include functions, classes, or code. Output only the next line. | raise TemporaryNotificationDispatchException("Synthetic failure") |
Given the code snippet: <|code_start|> "app_id": "com.example.spqr",
"pushkey": "raise_exception",
"pushkey_ts": 1234,
}
DEVICE_REMOTE_ERROR = {
"app_id": "com.example.spqr",
"pushkey": "remote_error",
"pushkey_ts": 1234,
}
DEVICE_TEMPORARY_REMOTE_ERROR = {
"app_id": "com.example.spqr",
"pushkey": "temporary_remote_error",
"pushkey_ts": 1234,
}
DEVICE_REJECTED = {
"app_id": "com.example.spqr",
"pushkey": "reject",
"pushkey_ts": 1234,
}
DEVICE_ACCEPTED = {
"app_id": "com.example.spqr",
"pushkey": "accept",
"pushkey_ts": 1234,
}
<|code_end|>
, generate the next line using the imports in this file:
from twisted.internet.address import IPv6Address
from twisted.internet.testing import StringTransport
from sygnal.exceptions import (
NotificationDispatchException,
TemporaryNotificationDispatchException,
)
from sygnal.notifications import Pushkin
from tests import testutils
and context (functions, classes, or occasionally code) from other files:
# Path: sygnal/exceptions.py
# class NotificationDispatchException(Exception):
# pass
#
# class TemporaryNotificationDispatchException(Exception):
# """
# To be used by pushkins for errors that are not our fault and are
# hopefully temporary, so the request should possibly be retried soon.
# """
#
# def __init__(self, *args: object, custom_retry_delay: Optional[int] = None) -> None:
# super().__init__(*args)
# self.custom_retry_delay = custom_retry_delay
#
# Path: sygnal/notifications.py
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
#
# Path: tests/testutils.py
# REQ_PATH = b"/_matrix/push/v1/notify"
# class TestCase(unittest.TestCase):
# class ExtendedMemoryReactorClock(MemoryReactorClock):
# class FakeResolver:
# class DummyResponse:
# class HTTPResult:
# class FakeChannel:
# def config_setup(self, config):
# def setUp(self):
# def _make_dummy_notification(self, devices):
# def _make_dummy_notification_event_id_only(self, devices):
# def _make_dummy_notification_badge_only(self, devices):
# def _request(self, payload: Union[str, dict]) -> Union[dict, int]:
# def _multi_requests(
# self, payloads: List[Union[str, dict]]
# ) -> List[Union[dict, int]]:
# def dump_if_needed(payload):
# def all_channels_done():
# def channel_result(channel):
# def __init__(self):
# def getHostByName(name, timeout=None):
# def installNameResolver(self, resolver):
# def callFromThread(self, function, *args):
# def callLater(self, when, what, *a, **kw):
# def wait_for_work(self, early_stop=lambda: False):
# def __init__(self, code):
# def make_async_magic_mock(ret_val):
# async def dummy(*_args, **_kwargs):
# def code(self):
# def writeHeaders(self, version, code, reason, headers):
# def write(self, content):
# def requestDone(self, _self):
# def getPeer(self):
# def getHost(self):
# def transport(self):
# def process_request(self, method: bytes, request_path: bytes, content: BinaryIO):
. Output only the next line. | class TestPushkin(Pushkin): |
Here is a snippet: <|code_start|> "pushkey": "reject",
"pushkey_ts": 1234,
}
DEVICE_ACCEPTED = {
"app_id": "com.example.spqr",
"pushkey": "accept",
"pushkey_ts": 1234,
}
class TestPushkin(Pushkin):
"""
A synthetic Pushkin with simple rules.
"""
async def dispatch_notification(self, n, device, context):
if device.pushkey == "raise_exception":
raise Exception("Bad things have occurred!")
elif device.pushkey == "remote_error":
raise NotificationDispatchException("Synthetic failure")
elif device.pushkey == "temporary_remote_error":
raise TemporaryNotificationDispatchException("Synthetic failure")
elif device.pushkey == "reject":
return [device.pushkey]
elif device.pushkey == "accept":
return []
raise Exception(f"Unexpected fall-through. {device.pushkey}")
<|code_end|>
. Write the next line using the current file imports:
from twisted.internet.address import IPv6Address
from twisted.internet.testing import StringTransport
from sygnal.exceptions import (
NotificationDispatchException,
TemporaryNotificationDispatchException,
)
from sygnal.notifications import Pushkin
from tests import testutils
and context from other files:
# Path: sygnal/exceptions.py
# class NotificationDispatchException(Exception):
# pass
#
# class TemporaryNotificationDispatchException(Exception):
# """
# To be used by pushkins for errors that are not our fault and are
# hopefully temporary, so the request should possibly be retried soon.
# """
#
# def __init__(self, *args: object, custom_retry_delay: Optional[int] = None) -> None:
# super().__init__(*args)
# self.custom_retry_delay = custom_retry_delay
#
# Path: sygnal/notifications.py
# class Pushkin(abc.ABC):
# def __init__(self, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# self.name = name
# self.appid_pattern = glob_to_regex(name, ignore_case=False)
# self.cfg = config
# self.sygnal = sygnal
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: T) -> T:
# ...
#
# @overload
# def get_config(self, key: str, type_: Type[T], default: None = None) -> Optional[T]:
# ...
#
# def get_config(
# self, key: str, type_: Type[T], default: Optional[T] = None
# ) -> Optional[T]:
# if key not in self.cfg:
# return default
# if not isinstance(self.cfg[key], type_):
# raise PushkinSetupException(
# f"{key} is of incorrect type, please check that the entry for {key} is "
# f"formatted correctly in the config file. "
# )
# return self.cfg[key]
#
# def handles_appid(self, appid: str) -> bool:
# """Checks whether the pushkin is responsible for the given app ID"""
# return self.name == appid or self.appid_pattern.match(appid) is not None
#
# @abc.abstractmethod
# async def dispatch_notification(
# self, n: Notification, device: Device, context: "NotificationContext"
# ) -> List[str]:
# """
# Args:
# n: The notification to dispatch via this pushkin
# device: The device to dispatch the notification for.
# context: the request context
#
# Returns:
# A list of rejected pushkeys, to be reported back to the homeserver
# """
# ...
#
# @classmethod
# async def create(cls, name: str, sygnal: "Sygnal", config: Dict[str, Any]):
# """
# Override this if your pushkin needs to call async code in order to
# be constructed. Otherwise, it defaults to just invoking the Python-standard
# __init__ constructor.
#
# Returns:
# an instance of this Pushkin
# """
# return cls(name, sygnal, config)
#
# Path: tests/testutils.py
# REQ_PATH = b"/_matrix/push/v1/notify"
# class TestCase(unittest.TestCase):
# class ExtendedMemoryReactorClock(MemoryReactorClock):
# class FakeResolver:
# class DummyResponse:
# class HTTPResult:
# class FakeChannel:
# def config_setup(self, config):
# def setUp(self):
# def _make_dummy_notification(self, devices):
# def _make_dummy_notification_event_id_only(self, devices):
# def _make_dummy_notification_badge_only(self, devices):
# def _request(self, payload: Union[str, dict]) -> Union[dict, int]:
# def _multi_requests(
# self, payloads: List[Union[str, dict]]
# ) -> List[Union[dict, int]]:
# def dump_if_needed(payload):
# def all_channels_done():
# def channel_result(channel):
# def __init__(self):
# def getHostByName(name, timeout=None):
# def installNameResolver(self, resolver):
# def callFromThread(self, function, *args):
# def callLater(self, when, what, *a, **kw):
# def wait_for_work(self, early_stop=lambda: False):
# def __init__(self, code):
# def make_async_magic_mock(ret_val):
# async def dummy(*_args, **_kwargs):
# def code(self):
# def writeHeaders(self, version, code, reason, headers):
# def write(self, content):
# def requestDone(self, _self):
# def getPeer(self):
# def getHost(self):
# def transport(self):
# def process_request(self, method: bytes, request_path: bytes, content: BinaryIO):
, which may include functions, classes, or code. Output only the next line. | class PushGatewayApiV1TestCase(testutils.TestCase): |
Continue the code snippet: <|code_start|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__="elishowk@nonutc.fr"
# get tinasoft's logger
_logger = logging.getLogger('TinaAppLogger')
class Importer(Handler):
"""
Medline archive handler
"""
def __init__(self, path, **config):
if 'medlinearchive' in config:
self.loadOptions(config['medlinearchive'])
self.fileconfig = config['medline']
self.path = path
def walkArchive(self, periods):
"""
For a given list of periods
Yields a medline file reader for each period
"""
for id in periods:
abstractFilePath = join(self.path, id, id + '.txt')
<|code_end|>
. Use current file imports:
from tinasoft.data import medline, Handler
from os.path import join
import logging
and context (classes, functions, or code) from other files:
# Path: tinasoft/data/medline.py
# class Record(dict):
# class Importer(sourcefile.Importer):
# def __init__(self):
# def __init__(self, path, **options):
# def get_record(self):
# def _parse_period(self, record):
# def next(self):
# def __iter__(self):
. Output only the next line. | reader = medline.Importer( abstractFilePath, **self.fileconfig ) |
Here is a snippet: <|code_start|>
class TestTermBase(unittest.TestCase):
@patch("builtins.open")
@patch.object(requests.Session, "request")
def test_download(self, mock_request: unittest.mock, mock_open: unittest.mock.MagicMock):
mock_request.return_value = unittest.mock.MagicMock(status_code=200)
TermBase(token="mock-token").download(1, "mock-local-filepath")
mock_request.assert_called_with(
<|code_end|>
. Write the next line using the current file imports:
import requests
import unittest
from unittest.mock import patch
from memsource import constants
from memsource.api_rest.term_base import TermBase
and context from other files:
# Path: memsource/constants.py
# class Base(enum.Enum):
# class JobStatus(enum.Enum):
# class ProjectStatus(enum.Enum):
# class AnalysisFormat(enum.Enum):
# class TermBaseFormat(enum.Enum):
# class ApiVersion(enum.Enum):
# class HttpMethod(enum.Enum):
# class BaseRest(enum.Enum):
# class JobStatusRest(enum.Enum):
# NEW = "New"
# EMAILED = "Emailed"
# ASSIGNED = "Assigned"
# DECLINED_BY_LINGUIST = "Declined_By_Linguist"
# COMPLETED_BY_LINGUIST = "Completed_By_Linguist"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# NEW = "New"
# ASSIGNED = "Assigned"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# ACCEPTED_BY_VENDOR = "Accepted_By_Vendor"
# DECLINED_BY_VENDOR = "Declined_By_Vendor"
# COMPLETED_BY_VENDOR = "Completed_By_Vendor"
# CSV = "CSV"
# LOG = "LOG"
# CSV_EXTENDED = "CSV_EXTENDED"
# XLSX = "XLSX"
# TBX = "TBX"
# NEW = "NEW"
# ACCEPTED = "ACCEPTED"
# DECLINED = "DECLINED"
# REJECTED = "REJECTED"
# DELIVERED = "DELIVERED"
# EMAILED = "EMAILED"
# COMPLETED = "COMPLETED"
# CANCELLED = "CANCELLED"
# CHUNK_SIZE = 1024
# CHAR_SET = "UTF-8"
# TM_THRESHOLD = 0.7
#
# Path: memsource/api_rest/term_base.py
# class TermBase(api_rest.BaseApi):
# # Document: https://cloud.memsource.com/web/docs/api#tag/Term-Base
#
# def download(
# self,
# termbase_id: int,
# filepath: str,
# file_format: constants.TermBaseFormat=constants.TermBaseFormat.XLSX,
# chunk_size: int=constants.CHUNK_SIZE,
# charset: str=constants.CHAR_SET,
# ) -> None:
# """Download a term base.
#
# :param termbase_id: ID of the term base to be downloaded.
# :param filepath: Save exported data to this file path.
# :param file_format: TBX or XLSX. Defaults to XLSX.
# :param chunk_size: byte size of chunk for response data.
# """
# params = {
# "format": file_format.value.capitalize(),
# "charset": charset,
# }
#
# with open(filepath, 'wb') as f:
# data_stream = self._get_stream(
# "v1/termBases/{}/export".format(termbase_id), params
# ).iter_content(chunk_size)
#
# for chunk in data_stream:
# f.write(chunk)
, which may include functions, classes, or code. Output only the next line. | constants.HttpMethod.get.value, |
Given snippet: <|code_start|>
class TestTermBase(unittest.TestCase):
@patch("builtins.open")
@patch.object(requests.Session, "request")
def test_download(self, mock_request: unittest.mock, mock_open: unittest.mock.MagicMock):
mock_request.return_value = unittest.mock.MagicMock(status_code=200)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import requests
import unittest
from unittest.mock import patch
from memsource import constants
from memsource.api_rest.term_base import TermBase
and context:
# Path: memsource/constants.py
# class Base(enum.Enum):
# class JobStatus(enum.Enum):
# class ProjectStatus(enum.Enum):
# class AnalysisFormat(enum.Enum):
# class TermBaseFormat(enum.Enum):
# class ApiVersion(enum.Enum):
# class HttpMethod(enum.Enum):
# class BaseRest(enum.Enum):
# class JobStatusRest(enum.Enum):
# NEW = "New"
# EMAILED = "Emailed"
# ASSIGNED = "Assigned"
# DECLINED_BY_LINGUIST = "Declined_By_Linguist"
# COMPLETED_BY_LINGUIST = "Completed_By_Linguist"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# NEW = "New"
# ASSIGNED = "Assigned"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# ACCEPTED_BY_VENDOR = "Accepted_By_Vendor"
# DECLINED_BY_VENDOR = "Declined_By_Vendor"
# COMPLETED_BY_VENDOR = "Completed_By_Vendor"
# CSV = "CSV"
# LOG = "LOG"
# CSV_EXTENDED = "CSV_EXTENDED"
# XLSX = "XLSX"
# TBX = "TBX"
# NEW = "NEW"
# ACCEPTED = "ACCEPTED"
# DECLINED = "DECLINED"
# REJECTED = "REJECTED"
# DELIVERED = "DELIVERED"
# EMAILED = "EMAILED"
# COMPLETED = "COMPLETED"
# CANCELLED = "CANCELLED"
# CHUNK_SIZE = 1024
# CHAR_SET = "UTF-8"
# TM_THRESHOLD = 0.7
#
# Path: memsource/api_rest/term_base.py
# class TermBase(api_rest.BaseApi):
# # Document: https://cloud.memsource.com/web/docs/api#tag/Term-Base
#
# def download(
# self,
# termbase_id: int,
# filepath: str,
# file_format: constants.TermBaseFormat=constants.TermBaseFormat.XLSX,
# chunk_size: int=constants.CHUNK_SIZE,
# charset: str=constants.CHAR_SET,
# ) -> None:
# """Download a term base.
#
# :param termbase_id: ID of the term base to be downloaded.
# :param filepath: Save exported data to this file path.
# :param file_format: TBX or XLSX. Defaults to XLSX.
# :param chunk_size: byte size of chunk for response data.
# """
# params = {
# "format": file_format.value.capitalize(),
# "charset": charset,
# }
#
# with open(filepath, 'wb') as f:
# data_stream = self._get_stream(
# "v1/termBases/{}/export".format(termbase_id), params
# ).iter_content(chunk_size)
#
# for chunk in data_stream:
# f.write(chunk)
which might include code, classes, or functions. Output only the next line. | TermBase(token="mock-token").download(1, "mock-local-filepath") |
Given the following code snippet before the placeholder: <|code_start|>
class TestBilingual(unittest.TestCase):
@patch("builtins.open")
@patch.object(requests.Session, "request")
def test_get_bilingual_file(
self,
mock_request: unittest.mock.Mock,
mock_open: unittest.mock.Mock
):
type(mock_request()).status_code = PropertyMock(return_value=200)
mxliff_contents = ['test mxliff content', 'second']
mock_request().iter_content.return_value = [
bytes(content, 'utf-8') for content in mxliff_contents]
project_id = 1234
job_uids = [1, 2]
<|code_end|>
, predict the next line using imports from the current file:
import os
import requests
import unittest
import uuid
from unittest.mock import patch, PropertyMock
from memsource import constants, models
from memsource.api_rest.bilingual import Bilingual
and context including class names, function names, and sometimes code from other files:
# Path: memsource/constants.py
# class Base(enum.Enum):
# class JobStatus(enum.Enum):
# class ProjectStatus(enum.Enum):
# class AnalysisFormat(enum.Enum):
# class TermBaseFormat(enum.Enum):
# class ApiVersion(enum.Enum):
# class HttpMethod(enum.Enum):
# class BaseRest(enum.Enum):
# class JobStatusRest(enum.Enum):
# NEW = "New"
# EMAILED = "Emailed"
# ASSIGNED = "Assigned"
# DECLINED_BY_LINGUIST = "Declined_By_Linguist"
# COMPLETED_BY_LINGUIST = "Completed_By_Linguist"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# NEW = "New"
# ASSIGNED = "Assigned"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# ACCEPTED_BY_VENDOR = "Accepted_By_Vendor"
# DECLINED_BY_VENDOR = "Declined_By_Vendor"
# COMPLETED_BY_VENDOR = "Completed_By_Vendor"
# CSV = "CSV"
# LOG = "LOG"
# CSV_EXTENDED = "CSV_EXTENDED"
# XLSX = "XLSX"
# TBX = "TBX"
# NEW = "NEW"
# ACCEPTED = "ACCEPTED"
# DECLINED = "DECLINED"
# REJECTED = "REJECTED"
# DELIVERED = "DELIVERED"
# EMAILED = "EMAILED"
# COMPLETED = "COMPLETED"
# CANCELLED = "CANCELLED"
# CHUNK_SIZE = 1024
# CHAR_SET = "UTF-8"
# TM_THRESHOLD = 0.7
#
# Path: memsource/models.py
# class BaseModel(dict):
# class User(BaseModel):
# class Authentication(BaseModel):
# class Client(BaseModel):
# class Domain(BaseModel):
# class Language(BaseModel):
# class Project(BaseModel):
# class Job(BaseModel):
# class JobPart(BaseModel):
# class TranslationMemory(BaseModel):
# class AsynchronousRequest(BaseModel):
# class AsynchronousResponse(BaseModel):
# class Segment(BaseModel):
# class SegmentSearchResult(BaseModel):
# class Analysis(BaseModel):
# class MxliffUnit(BaseModel):
# class TermBase(BaseModel):
# def __getattr__(self, key):
# def _iso8601_to_datetime(self, source):
# def date_created(self):
# def __init__(self, source):
# def is_complete(self):
# def has_error(self):
#
# Path: memsource/api_rest/bilingual.py
# class Bilingual(api_rest.BaseApi):
# # Document: https://cloud.memsource.com/web/docs/api#tag/Bilingual-File
#
# def _get_bilingual_stream(self, project_id: int, job_uids: List[str]) -> Iterator[bytes]:
# """Common process of bilingualFile.
#
# :param project_id: ID of the project.
# :param job_uids: List of job uids.
# :return: Downloaded bilingual file with iterator.
# """
# return self._post_stream(
# path="v1/projects/{}/jobs/bilingualFile".format(project_id),
# data={"jobs": [{"uid": job_uid} for job_uid in job_uids]},
# ).iter_content(constants.CHUNK_SIZE)
#
# def get_bilingual_file_xml(self, project_id: int, job_uids: List[str]) -> bytes:
# """Download bilingual file and return it as bytes.
#
# This method might use huge memory.
#
# :param project_id: ID of the project.
# :param job_uids: List of job uids.
# :return: Downloaded bilingual file.
# """
# buffer = io.BytesIO()
#
# for chunk in self._get_bilingual_stream(project_id, job_uids):
# buffer.write(chunk)
#
# return buffer.getvalue()
#
# def get_bilingual_file(
# self,
# project_id: int,
# job_uids: List[int],
# dest_file_path: str
# ) -> None:
# """Download bilingual file and save it as a file.
#
# :param project_id: ID of the project.
# :param job_uids: List of job uids.
# :param dest_file_path: Save bilingual file to there.
# """
# with open(dest_file_path, "wb") as f:
# for chunk in self._get_bilingual_stream(project_id, job_uids):
# f.write(chunk)
#
# def get_bilingual_as_mxliff_units(
# self,
# project_id: int,
# job_uids: List[str]
# ) -> models.MxliffUnit:
# """Download bilingual file and parse it as [models.MxliffUnit]
#
# :param project_id: ID of the project.
# :param job_uids: List of job uids.
# :returns: MxliffUnit
# """
# return mxliff.MxliffParser().parse(self.get_bilingual_file_xml(project_id, job_uids))
#
# def upload_bilingual_file_from_xml(self, xml: str) -> List[models.Job]:
# """Call uploadBilingualFile API.
#
# :param xml: Upload this file.
# """
# extra_headers = {"Content-Type": "application/octet-stream"}
# self.add_headers(extra_headers)
#
# self._put("v1/bilingualFiles", None, {
# "file": ("{}.mxliff".format(uuid.uuid1().hex), xml),
# })
. Output only the next line. | Bilingual(token="mock-token").get_bilingual_file( |
Predict the next line for this snippet: <|code_start|> headers: Optional[Dict[str, Any]] = None
) -> None:
"""Inheriting classes must have the api_version attribute
:param token: Authentication token for using APIs
"""
if not hasattr(self, 'api_version'):
# This exception is for development this library.
raise NotImplementedError(
'api_version is not set in {}'.format(self.__class__.__name__))
self.token = token
self.headers = headers
@classmethod
def use_session(cls, session: requests.Session) -> None:
"""
Configures the session object which is used for API invocation.
This method is not thread-safe. It is recommended to configure only once.
Arguments:
session -- The session object to be used by BaseApi
"""
cls._session = session
def _make_url(self, *args, **kwargs):
return kwargs.get('format', '{base}/{api_version}/{path}').format(**kwargs)
def _get(
<|code_end|>
with the help of current file imports:
import io
import os
import os.path
import shutil
import types
import urllib.parse
import uuid
import requests
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
)
from memsource import constants, exceptions, models
from memsource.lib import mxliff
and context from other files:
# Path: memsource/constants.py
# class Base(enum.Enum):
# class JobStatus(enum.Enum):
# class ProjectStatus(enum.Enum):
# class AnalysisFormat(enum.Enum):
# class TermBaseFormat(enum.Enum):
# class ApiVersion(enum.Enum):
# class HttpMethod(enum.Enum):
# class BaseRest(enum.Enum):
# class JobStatusRest(enum.Enum):
# NEW = "New"
# EMAILED = "Emailed"
# ASSIGNED = "Assigned"
# DECLINED_BY_LINGUIST = "Declined_By_Linguist"
# COMPLETED_BY_LINGUIST = "Completed_By_Linguist"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# NEW = "New"
# ASSIGNED = "Assigned"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# ACCEPTED_BY_VENDOR = "Accepted_By_Vendor"
# DECLINED_BY_VENDOR = "Declined_By_Vendor"
# COMPLETED_BY_VENDOR = "Completed_By_Vendor"
# CSV = "CSV"
# LOG = "LOG"
# CSV_EXTENDED = "CSV_EXTENDED"
# XLSX = "XLSX"
# TBX = "TBX"
# NEW = "NEW"
# ACCEPTED = "ACCEPTED"
# DECLINED = "DECLINED"
# REJECTED = "REJECTED"
# DELIVERED = "DELIVERED"
# EMAILED = "EMAILED"
# COMPLETED = "COMPLETED"
# CANCELLED = "CANCELLED"
# CHUNK_SIZE = 1024
# CHAR_SET = "UTF-8"
# TM_THRESHOLD = 0.7
#
# Path: memsource/exceptions.py
# class MemsourceException(Exception):
# class MemsourceApiException(MemsourceException):
# class MemsourceUnsupportedFileException(MemsourceException):
# def __init__(self, status_code, result_json, url, params):
# def get_error_code(self):
# def get_error_description(self):
# def __init__(self, unsupported_files, original_file_path, url, params):
#
# Path: memsource/models.py
# class BaseModel(dict):
# class User(BaseModel):
# class Authentication(BaseModel):
# class Client(BaseModel):
# class Domain(BaseModel):
# class Language(BaseModel):
# class Project(BaseModel):
# class Job(BaseModel):
# class JobPart(BaseModel):
# class TranslationMemory(BaseModel):
# class AsynchronousRequest(BaseModel):
# class AsynchronousResponse(BaseModel):
# class Segment(BaseModel):
# class SegmentSearchResult(BaseModel):
# class Analysis(BaseModel):
# class MxliffUnit(BaseModel):
# class TermBase(BaseModel):
# def __getattr__(self, key):
# def _iso8601_to_datetime(self, source):
# def date_created(self):
# def __init__(self, source):
# def is_complete(self):
# def has_error(self):
#
# Path: memsource/lib/mxliff.py
# class MxliffParser(object):
# def parse(self, resource: {'XML file content as bytes': bytes}):
# def to_memsouce_key(s: str) -> str:
# def parse_group(self, group: objectify.ObjectifiedElement) -> models.MxliffUnit:
# def parse_tunit_metadata(self, trans_unit: objectify.ObjectifiedElement) -> list:
, which may contain function names, class names, or code. Output only the next line. | self, path: str, params: dict={}, *, timeout: int=constants.Base.timeout.value |
Given the code snippet: <|code_start|> :param params: Send request with this parameters
:param data: Send request with this data
:param timeout: When takes over this time in one request, raise timeout
:return: response of request module
"""
if http_method == constants.HttpMethod.get:
(url, params) = self._pre_request(path, params)
else:
(url, data) = self._pre_request(path, data)
arguments = {
key: value for key, value in [
('files', files), ('params', params), ('data', data), ('headers', self.headers)
] if value is not None
}
return self._get_response(http_method, url, timeout=timeout, stream=True, **arguments)
def _get_response(
self, http_method: constants.HttpMethod, url: str, **kwargs
) -> requests.models.Response:
"""Request with error handling.
:param http_method: Use this http method
:param url: access to this url
:param kwargs: optional parameters
:return: response of request module
"""
try:
response = self._session.request(http_method.value, url, **kwargs)
<|code_end|>
, generate the next line using the imports in this file:
import io
import os
import os.path
import shutil
import types
import urllib.parse
import uuid
import requests
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
)
from memsource import constants, exceptions, models
from memsource.lib import mxliff
and context (functions, classes, or occasionally code) from other files:
# Path: memsource/constants.py
# class Base(enum.Enum):
# class JobStatus(enum.Enum):
# class ProjectStatus(enum.Enum):
# class AnalysisFormat(enum.Enum):
# class TermBaseFormat(enum.Enum):
# class ApiVersion(enum.Enum):
# class HttpMethod(enum.Enum):
# class BaseRest(enum.Enum):
# class JobStatusRest(enum.Enum):
# NEW = "New"
# EMAILED = "Emailed"
# ASSIGNED = "Assigned"
# DECLINED_BY_LINGUIST = "Declined_By_Linguist"
# COMPLETED_BY_LINGUIST = "Completed_By_Linguist"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# NEW = "New"
# ASSIGNED = "Assigned"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# ACCEPTED_BY_VENDOR = "Accepted_By_Vendor"
# DECLINED_BY_VENDOR = "Declined_By_Vendor"
# COMPLETED_BY_VENDOR = "Completed_By_Vendor"
# CSV = "CSV"
# LOG = "LOG"
# CSV_EXTENDED = "CSV_EXTENDED"
# XLSX = "XLSX"
# TBX = "TBX"
# NEW = "NEW"
# ACCEPTED = "ACCEPTED"
# DECLINED = "DECLINED"
# REJECTED = "REJECTED"
# DELIVERED = "DELIVERED"
# EMAILED = "EMAILED"
# COMPLETED = "COMPLETED"
# CANCELLED = "CANCELLED"
# CHUNK_SIZE = 1024
# CHAR_SET = "UTF-8"
# TM_THRESHOLD = 0.7
#
# Path: memsource/exceptions.py
# class MemsourceException(Exception):
# class MemsourceApiException(MemsourceException):
# class MemsourceUnsupportedFileException(MemsourceException):
# def __init__(self, status_code, result_json, url, params):
# def get_error_code(self):
# def get_error_description(self):
# def __init__(self, unsupported_files, original_file_path, url, params):
#
# Path: memsource/models.py
# class BaseModel(dict):
# class User(BaseModel):
# class Authentication(BaseModel):
# class Client(BaseModel):
# class Domain(BaseModel):
# class Language(BaseModel):
# class Project(BaseModel):
# class Job(BaseModel):
# class JobPart(BaseModel):
# class TranslationMemory(BaseModel):
# class AsynchronousRequest(BaseModel):
# class AsynchronousResponse(BaseModel):
# class Segment(BaseModel):
# class SegmentSearchResult(BaseModel):
# class Analysis(BaseModel):
# class MxliffUnit(BaseModel):
# class TermBase(BaseModel):
# def __getattr__(self, key):
# def _iso8601_to_datetime(self, source):
# def date_created(self):
# def __init__(self, source):
# def is_complete(self):
# def has_error(self):
#
# Path: memsource/lib/mxliff.py
# class MxliffParser(object):
# def parse(self, resource: {'XML file content as bytes': bytes}):
# def to_memsouce_key(s: str) -> str:
# def parse_group(self, group: objectify.ObjectifiedElement) -> models.MxliffUnit:
# def parse_tunit_metadata(self, trans_unit: objectify.ObjectifiedElement) -> list:
. Output only the next line. | except requests.exceptions.Timeout: |
Next line prediction: <|code_start|> """
cls._session = session
def _make_url(self, *args, **kwargs):
return kwargs.get('format', '{base}/{api_version}/{path}').format(**kwargs)
def _get(
self, path: str, params: dict={}, *, timeout: int=constants.Base.timeout.value
) -> str:
return self._request(constants.HttpMethod.get, path,
files=None, params=params, data=None, timeout=timeout)
def _post(self, path: str, data: dict=None, files: dict=None,
timeout: Union[int, float]=constants.Base.timeout.value) -> dict:
"""Send a post request.
If you want to raw response, you can use _get_stream method.
:param path: Send request to this path
:param data: Send request with this parameters
:param files: Upload this files. Key is filename, value is file object
:param timeout: When takes over this time in one request, raise timeout
:return: parsed response body as JSON
"""
return self._request(constants.HttpMethod.post, path,
files=files, params=None, data=data, timeout=timeout)
def _get_stream(
self, path: str, params: dict={}, files: dict=None,
timeout: Union[int, float]=constants.Base.timeout.value * 5
<|code_end|>
. Use current file imports:
(import io
import os
import os.path
import shutil
import types
import urllib.parse
import uuid
import requests
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
)
from memsource import constants, exceptions, models
from memsource.lib import mxliff)
and context including class names, function names, or small code snippets from other files:
# Path: memsource/constants.py
# class Base(enum.Enum):
# class JobStatus(enum.Enum):
# class ProjectStatus(enum.Enum):
# class AnalysisFormat(enum.Enum):
# class TermBaseFormat(enum.Enum):
# class ApiVersion(enum.Enum):
# class HttpMethod(enum.Enum):
# class BaseRest(enum.Enum):
# class JobStatusRest(enum.Enum):
# NEW = "New"
# EMAILED = "Emailed"
# ASSIGNED = "Assigned"
# DECLINED_BY_LINGUIST = "Declined_By_Linguist"
# COMPLETED_BY_LINGUIST = "Completed_By_Linguist"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# NEW = "New"
# ASSIGNED = "Assigned"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# ACCEPTED_BY_VENDOR = "Accepted_By_Vendor"
# DECLINED_BY_VENDOR = "Declined_By_Vendor"
# COMPLETED_BY_VENDOR = "Completed_By_Vendor"
# CSV = "CSV"
# LOG = "LOG"
# CSV_EXTENDED = "CSV_EXTENDED"
# XLSX = "XLSX"
# TBX = "TBX"
# NEW = "NEW"
# ACCEPTED = "ACCEPTED"
# DECLINED = "DECLINED"
# REJECTED = "REJECTED"
# DELIVERED = "DELIVERED"
# EMAILED = "EMAILED"
# COMPLETED = "COMPLETED"
# CANCELLED = "CANCELLED"
# CHUNK_SIZE = 1024
# CHAR_SET = "UTF-8"
# TM_THRESHOLD = 0.7
#
# Path: memsource/exceptions.py
# class MemsourceException(Exception):
# class MemsourceApiException(MemsourceException):
# class MemsourceUnsupportedFileException(MemsourceException):
# def __init__(self, status_code, result_json, url, params):
# def get_error_code(self):
# def get_error_description(self):
# def __init__(self, unsupported_files, original_file_path, url, params):
#
# Path: memsource/models.py
# class BaseModel(dict):
# class User(BaseModel):
# class Authentication(BaseModel):
# class Client(BaseModel):
# class Domain(BaseModel):
# class Language(BaseModel):
# class Project(BaseModel):
# class Job(BaseModel):
# class JobPart(BaseModel):
# class TranslationMemory(BaseModel):
# class AsynchronousRequest(BaseModel):
# class AsynchronousResponse(BaseModel):
# class Segment(BaseModel):
# class SegmentSearchResult(BaseModel):
# class Analysis(BaseModel):
# class MxliffUnit(BaseModel):
# class TermBase(BaseModel):
# def __getattr__(self, key):
# def _iso8601_to_datetime(self, source):
# def date_created(self):
# def __init__(self, source):
# def is_complete(self):
# def has_error(self):
#
# Path: memsource/lib/mxliff.py
# class MxliffParser(object):
# def parse(self, resource: {'XML file content as bytes': bytes}):
# def to_memsouce_key(s: str) -> str:
# def parse_group(self, group: objectify.ObjectifiedElement) -> models.MxliffUnit:
# def parse_tunit_metadata(self, trans_unit: objectify.ObjectifiedElement) -> list:
. Output only the next line. | ) -> requests.models.Response: |
Given snippet: <|code_start|> def getBilingualFile(self, job_parts: List[int], dest_file_path: str) -> None:
"""Download bilingual file and save it as a file.
:param job_parts: List of job_part id.
:param dest_file_path: Save bilingual file to there.
"""
with open(dest_file_path, 'wb') as f:
[f.write(chunk) for chunk in self._getBilingualStream(job_parts)]
def getCompletedFileText(self, job_parts: List[int]) -> bytes:
"""Download completed file and return it.
:param job_parts: List of job_part id.
"""
def getCompletedFileStream() -> types.GeneratorType:
return self._get_stream('job/getCompletedFile', {
'jobPart': job_parts,
}).iter_content(1024)
buffer = io.BytesIO()
[buffer.write(chunk) for chunk in getCompletedFileStream()]
return buffer.getvalue()
def getBilingualAsMxliffUnits(self, job_parts: List[str]) -> models.MxliffUnit:
"""Download bilingual file and parse it as [models.MxliffUnit]
:param job_parts: List of job_part id.
:returns: MxliffUnit
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import io
import os
import os.path
import shutil
import types
import urllib.parse
import uuid
import requests
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
)
from memsource import constants, exceptions, models
from memsource.lib import mxliff
and context:
# Path: memsource/constants.py
# class Base(enum.Enum):
# class JobStatus(enum.Enum):
# class ProjectStatus(enum.Enum):
# class AnalysisFormat(enum.Enum):
# class TermBaseFormat(enum.Enum):
# class ApiVersion(enum.Enum):
# class HttpMethod(enum.Enum):
# class BaseRest(enum.Enum):
# class JobStatusRest(enum.Enum):
# NEW = "New"
# EMAILED = "Emailed"
# ASSIGNED = "Assigned"
# DECLINED_BY_LINGUIST = "Declined_By_Linguist"
# COMPLETED_BY_LINGUIST = "Completed_By_Linguist"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# NEW = "New"
# ASSIGNED = "Assigned"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# ACCEPTED_BY_VENDOR = "Accepted_By_Vendor"
# DECLINED_BY_VENDOR = "Declined_By_Vendor"
# COMPLETED_BY_VENDOR = "Completed_By_Vendor"
# CSV = "CSV"
# LOG = "LOG"
# CSV_EXTENDED = "CSV_EXTENDED"
# XLSX = "XLSX"
# TBX = "TBX"
# NEW = "NEW"
# ACCEPTED = "ACCEPTED"
# DECLINED = "DECLINED"
# REJECTED = "REJECTED"
# DELIVERED = "DELIVERED"
# EMAILED = "EMAILED"
# COMPLETED = "COMPLETED"
# CANCELLED = "CANCELLED"
# CHUNK_SIZE = 1024
# CHAR_SET = "UTF-8"
# TM_THRESHOLD = 0.7
#
# Path: memsource/exceptions.py
# class MemsourceException(Exception):
# class MemsourceApiException(MemsourceException):
# class MemsourceUnsupportedFileException(MemsourceException):
# def __init__(self, status_code, result_json, url, params):
# def get_error_code(self):
# def get_error_description(self):
# def __init__(self, unsupported_files, original_file_path, url, params):
#
# Path: memsource/models.py
# class BaseModel(dict):
# class User(BaseModel):
# class Authentication(BaseModel):
# class Client(BaseModel):
# class Domain(BaseModel):
# class Language(BaseModel):
# class Project(BaseModel):
# class Job(BaseModel):
# class JobPart(BaseModel):
# class TranslationMemory(BaseModel):
# class AsynchronousRequest(BaseModel):
# class AsynchronousResponse(BaseModel):
# class Segment(BaseModel):
# class SegmentSearchResult(BaseModel):
# class Analysis(BaseModel):
# class MxliffUnit(BaseModel):
# class TermBase(BaseModel):
# def __getattr__(self, key):
# def _iso8601_to_datetime(self, source):
# def date_created(self):
# def __init__(self, source):
# def is_complete(self):
# def has_error(self):
#
# Path: memsource/lib/mxliff.py
# class MxliffParser(object):
# def parse(self, resource: {'XML file content as bytes': bytes}):
# def to_memsouce_key(s: str) -> str:
# def parse_group(self, group: objectify.ObjectifiedElement) -> models.MxliffUnit:
# def parse_tunit_metadata(self, trans_unit: objectify.ObjectifiedElement) -> list:
which might include code, classes, or functions. Output only the next line. | return mxliff.MxliffParser().parse(self.getBilingualFileXml(job_parts)) |
Given snippet: <|code_start|>
class TestAnalysis(unittest.TestCase):
@patch.object(requests.Session, "request")
def test_get(self, mock_request: unittest.mock):
mock_request.return_value = unittest.mock.MagicMock(status_code=200)
Analysis(token="mock-token").get(1)
mock_request.assert_called_with(
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import requests
import unittest
from unittest.mock import patch, PropertyMock
from memsource import constants, models
from memsource.api_rest.analysis import Analysis
and context:
# Path: memsource/constants.py
# class Base(enum.Enum):
# class JobStatus(enum.Enum):
# class ProjectStatus(enum.Enum):
# class AnalysisFormat(enum.Enum):
# class TermBaseFormat(enum.Enum):
# class ApiVersion(enum.Enum):
# class HttpMethod(enum.Enum):
# class BaseRest(enum.Enum):
# class JobStatusRest(enum.Enum):
# NEW = "New"
# EMAILED = "Emailed"
# ASSIGNED = "Assigned"
# DECLINED_BY_LINGUIST = "Declined_By_Linguist"
# COMPLETED_BY_LINGUIST = "Completed_By_Linguist"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# NEW = "New"
# ASSIGNED = "Assigned"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# ACCEPTED_BY_VENDOR = "Accepted_By_Vendor"
# DECLINED_BY_VENDOR = "Declined_By_Vendor"
# COMPLETED_BY_VENDOR = "Completed_By_Vendor"
# CSV = "CSV"
# LOG = "LOG"
# CSV_EXTENDED = "CSV_EXTENDED"
# XLSX = "XLSX"
# TBX = "TBX"
# NEW = "NEW"
# ACCEPTED = "ACCEPTED"
# DECLINED = "DECLINED"
# REJECTED = "REJECTED"
# DELIVERED = "DELIVERED"
# EMAILED = "EMAILED"
# COMPLETED = "COMPLETED"
# CANCELLED = "CANCELLED"
# CHUNK_SIZE = 1024
# CHAR_SET = "UTF-8"
# TM_THRESHOLD = 0.7
#
# Path: memsource/models.py
# class BaseModel(dict):
# class User(BaseModel):
# class Authentication(BaseModel):
# class Client(BaseModel):
# class Domain(BaseModel):
# class Language(BaseModel):
# class Project(BaseModel):
# class Job(BaseModel):
# class JobPart(BaseModel):
# class TranslationMemory(BaseModel):
# class AsynchronousRequest(BaseModel):
# class AsynchronousResponse(BaseModel):
# class Segment(BaseModel):
# class SegmentSearchResult(BaseModel):
# class Analysis(BaseModel):
# class MxliffUnit(BaseModel):
# class TermBase(BaseModel):
# def __getattr__(self, key):
# def _iso8601_to_datetime(self, source):
# def date_created(self):
# def __init__(self, source):
# def is_complete(self):
# def has_error(self):
#
# Path: memsource/api_rest/analysis.py
# class Analysis(api_rest.BaseApi):
# # Document: https://cloud.memsource.com/web/docs/api#tag/Analysis
#
# def get(self, analysis_id: int) -> models.Analysis:
# """Call get API.
#
# :param analysis_id: Get analysis of this id.
# :return: Result of analysis.
# """
# return models.Analysis(self._get("v3/analyses/{}".format(analysis_id)))
#
# def create(self, jobs: List[int]) -> models.AsynchronousRequest:
# """Create new analysis.
#
# :param jobs: Target of analysis.
# :return: Result of analysis.
# """
# return models.AsynchronousRequest(self._post("v2/analyses", {
# "jobs": [{"uid": job} for job in jobs],
# }))
#
# def delete(self, analysis_id: int, purge: bool=False) -> None:
# """Delete an analysis.
#
# :param analysis_id: Analysis ID you want to delete.
# :param purge:
# """
# self._delete("v1/analyses/{}".format(analysis_id), {"purge": purge})
#
# def get_by_project(self, project_id: str) -> List[models.Analysis]:
# """List Analyses By Project.
#
# :param project_id: Project ID for which you want to get the analyses.
# :return: List of Analyses.
# """
# project_analyses = self._get("v2/projects/{}/analyses".format(project_id))
# return [models.Analysis(analysis) for analysis in project_analyses["content"]]
#
# def download(
# self,
# analysis_id: int,
# dest_file_path: str,
# file_format: constants.AnalysisFormat=constants.AnalysisFormat.CSV,
# ) -> None:
# """Download analysis into specified file format.
#
# :param analysis_id: Anaylsis ID for which you download.
# :param dest_file_path: Destination path where you want to download the file.
# :param file_format: File format of file.
# :return: Downloaded file with content of the analysis
# """
# with open(dest_file_path, "wb") as f:
# for chunk in self._get_analysis_stream(analysis_id, file_format):
# f.write(chunk)
#
# def _get_analysis_stream(
# self,
# analysis_id: int,
# file_format: constants.AnalysisFormat
# ) -> Iterator[bytes]:
# """Process bytes return by API
#
# :param analysis_id: Anaylsis ID for which you download.
# :param file_format: File format of file.
# :return: Downloaded analysis file with iterator.
# """
# return self._get_stream("v1/analyses/{}/download".format(analysis_id), {
# "format": file_format.value,
# }).iter_content(constants.CHUNK_SIZE)
which might include code, classes, or functions. Output only the next line. | constants.HttpMethod.get.value, |
Predict the next line for this snippet: <|code_start|>
@patch.object(requests.Session, "request")
def test_create(self, mock_request: unittest.mock.Mock):
type(mock_request()).status_code = PropertyMock(return_value=200)
mock_request().json.return_value = {
"asyncRequests": [
{
"asyncRequest": {
"createdBy": {
"lastName": "test",
"id": 1,
"firstName": "admin",
"role": "ADMIN",
"email": "test@test.com",
"userName": "admin",
"active": True
},
"action": "PRE_ANALYSE",
"id": "1",
"dateCreated": "2014-11-03T16:03:11Z",
"asyncResponse": None
},
"analyse": {"id": "string"},
}
]
}
jobs = [1]
self.assertIsInstance(
Analysis(token="mock-token").create(jobs),
<|code_end|>
with the help of current file imports:
import requests
import unittest
from unittest.mock import patch, PropertyMock
from memsource import constants, models
from memsource.api_rest.analysis import Analysis
and context from other files:
# Path: memsource/constants.py
# class Base(enum.Enum):
# class JobStatus(enum.Enum):
# class ProjectStatus(enum.Enum):
# class AnalysisFormat(enum.Enum):
# class TermBaseFormat(enum.Enum):
# class ApiVersion(enum.Enum):
# class HttpMethod(enum.Enum):
# class BaseRest(enum.Enum):
# class JobStatusRest(enum.Enum):
# NEW = "New"
# EMAILED = "Emailed"
# ASSIGNED = "Assigned"
# DECLINED_BY_LINGUIST = "Declined_By_Linguist"
# COMPLETED_BY_LINGUIST = "Completed_By_Linguist"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# NEW = "New"
# ASSIGNED = "Assigned"
# COMPLETED = "Completed"
# CANCELLED = "Cancelled"
# ACCEPTED_BY_VENDOR = "Accepted_By_Vendor"
# DECLINED_BY_VENDOR = "Declined_By_Vendor"
# COMPLETED_BY_VENDOR = "Completed_By_Vendor"
# CSV = "CSV"
# LOG = "LOG"
# CSV_EXTENDED = "CSV_EXTENDED"
# XLSX = "XLSX"
# TBX = "TBX"
# NEW = "NEW"
# ACCEPTED = "ACCEPTED"
# DECLINED = "DECLINED"
# REJECTED = "REJECTED"
# DELIVERED = "DELIVERED"
# EMAILED = "EMAILED"
# COMPLETED = "COMPLETED"
# CANCELLED = "CANCELLED"
# CHUNK_SIZE = 1024
# CHAR_SET = "UTF-8"
# TM_THRESHOLD = 0.7
#
# Path: memsource/models.py
# class BaseModel(dict):
# class User(BaseModel):
# class Authentication(BaseModel):
# class Client(BaseModel):
# class Domain(BaseModel):
# class Language(BaseModel):
# class Project(BaseModel):
# class Job(BaseModel):
# class JobPart(BaseModel):
# class TranslationMemory(BaseModel):
# class AsynchronousRequest(BaseModel):
# class AsynchronousResponse(BaseModel):
# class Segment(BaseModel):
# class SegmentSearchResult(BaseModel):
# class Analysis(BaseModel):
# class MxliffUnit(BaseModel):
# class TermBase(BaseModel):
# def __getattr__(self, key):
# def _iso8601_to_datetime(self, source):
# def date_created(self):
# def __init__(self, source):
# def is_complete(self):
# def has_error(self):
#
# Path: memsource/api_rest/analysis.py
# class Analysis(api_rest.BaseApi):
# # Document: https://cloud.memsource.com/web/docs/api#tag/Analysis
#
# def get(self, analysis_id: int) -> models.Analysis:
# """Call get API.
#
# :param analysis_id: Get analysis of this id.
# :return: Result of analysis.
# """
# return models.Analysis(self._get("v3/analyses/{}".format(analysis_id)))
#
# def create(self, jobs: List[int]) -> models.AsynchronousRequest:
# """Create new analysis.
#
# :param jobs: Target of analysis.
# :return: Result of analysis.
# """
# return models.AsynchronousRequest(self._post("v2/analyses", {
# "jobs": [{"uid": job} for job in jobs],
# }))
#
# def delete(self, analysis_id: int, purge: bool=False) -> None:
# """Delete an analysis.
#
# :param analysis_id: Analysis ID you want to delete.
# :param purge:
# """
# self._delete("v1/analyses/{}".format(analysis_id), {"purge": purge})
#
# def get_by_project(self, project_id: str) -> List[models.Analysis]:
# """List Analyses By Project.
#
# :param project_id: Project ID for which you want to get the analyses.
# :return: List of Analyses.
# """
# project_analyses = self._get("v2/projects/{}/analyses".format(project_id))
# return [models.Analysis(analysis) for analysis in project_analyses["content"]]
#
# def download(
# self,
# analysis_id: int,
# dest_file_path: str,
# file_format: constants.AnalysisFormat=constants.AnalysisFormat.CSV,
# ) -> None:
# """Download analysis into specified file format.
#
# :param analysis_id: Anaylsis ID for which you download.
# :param dest_file_path: Destination path where you want to download the file.
# :param file_format: File format of file.
# :return: Downloaded file with content of the analysis
# """
# with open(dest_file_path, "wb") as f:
# for chunk in self._get_analysis_stream(analysis_id, file_format):
# f.write(chunk)
#
# def _get_analysis_stream(
# self,
# analysis_id: int,
# file_format: constants.AnalysisFormat
# ) -> Iterator[bytes]:
# """Process bytes return by API
#
# :param analysis_id: Anaylsis ID for which you download.
# :param file_format: File format of file.
# :return: Downloaded analysis file with iterator.
# """
# return self._get_stream("v1/analyses/{}/download".format(analysis_id), {
# "format": file_format.value,
# }).iter_content(constants.CHUNK_SIZE)
, which may contain function names, class names, or code. Output only the next line. | models.AsynchronousRequest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.