| Instruction | output_code |
|---|---|
Predict the next line after this snippet: <|code_start|>
HERE = os.path.dirname(os.path.abspath(__file__))
class TestSpore(TestCase):
def test_spore(self):
<|code_end|>
using the current file's imports:
from unittest import TestCase
from werkzeug.wrappers import BaseResponse
from werkzeug.test import Client
from rxjson import Rx
from rest_api_framework.controllers import WSGIDispatcher
from .app import ApiApp
import json
import os
and any relevant context from other files:
# Path: rest_api_framework/controllers.py
# class WSGIDispatcher(DispatcherMiddleware):
# """
# WSGIDispatcher take a list of :class:`.Controller` and mount them
# on their ressource mount point.
# basic syntax is:
#
# .. code-block:: python
#
# app = WSGIDispatcher([FirstApp, SecondApp])
# """
#
# def __init__(self, apps, name='PRAF', version='devel',
# base_url=None, formats=None, autodoc=True,
# autospore=True, hello=True):
# if formats is None:
# formats = []
# endpoints = {}
# for elem in apps:
# endpoints["/{0}".format(elem.ressource["ressource_name"])] = elem()
# if not formats:
# formats = ["json"]
# if autodoc:
# endpoints["/schema"] = self.make_schema(apps)
# if autospore:
# endpoints["/spore"] = self.make_spore(apps, name, base_url,
# version)
# if hello:
# endpoints["/"] = self.make_hello(name, version)
#
# app = NotFound()
# mounts = endpoints
# super(WSGIDispatcher, self).__init__(app, mounts=mounts)
#
# def make_schema(self, apps):
# return AutoDocGenerator(apps)
#
# def make_spore(self, apps, name, base_url, version):
# return AutoSporeGenerator(apps, name, base_url, version)
#
# def make_hello(self, name, version):
# return HelloGenerator(name, version)
#
# Path: tests/app.py
# class ApiApp(Controller):
# ressource = {
# "ressource_name": "address",
# "ressource": ressources,
# "model": ApiModel,
# "datastore": PythonListDataStore
# }
#
# controller = {
# "list_verbs": ["GET", "POST"],
# "unique_verbs": ["GET", "PUT", "DELETE"],
# "options": {"pagination": Pagination(20)}
# }
#
# view = {"response_class": JsonResponse}
. Output only the next line. | client = Client(WSGIDispatcher([ApiApp], name='ApiApp', version='1.0', |
Based on the snippet: <|code_start|>
HERE = os.path.dirname(os.path.abspath(__file__))
class TestSpore(TestCase):
def test_spore(self):
<|code_end|>
, predict the immediate next line with the help of imports:
from unittest import TestCase
from werkzeug.wrappers import BaseResponse
from werkzeug.test import Client
from rxjson import Rx
from rest_api_framework.controllers import WSGIDispatcher
from .app import ApiApp
import json
import os
and context (classes, functions, sometimes code) from other files:
# Path: rest_api_framework/controllers.py
# class WSGIDispatcher(DispatcherMiddleware):
# """
# WSGIDispatcher take a list of :class:`.Controller` and mount them
# on their ressource mount point.
# basic syntax is:
#
# .. code-block:: python
#
# app = WSGIDispatcher([FirstApp, SecondApp])
# """
#
# def __init__(self, apps, name='PRAF', version='devel',
# base_url=None, formats=None, autodoc=True,
# autospore=True, hello=True):
# if formats is None:
# formats = []
# endpoints = {}
# for elem in apps:
# endpoints["/{0}".format(elem.ressource["ressource_name"])] = elem()
# if not formats:
# formats = ["json"]
# if autodoc:
# endpoints["/schema"] = self.make_schema(apps)
# if autospore:
# endpoints["/spore"] = self.make_spore(apps, name, base_url,
# version)
# if hello:
# endpoints["/"] = self.make_hello(name, version)
#
# app = NotFound()
# mounts = endpoints
# super(WSGIDispatcher, self).__init__(app, mounts=mounts)
#
# def make_schema(self, apps):
# return AutoDocGenerator(apps)
#
# def make_spore(self, apps, name, base_url, version):
# return AutoSporeGenerator(apps, name, base_url, version)
#
# def make_hello(self, name, version):
# return HelloGenerator(name, version)
#
# Path: tests/app.py
# class ApiApp(Controller):
# ressource = {
# "ressource_name": "address",
# "ressource": ressources,
# "model": ApiModel,
# "datastore": PythonListDataStore
# }
#
# controller = {
# "list_verbs": ["GET", "POST"],
# "unique_verbs": ["GET", "PUT", "DELETE"],
# "options": {"pagination": Pagination(20)}
# }
#
# view = {"response_class": JsonResponse}
. Output only the next line. | client = Client(WSGIDispatcher([ApiApp], name='ApiApp', version='1.0', |
Given the code snippet: <|code_start|>ViConstString = _ctypes.POINTER(ViChar)
# Part Two: Type Assignments for VISA only, see spec table 3.1.2. The
# difference to the above is of no significance in Python, so I use it here
# only for easier synchronisation with the spec.
ViAccessMode, ViPAccessMode = _type_pair(ViUInt32)
ViBusAddress, ViPBusAddress = _type_pair(ViUInt32)
ViBusAddress64, ViPBusAddress64 = _type_pair(ViUInt64)
ViBusSize = ViUInt32
ViAttrState, ViPAttrState = _type_pair(ViUInt32)
# The following is weird, taken from news:zn2ek2w2.fsf@python.net
ViVAList = _ctypes.POINTER(_ctypes.c_char)
ViEventType, ViPEventType, ViAEventType = _type_triplet(ViUInt32)
ViPAttr = _ctypes.POINTER(ViAttr)
ViAAttr = ViPAttr
ViEventFilter = ViUInt32
ViFindList, ViPFindList = _type_pair(ViObject)
ViEvent, ViPEvent = _type_pair(ViObject)
ViJobId, ViPJobId = _type_pair(ViUInt32)
# Class of callback functions for event handling, first type is result type
<|code_end|>
, generate the next line using the imports in this file:
import ctypes as _ctypes
from .cthelper import FUNCTYPE
and context (functions, classes, or occasionally code) from other files:
# Path: pyvisa/ctwrapper/cthelper.py
# def define_find_libary():
# def _findlib_gcc(name):
# def _findlib_ldconfig(name):
# def _find_library(name):
. Output only the next line. | ViHndlr = FUNCTYPE(ViStatus, ViSession, ViEventType, ViEvent, ViAddr) |
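The `_type_pair` and `_type_triplet` helpers used in this record are not shown anywhere in it. Below is a minimal sketch of how such helpers are commonly written with `ctypes`; the helper bodies are assumptions, only the usage pattern is taken from the snippet.

```python
import ctypes as _ctypes

# Assumed helper bodies: each VISA type is aliased together with a pointer
# type; the "array" alias of a triplet is the same pointer type in ctypes.
def _type_pair(ctypes_type):
    return ctypes_type, _ctypes.POINTER(ctypes_type)

def _type_triplet(ctypes_type):
    value_type, pointer_type = _type_pair(ctypes_type)
    return value_type, pointer_type, pointer_type

ViUInt32 = _ctypes.c_uint32
ViEventType, ViPEventType, ViAEventType = _type_triplet(ViUInt32)
print(ViPEventType)  # a ctypes pointer type to ViUInt32
```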
Given the following code snippet before the placeholder: <|code_start|> if member.type == tarfile.DIRTYPE:
git_dirs.add(name)
else:
git_files.add(name)
return git_files, git_dirs
def _git_ls_files_and_dirs(toplevel):
# use git archive instead of git ls-file to honor
# export-ignore git attribute
cmd = ["git", "archive", "--prefix", toplevel + os.path.sep, "HEAD"]
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, cwd=toplevel, stderr=subprocess.DEVNULL
)
try:
try:
return _git_interpret_archive(proc.stdout, toplevel)
finally:
# ensure we avoid resource warnings by cleaning up the process
proc.stdout.close()
proc.terminate()
except Exception:
if proc.wait() != 0:
log.error("listing git files failed - pretending there aren't any")
return (), ()
def git_find_files(path=""):
toplevel = _git_toplevel(path)
<|code_end|>
, predict the next line using imports from the current file:
import logging
import os
import subprocess
import tarfile
from .file_finder import is_toplevel_acceptable
from .file_finder import scm_find_files
from .utils import do_ex
from .utils import trace
and context including class names, function names, and sometimes code from other files:
# Path: src/setuptools_scm/file_finder.py
# def is_toplevel_acceptable(toplevel):
# """ """
# if toplevel is None:
# return False
#
# ignored = os.environ.get("SETUPTOOLS_SCM_IGNORE_VCS_ROOTS", "").split(os.pathsep)
# ignored = [os.path.normcase(p) for p in ignored]
#
# trace(toplevel, ignored)
#
# return toplevel not in ignored
#
# Path: src/setuptools_scm/file_finder.py
# def scm_find_files(path, scm_files, scm_dirs):
# """ setuptools compatible file finder that follows symlinks
#
# - path: the root directory from which to search
# - scm_files: set of scm controlled files and symlinks
# (including symlinks to directories)
# - scm_dirs: set of scm controlled directories
# (including directories containing no scm controlled files)
#
# scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
# with normalized case (normcase)
#
# Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
# adding-support-for-revision-control-systems
# """
# realpath = os.path.normcase(os.path.realpath(path))
# seen = set()
# res = []
# for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# # dirpath with symlinks resolved
# realdirpath = os.path.normcase(os.path.realpath(dirpath))
#
# def _link_not_in_scm(n):
# fn = os.path.join(realdirpath, os.path.normcase(n))
# return os.path.islink(fn) and fn not in scm_files
#
# if realdirpath not in scm_dirs:
#             # directory not in scm, don't walk its content
# dirnames[:] = []
# continue
# if os.path.islink(dirpath) and not os.path.relpath(
# realdirpath, realpath
# ).startswith(os.pardir):
# # a symlink to a directory not outside path:
# # we keep it in the result and don't walk its content
# res.append(os.path.join(path, os.path.relpath(dirpath, path)))
# dirnames[:] = []
# continue
# if realdirpath in seen:
# # symlink loop protection
# dirnames[:] = []
# continue
# dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
# for filename in filenames:
# if _link_not_in_scm(filename):
# continue
# # dirpath + filename with symlinks preserved
# fullfilename = os.path.join(dirpath, filename)
# if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
# res.append(os.path.join(path, os.path.relpath(fullfilename, realpath)))
# seen.add(realdirpath)
# return res
#
# Path: src/setuptools_scm/utils.py
# def do_ex(cmd, cwd="."):
# trace("cmd", repr(cmd))
# trace(" in", cwd)
# if os.name == "posix" and not isinstance(cmd, (list, tuple)):
# cmd = shlex.split(cmd)
#
# p = _popen_pipes(cmd, cwd)
# out, err = p.communicate()
# if out:
# trace("out", repr(out))
# if err:
# trace("err", repr(err))
# if p.returncode:
# trace("ret", p.returncode)
# return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
#
# Path: src/setuptools_scm/utils.py
# def trace(*k) -> None:
# if DEBUG:
# print(*k, file=sys.stderr, flush=True)
. Output only the next line. | if not is_toplevel_acceptable(toplevel): |
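Only the tail of `_git_interpret_archive` is visible at the top of this record. A minimal self-contained reconstruction follows, assuming the full body follows the pattern shown: a stream-mode `tarfile` read over the output of `git archive`.

```python
import os
import tarfile

def _git_interpret_archive(fd, toplevel):
    # Read the tar stream produced by `git archive` sequentially and split
    # entries into tracked files and tracked directories.
    git_files = set()
    git_dirs = {toplevel}
    with tarfile.open(fileobj=fd, mode="r|*") as tf:
        for member in tf.getmembers():
            name = os.path.normcase(member.name).replace("/", os.path.sep)
            if member.type == tarfile.DIRTYPE:
                git_dirs.add(name)
            else:
                git_files.add(name)
    return git_files, git_dirs
```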
Next line prediction: <|code_start|>
def _git_ls_files_and_dirs(toplevel):
# use git archive instead of git ls-file to honor
# export-ignore git attribute
cmd = ["git", "archive", "--prefix", toplevel + os.path.sep, "HEAD"]
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, cwd=toplevel, stderr=subprocess.DEVNULL
)
try:
try:
return _git_interpret_archive(proc.stdout, toplevel)
finally:
# ensure we avoid resource warnings by cleaning up the process
proc.stdout.close()
proc.terminate()
except Exception:
if proc.wait() != 0:
log.error("listing git files failed - pretending there aren't any")
return (), ()
def git_find_files(path=""):
toplevel = _git_toplevel(path)
if not is_toplevel_acceptable(toplevel):
return []
fullpath = os.path.abspath(os.path.normpath(path))
if not fullpath.startswith(toplevel):
trace("toplevel mismatch", toplevel, fullpath)
git_files, git_dirs = _git_ls_files_and_dirs(toplevel)
<|code_end|>
. Use current file imports:
(import logging
import os
import subprocess
import tarfile
from .file_finder import is_toplevel_acceptable
from .file_finder import scm_find_files
from .utils import do_ex
from .utils import trace)
and context including class names, function names, or small code snippets from other files:
# Path: src/setuptools_scm/file_finder.py
# def is_toplevel_acceptable(toplevel):
# """ """
# if toplevel is None:
# return False
#
# ignored = os.environ.get("SETUPTOOLS_SCM_IGNORE_VCS_ROOTS", "").split(os.pathsep)
# ignored = [os.path.normcase(p) for p in ignored]
#
# trace(toplevel, ignored)
#
# return toplevel not in ignored
#
# Path: src/setuptools_scm/file_finder.py
# def scm_find_files(path, scm_files, scm_dirs):
# """ setuptools compatible file finder that follows symlinks
#
# - path: the root directory from which to search
# - scm_files: set of scm controlled files and symlinks
# (including symlinks to directories)
# - scm_dirs: set of scm controlled directories
# (including directories containing no scm controlled files)
#
# scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
# with normalized case (normcase)
#
# Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
# adding-support-for-revision-control-systems
# """
# realpath = os.path.normcase(os.path.realpath(path))
# seen = set()
# res = []
# for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# # dirpath with symlinks resolved
# realdirpath = os.path.normcase(os.path.realpath(dirpath))
#
# def _link_not_in_scm(n):
# fn = os.path.join(realdirpath, os.path.normcase(n))
# return os.path.islink(fn) and fn not in scm_files
#
# if realdirpath not in scm_dirs:
#             # directory not in scm, don't walk its content
# dirnames[:] = []
# continue
# if os.path.islink(dirpath) and not os.path.relpath(
# realdirpath, realpath
# ).startswith(os.pardir):
# # a symlink to a directory not outside path:
# # we keep it in the result and don't walk its content
# res.append(os.path.join(path, os.path.relpath(dirpath, path)))
# dirnames[:] = []
# continue
# if realdirpath in seen:
# # symlink loop protection
# dirnames[:] = []
# continue
# dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
# for filename in filenames:
# if _link_not_in_scm(filename):
# continue
# # dirpath + filename with symlinks preserved
# fullfilename = os.path.join(dirpath, filename)
# if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
# res.append(os.path.join(path, os.path.relpath(fullfilename, realpath)))
# seen.add(realdirpath)
# return res
#
# Path: src/setuptools_scm/utils.py
# def do_ex(cmd, cwd="."):
# trace("cmd", repr(cmd))
# trace(" in", cwd)
# if os.name == "posix" and not isinstance(cmd, (list, tuple)):
# cmd = shlex.split(cmd)
#
# p = _popen_pipes(cmd, cwd)
# out, err = p.communicate()
# if out:
# trace("out", repr(out))
# if err:
# trace("err", repr(err))
# if p.returncode:
# trace("ret", p.returncode)
# return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
#
# Path: src/setuptools_scm/utils.py
# def trace(*k) -> None:
# if DEBUG:
# print(*k, file=sys.stderr, flush=True)
. Output only the next line. | return scm_find_files(path, git_files, git_dirs) |
Here is a snippet: <|code_start|>
log = logging.getLogger(__name__)
def _git_toplevel(path):
try:
cwd = os.path.abspath(path or ".")
<|code_end|>
. Write the next line using the current file imports:
import logging
import os
import subprocess
import tarfile
from .file_finder import is_toplevel_acceptable
from .file_finder import scm_find_files
from .utils import do_ex
from .utils import trace
and context from other files:
# Path: src/setuptools_scm/file_finder.py
# def is_toplevel_acceptable(toplevel):
# """ """
# if toplevel is None:
# return False
#
# ignored = os.environ.get("SETUPTOOLS_SCM_IGNORE_VCS_ROOTS", "").split(os.pathsep)
# ignored = [os.path.normcase(p) for p in ignored]
#
# trace(toplevel, ignored)
#
# return toplevel not in ignored
#
# Path: src/setuptools_scm/file_finder.py
# def scm_find_files(path, scm_files, scm_dirs):
# """ setuptools compatible file finder that follows symlinks
#
# - path: the root directory from which to search
# - scm_files: set of scm controlled files and symlinks
# (including symlinks to directories)
# - scm_dirs: set of scm controlled directories
# (including directories containing no scm controlled files)
#
# scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
# with normalized case (normcase)
#
# Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
# adding-support-for-revision-control-systems
# """
# realpath = os.path.normcase(os.path.realpath(path))
# seen = set()
# res = []
# for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# # dirpath with symlinks resolved
# realdirpath = os.path.normcase(os.path.realpath(dirpath))
#
# def _link_not_in_scm(n):
# fn = os.path.join(realdirpath, os.path.normcase(n))
# return os.path.islink(fn) and fn not in scm_files
#
# if realdirpath not in scm_dirs:
#             # directory not in scm, don't walk its content
# dirnames[:] = []
# continue
# if os.path.islink(dirpath) and not os.path.relpath(
# realdirpath, realpath
# ).startswith(os.pardir):
# # a symlink to a directory not outside path:
# # we keep it in the result and don't walk its content
# res.append(os.path.join(path, os.path.relpath(dirpath, path)))
# dirnames[:] = []
# continue
# if realdirpath in seen:
# # symlink loop protection
# dirnames[:] = []
# continue
# dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
# for filename in filenames:
# if _link_not_in_scm(filename):
# continue
# # dirpath + filename with symlinks preserved
# fullfilename = os.path.join(dirpath, filename)
# if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
# res.append(os.path.join(path, os.path.relpath(fullfilename, realpath)))
# seen.add(realdirpath)
# return res
#
# Path: src/setuptools_scm/utils.py
# def do_ex(cmd, cwd="."):
# trace("cmd", repr(cmd))
# trace(" in", cwd)
# if os.name == "posix" and not isinstance(cmd, (list, tuple)):
# cmd = shlex.split(cmd)
#
# p = _popen_pipes(cmd, cwd)
# out, err = p.communicate()
# if out:
# trace("out", repr(out))
# if err:
# trace("err", repr(err))
# if p.returncode:
# trace("ret", p.returncode)
# return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
#
# Path: src/setuptools_scm/utils.py
# def trace(*k) -> None:
# if DEBUG:
# print(*k, file=sys.stderr, flush=True)
, which may include functions, classes, or code. Output only the next line. | out, err, ret = do_ex(["git", "rev-parse", "HEAD"], cwd=cwd) |
Given the following code snippet before the placeholder: <|code_start|>log = logging.getLogger(__name__)
def _git_toplevel(path):
try:
cwd = os.path.abspath(path or ".")
out, err, ret = do_ex(["git", "rev-parse", "HEAD"], cwd=cwd)
if ret != 0:
# BAIL if there is no commit
log.error("listing git files failed - pretending there aren't any")
return None
out, err, ret = do_ex(
["git", "rev-parse", "--show-prefix"],
cwd=cwd,
)
if ret != 0:
return None
out = out.strip()[:-1] # remove the trailing pathsep
if not out:
out = cwd
else:
# Here, ``out`` is a relative path to root of git.
# ``cwd`` is absolute path to current working directory.
# the below method removes the length of ``out`` from
# ``cwd``, which gives the git toplevel
assert cwd.replace("\\", "/").endswith(out), f"cwd={cwd!r}\nout={out!r}"
# In windows cwd contains ``\`` which should be replaced by ``/``
# for this assertion to work. Length of string isn't changed by replace
# ``\\`` is just and escape for `\`
out = cwd[: -len(out)]
<|code_end|>
, predict the next line using imports from the current file:
import logging
import os
import subprocess
import tarfile
from .file_finder import is_toplevel_acceptable
from .file_finder import scm_find_files
from .utils import do_ex
from .utils import trace
and context including class names, function names, and sometimes code from other files:
# Path: src/setuptools_scm/file_finder.py
# def is_toplevel_acceptable(toplevel):
# """ """
# if toplevel is None:
# return False
#
# ignored = os.environ.get("SETUPTOOLS_SCM_IGNORE_VCS_ROOTS", "").split(os.pathsep)
# ignored = [os.path.normcase(p) for p in ignored]
#
# trace(toplevel, ignored)
#
# return toplevel not in ignored
#
# Path: src/setuptools_scm/file_finder.py
# def scm_find_files(path, scm_files, scm_dirs):
# """ setuptools compatible file finder that follows symlinks
#
# - path: the root directory from which to search
# - scm_files: set of scm controlled files and symlinks
# (including symlinks to directories)
# - scm_dirs: set of scm controlled directories
# (including directories containing no scm controlled files)
#
# scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
# with normalized case (normcase)
#
# Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
# adding-support-for-revision-control-systems
# """
# realpath = os.path.normcase(os.path.realpath(path))
# seen = set()
# res = []
# for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# # dirpath with symlinks resolved
# realdirpath = os.path.normcase(os.path.realpath(dirpath))
#
# def _link_not_in_scm(n):
# fn = os.path.join(realdirpath, os.path.normcase(n))
# return os.path.islink(fn) and fn not in scm_files
#
# if realdirpath not in scm_dirs:
#             # directory not in scm, don't walk its content
# dirnames[:] = []
# continue
# if os.path.islink(dirpath) and not os.path.relpath(
# realdirpath, realpath
# ).startswith(os.pardir):
# # a symlink to a directory not outside path:
# # we keep it in the result and don't walk its content
# res.append(os.path.join(path, os.path.relpath(dirpath, path)))
# dirnames[:] = []
# continue
# if realdirpath in seen:
# # symlink loop protection
# dirnames[:] = []
# continue
# dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
# for filename in filenames:
# if _link_not_in_scm(filename):
# continue
# # dirpath + filename with symlinks preserved
# fullfilename = os.path.join(dirpath, filename)
# if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
# res.append(os.path.join(path, os.path.relpath(fullfilename, realpath)))
# seen.add(realdirpath)
# return res
#
# Path: src/setuptools_scm/utils.py
# def do_ex(cmd, cwd="."):
# trace("cmd", repr(cmd))
# trace(" in", cwd)
# if os.name == "posix" and not isinstance(cmd, (list, tuple)):
# cmd = shlex.split(cmd)
#
# p = _popen_pipes(cmd, cwd)
# out, err = p.communicate()
# if out:
# trace("out", repr(out))
# if err:
# trace("err", repr(err))
# if p.returncode:
# trace("ret", p.returncode)
# return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
#
# Path: src/setuptools_scm/utils.py
# def trace(*k) -> None:
# if DEBUG:
# print(*k, file=sys.stderr, flush=True)
. Output only the next line. | trace("find files toplevel", out) |
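The suffix-stripping step in this record is easy to misread, so here is a tiny worked example with hypothetical paths:

```python
# Hypothetical values for the `--show-prefix` suffix-stripping logic above.
cwd = "/home/user/project/src/pkg"  # os.path.abspath(path or ".")
out = "src/pkg"                     # `git rev-parse --show-prefix`, trailing pathsep removed
assert cwd.replace("\\", "/").endswith(out)
toplevel = cwd[: -len(out)]
print(toplevel)                     # -> "/home/user/project/"
```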
Using the snippet: <|code_start|>
def parse_pkginfo(root, config=None):
pkginfo = os.path.join(root, "PKG-INFO")
trace("pkginfo", pkginfo)
<|code_end|>
, determine the next line of code. You have imports:
import os
from .utils import data_from_mime
from .utils import trace
from .version import meta
from .version import tag_to_version
and context (class names, function names, or code) available:
# Path: src/setuptools_scm/utils.py
# def data_from_mime(path):
# with open(path, encoding="utf-8") as fp:
# content = fp.read()
# trace("content", repr(content))
# # the complex conditions come from reading pseudo-mime-messages
# data = dict(x.split(": ", 1) for x in content.splitlines() if ": " in x)
# trace("data", data)
# return data
#
# Path: src/setuptools_scm/utils.py
# def trace(*k) -> None:
# if DEBUG:
# print(*k, file=sys.stderr, flush=True)
#
# Path: src/setuptools_scm/version.py
# def meta(
# tag,
# distance: "int|None" = None,
# dirty: bool = False,
# node: "str|None" = None,
# preformatted: bool = False,
# branch: "str|None" = None,
# config: "Configuration|None" = None,
# **kw,
# ) -> ScmVersion:
# if not config:
# warnings.warn(
# "meta invoked without explicit configuration,"
# " will use defaults where required."
# )
# parsed_version = _parse_tag(tag, preformatted, config)
# trace("version", tag, "->", parsed_version)
# assert parsed_version is not None, "Can't parse version %s" % tag
# return ScmVersion(
# parsed_version, distance, node, dirty, preformatted, branch, config, **kw
# )
#
# Path: src/setuptools_scm/version.py
# def tag_to_version(tag, config: "Configuration | None" = None):
# """
# take a tag that might be prefixed with a keyword and return only the version part
# :param config: optional configuration object
# """
# trace("tag", tag)
#
# if not config:
# config = Configuration()
#
# tagdict = _parse_version_tag(tag, config)
# if not isinstance(tagdict, dict) or not tagdict.get("version", None):
# warnings.warn(f"tag {tag!r} no version found")
# return None
#
# version = tagdict["version"]
# trace("version pre parse", version)
#
# if tagdict.get("suffix", ""):
# warnings.warn(
# "tag {!r} will be stripped of its suffix '{}'".format(
# tag, tagdict["suffix"]
# )
# )
#
# version = config.version_cls(version)
# trace("version", repr(version))
#
# return version
. Output only the next line. | data = data_from_mime(pkginfo) |
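The `data_from_mime` helper quoted in the context reduces to a one-line dict comprehension over `": "`-separated lines. A tiny worked example with hypothetical PKG-INFO contents:

```python
# Hypothetical PKG-INFO contents; the parsing line is taken verbatim from the
# quoted data_from_mime helper.
content = "Metadata-Version: 2.1\nName: example\nVersion: 1.2.3\n"
data = dict(x.split(": ", 1) for x in content.splitlines() if ": " in x)
assert data.get("Version") == "1.2.3"
```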
Here is a snippet: <|code_start|>
def parse_pkginfo(root, config=None):
pkginfo = os.path.join(root, "PKG-INFO")
trace("pkginfo", pkginfo)
data = data_from_mime(pkginfo)
version = data.get("Version")
if version != "UNKNOWN":
<|code_end|>
. Write the next line using the current file imports:
import os
from .utils import data_from_mime
from .utils import trace
from .version import meta
from .version import tag_to_version
and context from other files:
# Path: src/setuptools_scm/utils.py
# def data_from_mime(path):
# with open(path, encoding="utf-8") as fp:
# content = fp.read()
# trace("content", repr(content))
# # the complex conditions come from reading pseudo-mime-messages
# data = dict(x.split(": ", 1) for x in content.splitlines() if ": " in x)
# trace("data", data)
# return data
#
# Path: src/setuptools_scm/utils.py
# def trace(*k) -> None:
# if DEBUG:
# print(*k, file=sys.stderr, flush=True)
#
# Path: src/setuptools_scm/version.py
# def meta(
# tag,
# distance: "int|None" = None,
# dirty: bool = False,
# node: "str|None" = None,
# preformatted: bool = False,
# branch: "str|None" = None,
# config: "Configuration|None" = None,
# **kw,
# ) -> ScmVersion:
# if not config:
# warnings.warn(
# "meta invoked without explicit configuration,"
# " will use defaults where required."
# )
# parsed_version = _parse_tag(tag, preformatted, config)
# trace("version", tag, "->", parsed_version)
# assert parsed_version is not None, "Can't parse version %s" % tag
# return ScmVersion(
# parsed_version, distance, node, dirty, preformatted, branch, config, **kw
# )
#
# Path: src/setuptools_scm/version.py
# def tag_to_version(tag, config: "Configuration | None" = None):
# """
# take a tag that might be prefixed with a keyword and return only the version part
# :param config: optional configuration object
# """
# trace("tag", tag)
#
# if not config:
# config = Configuration()
#
# tagdict = _parse_version_tag(tag, config)
# if not isinstance(tagdict, dict) or not tagdict.get("version", None):
# warnings.warn(f"tag {tag!r} no version found")
# return None
#
# version = tagdict["version"]
# trace("version pre parse", version)
#
# if tagdict.get("suffix", ""):
# warnings.warn(
# "tag {!r} will be stripped of its suffix '{}'".format(
# tag, tagdict["suffix"]
# )
# )
#
# version = config.version_cls(version)
# trace("version", repr(version))
#
# return version
, which may include functions, classes, or code. Output only the next line. | return meta(version, preformatted=True, config=config) |
Here is a snippet: <|code_start|>
def parse_pkginfo(root, config=None):
pkginfo = os.path.join(root, "PKG-INFO")
trace("pkginfo", pkginfo)
data = data_from_mime(pkginfo)
version = data.get("Version")
if version != "UNKNOWN":
return meta(version, preformatted=True, config=config)
def parse_pip_egg_info(root, config=None):
pipdir = os.path.join(root, "pip-egg-info")
if not os.path.isdir(pipdir):
return
items = os.listdir(pipdir)
trace("pip-egg-info", pipdir, items)
if not items:
return
return parse_pkginfo(os.path.join(pipdir, items[0]), config=config)
def fallback_version(root, config=None):
if config.parentdir_prefix_version is not None:
_, parent_name = os.path.split(os.path.abspath(root))
if parent_name.startswith(config.parentdir_prefix_version):
<|code_end|>
. Write the next line using the current file imports:
import os
from .utils import data_from_mime
from .utils import trace
from .version import meta
from .version import tag_to_version
and context from other files:
# Path: src/setuptools_scm/utils.py
# def data_from_mime(path):
# with open(path, encoding="utf-8") as fp:
# content = fp.read()
# trace("content", repr(content))
# # the complex conditions come from reading pseudo-mime-messages
# data = dict(x.split(": ", 1) for x in content.splitlines() if ": " in x)
# trace("data", data)
# return data
#
# Path: src/setuptools_scm/utils.py
# def trace(*k) -> None:
# if DEBUG:
# print(*k, file=sys.stderr, flush=True)
#
# Path: src/setuptools_scm/version.py
# def meta(
# tag,
# distance: "int|None" = None,
# dirty: bool = False,
# node: "str|None" = None,
# preformatted: bool = False,
# branch: "str|None" = None,
# config: "Configuration|None" = None,
# **kw,
# ) -> ScmVersion:
# if not config:
# warnings.warn(
# "meta invoked without explicit configuration,"
# " will use defaults where required."
# )
# parsed_version = _parse_tag(tag, preformatted, config)
# trace("version", tag, "->", parsed_version)
# assert parsed_version is not None, "Can't parse version %s" % tag
# return ScmVersion(
# parsed_version, distance, node, dirty, preformatted, branch, config, **kw
# )
#
# Path: src/setuptools_scm/version.py
# def tag_to_version(tag, config: "Configuration | None" = None):
# """
# take a tag that might be prefixed with a keyword and return only the version part
# :param config: optional configuration object
# """
# trace("tag", tag)
#
# if not config:
# config = Configuration()
#
# tagdict = _parse_version_tag(tag, config)
# if not isinstance(tagdict, dict) or not tagdict.get("version", None):
# warnings.warn(f"tag {tag!r} no version found")
# return None
#
# version = tagdict["version"]
# trace("version pre parse", version)
#
# if tagdict.get("suffix", ""):
# warnings.warn(
# "tag {!r} will be stripped of its suffix '{}'".format(
# tag, tagdict["suffix"]
# )
# )
#
# version = config.version_cls(version)
# trace("version", repr(version))
#
# return version
, which may include functions, classes, or code. Output only the next line. | version = tag_to_version( |
Given the code snippet: <|code_start|>
class Workdir:
def __init__(self, path):
require_command(self.COMMAND)
self.path = path
def do_ex(self, cmd):
return do_ex(cmd, cwd=self.path)
<|code_end|>
, generate the next line using the imports in this file:
from .utils import do
from .utils import do_ex
from .utils import require_command
and context (functions, classes, or occasionally code) from other files:
# Path: src/setuptools_scm/utils.py
# def do(cmd, cwd="."):
# out, err, ret = do_ex(cmd, cwd)
# if ret:
# print(err)
# return out
#
# Path: src/setuptools_scm/utils.py
# def do_ex(cmd, cwd="."):
# trace("cmd", repr(cmd))
# trace(" in", cwd)
# if os.name == "posix" and not isinstance(cmd, (list, tuple)):
# cmd = shlex.split(cmd)
#
# p = _popen_pipes(cmd, cwd)
# out, err = p.communicate()
# if out:
# trace("out", repr(out))
# if err:
# trace("err", repr(err))
# if p.returncode:
# trace("ret", p.returncode)
# return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
#
# Path: src/setuptools_scm/utils.py
# def require_command(name):
# if not has_command(name, warn=False):
# raise OSError("%r was not found" % name)
. Output only the next line. | def do(self, cmd): |
Given snippet: <|code_start|>
class Workdir:
def __init__(self, path):
require_command(self.COMMAND)
self.path = path
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .utils import do
from .utils import do_ex
from .utils import require_command
and context:
# Path: src/setuptools_scm/utils.py
# def do(cmd, cwd="."):
# out, err, ret = do_ex(cmd, cwd)
# if ret:
# print(err)
# return out
#
# Path: src/setuptools_scm/utils.py
# def do_ex(cmd, cwd="."):
# trace("cmd", repr(cmd))
# trace(" in", cwd)
# if os.name == "posix" and not isinstance(cmd, (list, tuple)):
# cmd = shlex.split(cmd)
#
# p = _popen_pipes(cmd, cwd)
# out, err = p.communicate()
# if out:
# trace("out", repr(out))
# if err:
# trace("err", repr(err))
# if p.returncode:
# trace("ret", p.returncode)
# return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
#
# Path: src/setuptools_scm/utils.py
# def require_command(name):
# if not has_command(name, warn=False):
# raise OSError("%r was not found" % name)
which might include code, classes, or functions. Output only the next line. | def do_ex(self, cmd): |
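Taken together, the two predictions for this class yield a small command-runner wrapper. Below is a self-contained sketch with reduced stand-ins for the `utils` helpers; the stand-in bodies and the default `COMMAND` are assumptions.

```python
import shutil
import subprocess

# Reduced stand-ins for setuptools_scm.utils require_command / do_ex / do.
def require_command(name):
    if shutil.which(name) is None:
        raise OSError("%r was not found" % name)

def do_ex(cmd, cwd="."):
    p = subprocess.run(cmd, cwd=cwd, capture_output=True, text=True)
    return p.stdout.strip(), p.stderr.strip(), p.returncode

def do(cmd, cwd="."):
    out, err, ret = do_ex(cmd, cwd)
    if ret:
        print(err)
    return out

class Workdir:
    COMMAND = "git"  # hypothetical default; real subclasses set COMMAND

    def __init__(self, path):
        require_command(self.COMMAND)
        self.path = path

    def do_ex(self, cmd):
        return do_ex(cmd, cwd=self.path)

    def do(self, cmd):
        return do(cmd, cwd=self.path)
```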
Predict the next line for this snippet: <|code_start|> ).startswith(os.pardir):
# a symlink to a directory not outside path:
# we keep it in the result and don't walk its content
res.append(os.path.join(path, os.path.relpath(dirpath, path)))
dirnames[:] = []
continue
if realdirpath in seen:
# symlink loop protection
dirnames[:] = []
continue
dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
for filename in filenames:
if _link_not_in_scm(filename):
continue
# dirpath + filename with symlinks preserved
fullfilename = os.path.join(dirpath, filename)
if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
res.append(os.path.join(path, os.path.relpath(fullfilename, realpath)))
seen.add(realdirpath)
return res
def is_toplevel_acceptable(toplevel):
""" """
if toplevel is None:
return False
ignored = os.environ.get("SETUPTOOLS_SCM_IGNORE_VCS_ROOTS", "").split(os.pathsep)
ignored = [os.path.normcase(p) for p in ignored]
<|code_end|>
with the help of current file imports:
import os
from .utils import trace
and context from other files:
# Path: src/setuptools_scm/utils.py
# def trace(*k) -> None:
# if DEBUG:
# print(*k, file=sys.stderr, flush=True)
, which may contain function names, class names, or code. Output only the next line. | trace(toplevel, ignored) |
Next line prediction: <|code_start|> stderr=devnull,
)
return os.path.normcase(os.path.realpath(out.strip()))
except subprocess.CalledProcessError:
# hg returned error, we are not in a mercurial repo
return None
except OSError:
# hg command not found, probably
return None
def _hg_ls_files_and_dirs(toplevel):
hg_files = set()
hg_dirs = {toplevel}
out, err, ret = do_ex(["hg", "files"], cwd=toplevel)
if ret:
        return (), ()
for name in out.splitlines():
name = os.path.normcase(name).replace("/", os.path.sep)
fullname = os.path.join(toplevel, name)
hg_files.add(fullname)
dirname = os.path.dirname(fullname)
while len(dirname) > len(toplevel) and dirname not in hg_dirs:
hg_dirs.add(dirname)
dirname = os.path.dirname(dirname)
return hg_files, hg_dirs
def hg_find_files(path=""):
toplevel = _hg_toplevel(path)
<|code_end|>
. Use current file imports:
(import os
import subprocess
from .file_finder import is_toplevel_acceptable
from .file_finder import scm_find_files
from .utils import do_ex)
and context including class names, function names, or small code snippets from other files:
# Path: src/setuptools_scm/file_finder.py
# def is_toplevel_acceptable(toplevel):
# """ """
# if toplevel is None:
# return False
#
# ignored = os.environ.get("SETUPTOOLS_SCM_IGNORE_VCS_ROOTS", "").split(os.pathsep)
# ignored = [os.path.normcase(p) for p in ignored]
#
# trace(toplevel, ignored)
#
# return toplevel not in ignored
#
# Path: src/setuptools_scm/file_finder.py
# def scm_find_files(path, scm_files, scm_dirs):
# """ setuptools compatible file finder that follows symlinks
#
# - path: the root directory from which to search
# - scm_files: set of scm controlled files and symlinks
# (including symlinks to directories)
# - scm_dirs: set of scm controlled directories
# (including directories containing no scm controlled files)
#
# scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
# with normalized case (normcase)
#
# Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
# adding-support-for-revision-control-systems
# """
# realpath = os.path.normcase(os.path.realpath(path))
# seen = set()
# res = []
# for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# # dirpath with symlinks resolved
# realdirpath = os.path.normcase(os.path.realpath(dirpath))
#
# def _link_not_in_scm(n):
# fn = os.path.join(realdirpath, os.path.normcase(n))
# return os.path.islink(fn) and fn not in scm_files
#
# if realdirpath not in scm_dirs:
#             # directory not in scm, don't walk its content
# dirnames[:] = []
# continue
# if os.path.islink(dirpath) and not os.path.relpath(
# realdirpath, realpath
# ).startswith(os.pardir):
# # a symlink to a directory not outside path:
# # we keep it in the result and don't walk its content
# res.append(os.path.join(path, os.path.relpath(dirpath, path)))
# dirnames[:] = []
# continue
# if realdirpath in seen:
# # symlink loop protection
# dirnames[:] = []
# continue
# dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
# for filename in filenames:
# if _link_not_in_scm(filename):
# continue
# # dirpath + filename with symlinks preserved
# fullfilename = os.path.join(dirpath, filename)
# if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
# res.append(os.path.join(path, os.path.relpath(fullfilename, realpath)))
# seen.add(realdirpath)
# return res
#
# Path: src/setuptools_scm/utils.py
# def do_ex(cmd, cwd="."):
# trace("cmd", repr(cmd))
# trace(" in", cwd)
# if os.name == "posix" and not isinstance(cmd, (list, tuple)):
# cmd = shlex.split(cmd)
#
# p = _popen_pipes(cmd, cwd)
# out, err = p.communicate()
# if out:
# trace("out", repr(out))
# if err:
# trace("err", repr(err))
# if p.returncode:
# trace("ret", p.returncode)
# return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
. Output only the next line. | if not is_toplevel_acceptable(toplevel): |
Based on the snippet: <|code_start|> except subprocess.CalledProcessError:
# hg returned error, we are not in a mercurial repo
return None
except OSError:
# hg command not found, probably
return None
def _hg_ls_files_and_dirs(toplevel):
hg_files = set()
hg_dirs = {toplevel}
out, err, ret = do_ex(["hg", "files"], cwd=toplevel)
if ret:
        return (), ()
for name in out.splitlines():
name = os.path.normcase(name).replace("/", os.path.sep)
fullname = os.path.join(toplevel, name)
hg_files.add(fullname)
dirname = os.path.dirname(fullname)
while len(dirname) > len(toplevel) and dirname not in hg_dirs:
hg_dirs.add(dirname)
dirname = os.path.dirname(dirname)
return hg_files, hg_dirs
def hg_find_files(path=""):
toplevel = _hg_toplevel(path)
if not is_toplevel_acceptable(toplevel):
return []
hg_files, hg_dirs = _hg_ls_files_and_dirs(toplevel)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import subprocess
from .file_finder import is_toplevel_acceptable
from .file_finder import scm_find_files
from .utils import do_ex
and context (classes, functions, sometimes code) from other files:
# Path: src/setuptools_scm/file_finder.py
# def is_toplevel_acceptable(toplevel):
# """ """
# if toplevel is None:
# return False
#
# ignored = os.environ.get("SETUPTOOLS_SCM_IGNORE_VCS_ROOTS", "").split(os.pathsep)
# ignored = [os.path.normcase(p) for p in ignored]
#
# trace(toplevel, ignored)
#
# return toplevel not in ignored
#
# Path: src/setuptools_scm/file_finder.py
# def scm_find_files(path, scm_files, scm_dirs):
# """ setuptools compatible file finder that follows symlinks
#
# - path: the root directory from which to search
# - scm_files: set of scm controlled files and symlinks
# (including symlinks to directories)
# - scm_dirs: set of scm controlled directories
# (including directories containing no scm controlled files)
#
# scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
# with normalized case (normcase)
#
# Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
# adding-support-for-revision-control-systems
# """
# realpath = os.path.normcase(os.path.realpath(path))
# seen = set()
# res = []
# for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# # dirpath with symlinks resolved
# realdirpath = os.path.normcase(os.path.realpath(dirpath))
#
# def _link_not_in_scm(n):
# fn = os.path.join(realdirpath, os.path.normcase(n))
# return os.path.islink(fn) and fn not in scm_files
#
# if realdirpath not in scm_dirs:
#             # directory not in scm, don't walk its content
# dirnames[:] = []
# continue
# if os.path.islink(dirpath) and not os.path.relpath(
# realdirpath, realpath
# ).startswith(os.pardir):
# # a symlink to a directory not outside path:
# # we keep it in the result and don't walk its content
# res.append(os.path.join(path, os.path.relpath(dirpath, path)))
# dirnames[:] = []
# continue
# if realdirpath in seen:
# # symlink loop protection
# dirnames[:] = []
# continue
# dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
# for filename in filenames:
# if _link_not_in_scm(filename):
# continue
# # dirpath + filename with symlinks preserved
# fullfilename = os.path.join(dirpath, filename)
# if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
# res.append(os.path.join(path, os.path.relpath(fullfilename, realpath)))
# seen.add(realdirpath)
# return res
#
# Path: src/setuptools_scm/utils.py
# def do_ex(cmd, cwd="."):
# trace("cmd", repr(cmd))
# trace(" in", cwd)
# if os.name == "posix" and not isinstance(cmd, (list, tuple)):
# cmd = shlex.split(cmd)
#
# p = _popen_pipes(cmd, cwd)
# out, err = p.communicate()
# if out:
# trace("out", repr(out))
# if err:
# trace("err", repr(err))
# if p.returncode:
# trace("ret", p.returncode)
# return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
. Output only the next line. | return scm_find_files(path, hg_files, hg_dirs) |
Next line prediction: <|code_start|>
def _hg_toplevel(path):
try:
with open(os.devnull, "wb") as devnull:
out = subprocess.check_output(
["hg", "root"],
cwd=(path or "."),
universal_newlines=True,
stderr=devnull,
)
return os.path.normcase(os.path.realpath(out.strip()))
except subprocess.CalledProcessError:
# hg returned error, we are not in a mercurial repo
return None
except OSError:
# hg command not found, probably
return None
def _hg_ls_files_and_dirs(toplevel):
hg_files = set()
hg_dirs = {toplevel}
<|code_end|>
. Use current file imports:
(import os
import subprocess
from .file_finder import is_toplevel_acceptable
from .file_finder import scm_find_files
from .utils import do_ex)
and context including class names, function names, or small code snippets from other files:
# Path: src/setuptools_scm/file_finder.py
# def is_toplevel_acceptable(toplevel):
# """ """
# if toplevel is None:
# return False
#
# ignored = os.environ.get("SETUPTOOLS_SCM_IGNORE_VCS_ROOTS", "").split(os.pathsep)
# ignored = [os.path.normcase(p) for p in ignored]
#
# trace(toplevel, ignored)
#
# return toplevel not in ignored
#
# Path: src/setuptools_scm/file_finder.py
# def scm_find_files(path, scm_files, scm_dirs):
# """ setuptools compatible file finder that follows symlinks
#
# - path: the root directory from which to search
# - scm_files: set of scm controlled files and symlinks
# (including symlinks to directories)
# - scm_dirs: set of scm controlled directories
# (including directories containing no scm controlled files)
#
# scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
# with normalized case (normcase)
#
# Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
# adding-support-for-revision-control-systems
# """
# realpath = os.path.normcase(os.path.realpath(path))
# seen = set()
# res = []
# for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# # dirpath with symlinks resolved
# realdirpath = os.path.normcase(os.path.realpath(dirpath))
#
# def _link_not_in_scm(n):
# fn = os.path.join(realdirpath, os.path.normcase(n))
# return os.path.islink(fn) and fn not in scm_files
#
# if realdirpath not in scm_dirs:
#             # directory not in scm, don't walk its content
# dirnames[:] = []
# continue
# if os.path.islink(dirpath) and not os.path.relpath(
# realdirpath, realpath
# ).startswith(os.pardir):
# # a symlink to a directory not outside path:
# # we keep it in the result and don't walk its content
# res.append(os.path.join(path, os.path.relpath(dirpath, path)))
# dirnames[:] = []
# continue
# if realdirpath in seen:
# # symlink loop protection
# dirnames[:] = []
# continue
# dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
# for filename in filenames:
# if _link_not_in_scm(filename):
# continue
# # dirpath + filename with symlinks preserved
# fullfilename = os.path.join(dirpath, filename)
# if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
# res.append(os.path.join(path, os.path.relpath(fullfilename, realpath)))
# seen.add(realdirpath)
# return res
#
# Path: src/setuptools_scm/utils.py
# def do_ex(cmd, cwd="."):
# trace("cmd", repr(cmd))
# trace(" in", cwd)
# if os.name == "posix" and not isinstance(cmd, (list, tuple)):
# cmd = shlex.split(cmd)
#
# p = _popen_pipes(cmd, cwd)
# out, err = p.communicate()
# if out:
# trace("out", repr(out))
# if err:
# trace("err", repr(err))
# if p.returncode:
# trace("ret", p.returncode)
# return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
. Output only the next line. | out, err, ret = do_ex(["hg", "files"], cwd=toplevel) |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test code for envVocabulary for SmartContainers.
This module provides tests for the ComputationalEnvironment ontology.
"""
tstuuid = str(uuid.uuid4())
uuidurn = "urn:uuid:"+tstuuid
def test_create_graph():
<|code_end|>
, generate the next line using the imports in this file:
import json
import pytest
import uuid
from rdflib import Namespace, URIRef, RDF, RDFS, Literal
from sc import baseVocabulary
from sc import envVocabulary
and context (functions, classes, or occasionally code) from other files:
# Path: sc/baseVocabulary.py
# class baseVocabulary(object):
# __metaclass__ = ABCMeta
# """
# Any class that will inherits from BaseRegisteredClass will be included
# inside the dict RegistryHolder.REGISTRY, the key being the name of the
# class and the associated value, the class itself.
# """
# graph = rdflib.Dataset(default_union=True)
# context = {}
# namespace = []
# @abstractmethod
# def build(self):
# pass
#
# Path: sc/envVocabulary.py
# class envVocabulary(baseVocabulary):
#
# def __init__(self):
# pass
#
# def build(self):
# ds = self.graph
# self.context = {"ce":
# "https://raw.githubusercontent.com/Vocamp/ComputationalActivity/master/pattern/ComputationalEnvironment.jsonld"}
#
# CE = Namespace("http://dase.cs.wright.edu/ontologies/ComputationalEnvironment#")
# CA = Namespace("http://dase.cs.wright.edu/ontologies/ComputationalActivity#")
# DOCKER = Namespace("http://w3id.org/daspos/docker#")
# info = cpuinfo.get_cpu_info()
#
# # ISSUES: We want if the architecture URI's to be created only once on
# # build or initial commit. Otherwise, we want to re-read the URI's
# # from the original graph. There are imm
#
# ds.bind("ce", CE)
# ceuri = URIRef(str(uuid.uuid4()))
# ds.add((ceuri, RDF.type, CE.ComputationalEnvironment))
#
# osUri = URIRef(str(uuid.uuid4()))
# ds.add((ceuri, CE.hasOperatingSystem, osUri))
# ds.add((osUri, RDFS.label, Literal("linux")))
#
# processorUri = URIRef(str(uuid.uuid4()))
# ds.add((ceuri, CE.hasHardware, processorUri))
#
# archUri = URIRef(str(uuid.uuid4()))
# ds.add((processorUri, CE.hasArchitecture, archUri))
# ds.add((archUri, RDFS.label, Literal("amd64")))
# ds.add((processorUri, CE.hasNumberOfCores,
# Literal("4", datatype=XSD.nonNegativeInteger)))
#
# # :hasArchitecture
# # :hasNumberOfCores
# # :hasOperatingSystem
# # :hasSize Memory or HD
# # :isAvailable
# # :VirtualMACAddress
. Output only the next line. | vocab = envVocabulary.envVocabulary() |
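The `envVocabulary.build` context boils down to binding a namespace and adding triples. A minimal sketch of that pattern, using a plain `rdflib.Graph` instead of the `Dataset` in the quoted source:

```python
import uuid
from rdflib import Graph, Literal, Namespace, RDF, RDFS, URIRef

CE = Namespace("http://dase.cs.wright.edu/ontologies/ComputationalEnvironment#")
g = Graph()
g.bind("ce", CE)

# URN-style UUID subjects, as in the tstuuid/uuidurn fixtures above.
ceuri = URIRef("urn:uuid:" + str(uuid.uuid4()))
osuri = URIRef("urn:uuid:" + str(uuid.uuid4()))
g.add((ceuri, RDF.type, CE.ComputationalEnvironment))
g.add((ceuri, CE.hasOperatingSystem, osuri))
g.add((osuri, RDFS.label, Literal("linux")))

print(g.serialize(format="turtle"))
```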
Here is a snippet: <|code_start|>
@pytest.fixture(scope="session")
def createClient():
myclient = None
# Find docker-machine environment variables
docker_host = os.getenv("DOCKER_HOST")
docker_cert_path = os.getenv("DOCKER_CERT_PATH")
docker_machine_name = os.getenv("DOCKER_MACHINE_NAME")
# Look for linux docker socket file
path = "/var/run/docker.sock"
isSocket = False
if os.path.exists(path):
mode = os.stat(path).st_mode
isSocket = stat.S_ISSOCK(mode)
if isSocket:
docker_socket_file ="unix://"+path
<|code_end|>
. Write the next line using the current file imports:
import os
import stat
import pytest
from sc import client
from docker import tls
and context from other files:
# Path: sc/client.py
# class scClient(docker.Client):
# def __init__(self, *args, **kwargs):
# def commit(self, container, *args, **kwargs):
# def build(self, *args, **kwargs):
# def put_label_image(self, image, label, *args, **kwargs):
# def fileCopyOut(self, containerid, filename, path):
# def fileCopyIn(self, containerid, filename, path):
# def hasProv(self, containerid, filename, path):
# def simple_tar(self, path):
# def infect_image(self, image, *args, **kwargs):
# def get_label_image(self, imageID):
# def get_label_container(self, containerID):
# BP = buildProcessor.buildProcessor()
, which may include functions, classes, or code. Output only the next line. | myclient = client.scClient(base_url=docker_socket_file, version="auto") |
Using the snippet: <|code_start|>
@pytest.fixture
def runner():
return CliRunner()
def test_cli(runner):
<|code_end|>
, determine the next line of code. You have imports:
import pytest
from click.testing import CliRunner
from sc import cli
and context (class names, function names, or code) available:
# Path: sc/cli.py
# @click.group()
# @click.version_option()
# @click.pass_context
# def cli(ctx):
# """Smartcontainers for software and data preservation.
# Smartcontainers provides a mechanism to add metadata to Docker
# containers as a JSON-LD label. The metadata is contextualized using
# W3C recommended PROV-O and ORCID IDs to capture provenance information.
# The sc command wraps the docker commandline interface and passes any
# docker command line parameters through to docker. Any command that changes
# the state of the container is recorded in a prov graph and attached to the resultant
# image.
# """
#
# # Ignore config loading if we intend to create an orcid config
# if ctx.args[0] == "config" and ctx.args[1] == "orcid":
# return
#
# Success = False
# while not Success:
# result = config_file.read_config()
# if 'Configuration does not exist.' in result:
# print("User configuration needs to be initialized")
# selected = None
# while not selected:
# try:
# selected = click.prompt('Do you have an ORCID profile (Y/N)')
# if selected.lower() == 'y' or selected.lower() == 'yes':
# config_by_search()
# continue
#
# if selected.lower() == 'n' or selected.lower() == 'no':
# print("Please provide some basic information:")
# query = {
# 'first_name': click.prompt(
# 'Please enter a first name', default='',
# show_default=False
# ),
# 'last_name': click.prompt(
# 'Please enter a last name', default='',
# show_default=False
# )
# }
# dockerUseruuid = str(uuid.uuid4())
# UUIDNS = Namespace("urn:uuid:")
# config_file.graph.bind("foaf", FOAF)
# config_file.graph.add( ( UUIDNS[dockerUseruuid], FOAF.givenName, Literal(query['first_name']) ) )
# config_file.graph.add( ( UUIDNS[dockerUseruuid], FOAF.familyName, Literal(query['last_name']) ) )
#
# config_file.config_obj = config_file.graph.serialize(format='turtle')
# config_file.write_config()
# except KeyError:
# print('That is not a valid selection. Please try again.\n')
# else:
# Success = True
# graph = config_file.graph
. Output only the next line. | result = runner.invoke(cli.cli) |
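The `CliRunner` pattern in this record works the same for any click group. A self-contained sketch with a toy command standing in for `sc.cli`:

```python
import click
from click.testing import CliRunner

@click.group()
def cli():
    """Toy stand-in for sc.cli."""

@cli.command()
def hello():
    click.echo("hello")

def test_cli():
    runner = CliRunner()
    result = runner.invoke(cli, ["hello"])
    assert result.exit_code == 0
    assert result.output == "hello\n"

test_cli()
```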
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
"""Tests for Smart Containers buildProcessor.
Tests for Smart Containers buildProcessor.
This module provides functions for processing Build commands.
"""
base_dir = os.path.dirname(os.path.abspath(__file__))
class BuildProcessorTestCase(unittest.TestCase):
def setUp(self):
self.processors = [
<|code_end|>
, determine the next line of code. You have imports:
import os
import unittest
from sc import buildProcessor
and context (class names, function names, or code) available:
# Path: sc/buildProcessor.py
# class buildProcessor:
#
# def __init__(self):
# self.PU = parsingUtility.parsingUtility()
# self.backslashChar = '\\'
#
# def processDF(self, path):
# #Processes the dockerfile at path
# try:
# #Open the file for reading
# with open(path, 'r') as DF:
# #Initialize the cmd variable
# cmd = ''
# #Read the first line in the file
# line = DF.readline()
# #So long as there are lines to read in the file,
# #process each line.
# while line:
# #Trim off the line feed character
# line = line[:-1]
# #If the line is empty, read the next line
# if line == '':
# line = DF.readline()
# #If the line ends with a backslash, the command is continued
# #across mulitple lines
# elif line.endswith(self.backslashChar):
# #Remove the backslash character, and then:
# #Append the command fragment to the cmd variable
# #This will occur repeatedly until the entire command
# #is assembled
# cmd += line.replace(self.backslashChar, '')
#                         #Read the next line
# line = DF.readline()
# #If the cmd variable is not empty and we have a line without
# #a backslash at the end, then the multi-line command is complete.
# elif cmd != '':
# #Append the line to the cmd variable
# cmd += line
# #Send the completed command for parsing
# self.PU.parseCommand(cmd)
# #Re-initialize the cmd variable
# cmd = ''
# #Read the next line in the file
# line = DF.readline()
# #If the cmd variable is empty and we have a line without
# #a backslash at the end, then we have a complete command to process.
# elif cmd == '':
# #Send the command off for parsing
# self.PU.parseCommand(line)
# #Read the next line in the file
# line = DF.readline()
# else:
# #We should never be here!
# print 'Problem processing DockerFile'
# return 0
# except Exception as e:
# return 1
#
# def processFO(self, fileobj):
# #Processes a fileobject passed in
# pass
. Output only the next line. | buildProcessor.buildProcessor(), |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
"""Test code for RDFlib Graph Factory for SmartContainers.
This module provides a common interface to all RDFlib graphs created by all
vocabularies. New vocabularies should subclass graphFactory.
"""
tstuuid = str(uuid.uuid4())
uuidurn = "urn:uuid:"+tstuuid
<|code_end|>
. Use current file imports:
(import json
import pytest
import uuid
from rdflib import Namespace, URIRef, RDF, RDFS, Literal
from sc import baseVocabulary
from sc import graphManager)
and context including class names, function names, or small code snippets from other files:
# Path: sc/baseVocabulary.py
# class baseVocabulary(object):
# __metaclass__ = ABCMeta
# """
# Any class that will inherits from BaseRegisteredClass will be included
# inside the dict RegistryHolder.REGISTRY, the key being the name of the
# class and the associated value, the class itself.
# """
# graph = rdflib.Dataset(default_union=True)
# context = {}
# namespace = []
# @abstractmethod
# def build(self):
# pass
. Output only the next line. | class Vocabulary1(baseVocabulary.baseVocabulary): |
Given snippet: <|code_start|> Version of get_events which returns immediately
"""
return self._events[:]
async def _input_callback(self, ev):
"""
Coroutine called by the evdev module when data is available
"""
self._expire()
if ev.keystate == ev.key_up and not self._keystates & InputQueue.KEY_UP:
return
if ev.keystate == ev.key_down and not self.keystates & InputQueue.KEY_DOWN:
return
if ev.keystate == ev.key_hold and not self.keystates & InputQueue.KEY_HOLD:
return
coords = None
if self._key_mapping is not None:
coords = self._key_mapping.get(ev.keycode, None)
event = KeyInputEvent(ev.event.timestamp(),
ev.event.timestamp() + self._expire_time,
ev.keycode, ev.scancode,
ev.keystate, coords, {})
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import asyncio
import time
from typing import NamedTuple
from uchroma.log import LOG_TRACE
from uchroma.util import clamp
and context:
# Path: uchroma/log.py
# LOG_TRACE = 5
#
# Path: uchroma/util.py
# def clamp(value, min_, max_):
# """
# Constrain a value to the specified range
#
# :param value: Input value
# :param min_: Range minimum
# :param max_: Range maximum
#
# :return: The constrained value
# """
# return max(min_, min(value, max_))
which might include code, classes, or functions. Output only the next line. | if self._logger.isEnabledFor(LOG_TRACE): |
Next line prediction: <|code_start|>
_KeyInputEvent = NamedTuple('KeyInputEvent', \
[('timestamp', float),
('expire_time', float),
('keycode', str),
('scancode', str),
('keystate', int),
('coords', list),
('data', dict)])
class KeyInputEvent(_KeyInputEvent, object):
"""
Container of all values of a keyboard input event
with optional expiration time.
"""
@property
def time_remaining(self) -> float:
"""
The number of seconds until this event expires
"""
return max(0.0, self.expire_time - time.time())
@property
def percent_complete(self) -> float:
"""
Percentage of elapsed time until this event expires
"""
duration = self.expire_time - self.timestamp
<|code_end|>
. Use current file imports:
(import asyncio
import time
from typing import NamedTuple
from uchroma.log import LOG_TRACE
from uchroma.util import clamp)
and context including class names, function names, or small code snippets from other files:
# Path: uchroma/log.py
# LOG_TRACE = 5
#
# Path: uchroma/util.py
# def clamp(value, min_, max_):
# """
# Constrain a value to the specified range
#
# :param value: Input value
# :param min_: Range minimum
# :param max_: Range maximum
#
# :return: The constrained value
# """
# return max(min_, min(value, max_))
. Output only the next line. | return clamp(self.time_remaining / duration, 0.0, 1.0) |
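Despite its name, percent_complete returns the clamped fraction of lifetime still remaining, starting near 1.0 and falling toward 0.0 as the event ages. The arithmetic, inlined with a copy of the quoted clamp helper (timestamps invented for the example):

    import time

    def clamp(value, min_, max_):
        # Same behaviour as uchroma.util.clamp.
        return max(min_, min(value, max_))

    timestamp = time.time()
    expire_time = timestamp + 2.0                      # 2-second lifetime
    time_remaining = max(0.0, expire_time - time.time())
    duration = expire_time - timestamp
    print(clamp(time_remaining / duration, 0.0, 1.0))  # ~1.0 just after creation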
Based on the snippet: <|code_start|> if not self._opened:
return
if event.type == evdev.ecodes.EV_KEY:
ev = evdev.categorize(event)
for callback in self._event_callbacks:
await callback(ev)
if not self._opened:
return
except (OSError, IOError) as err:
self._logger.exception("Event device error", exc_info=err)
break
def _evdev_close(self, event_device, future):
self._logger.info('Closing event device %s', event_device)
def _open_input_devices(self):
if self._opened:
return True
for input_device in self._input_devices:
try:
event_device = evdev.InputDevice(input_device)
self._event_devices.append(event_device)
<|code_end|>
, predict the immediate next line with the help of imports:
import asyncio
import functools
import evdev
from concurrent import futures
from uchroma.util import ensure_future
and context (classes, functions, sometimes code) from other files:
# Path: uchroma/util.py
# def ensure_future(coro, loop=None):
# """
# Wrapper for asyncio.ensure_future which dumps exceptions
# """
# if loop is None:
# loop = asyncio.get_event_loop()
# fut = asyncio.ensure_future(coro, loop=loop)
# def exception_logging_done_cb(fut):
# try:
# e = fut.exception()
# except asyncio.CancelledError:
# return
# if e is not None:
# loop.call_exception_handler({
# 'message': 'Unhandled exception in async future',
# 'future': fut,
# 'exception': e,
# })
# fut.add_done_callback(exception_logging_done_cb)
# return fut
. Output only the next line. | task = ensure_future(self._evdev_callback(event_device)) |
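The quoted ensure_future wrapper exists so that exceptions raised inside fire-and-forget tasks reach the loop's exception handler instead of disappearing silently. A standalone demonstration of the same done-callback idea, independent of uchroma:

    import asyncio

    async def boom():
        raise RuntimeError('demo')

    async def main():
        loop = asyncio.get_running_loop()
        fut = asyncio.ensure_future(boom())

        def done_cb(fut):
            try:
                exc = fut.exception()
            except asyncio.CancelledError:
                return
            if exc is not None:
                loop.call_exception_handler({
                    'message': 'Unhandled exception in async future',
                    'future': fut,
                    'exception': exc,
                })

        fut.add_done_callback(done_cb)
        # Swallow the exception here so only the handler reports it.
        await asyncio.gather(fut, return_exceptions=True)

    asyncio.run(main())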
Predict the next line after this snippet: <|code_start|> 'sphinx.ext.viewcode',
'sphinx_autodoc_annotation'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'uchroma'
copyright = '2021, Stefanie Kondik'
author = 'Stefanie Kondik'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
<|code_end|>
using the current file's imports:
import os
import sys
from uchroma.version import __version__
and any relevant context from other files:
# Path: uchroma/version.py
. Output only the next line. | version = __version__ |
Continue the code snippet: <|code_start|>
def test_config_loaded():
assert Hardware.get_device(0x210).product_id == 0x0210
def test_recursive_attribute():
assert Hardware.get_device(0x210).vendor_id == 0x1532
def test_type_coercion():
hw = Hardware.get_device(0x210)
<|code_end|>
. Use current file imports:
import os
from uchroma.server.hardware import Hardware, KeyMapping, Point, PointList
and context (classes, functions, or code) from other files:
# Path: uchroma/server/hardware.py
# class Hardware(BaseHardware):
# """
# Static hardware configuration data
#
# Loaded by Configuration from YAML.
# """
#
# class Type(Enum):
# HEADSET = 'Headset'
# KEYBOARD = 'Keyboard'
# KEYPAD = 'Keypad'
# LAPTOP = 'Laptop'
# MOUSE = 'Mouse'
# MOUSEPAD = 'Mousepad'
#
# @property
# def has_matrix(self) -> bool:
# """
# True if the device has an addressable key matrix
# """
# return self.dimensions is not None and self.dimensions.x > 1 and self.dimensions.y > 1
#
#
# def has_quirk(self, *quirks) -> bool:
# """
# True if quirk is required for the device
#
# :param: quirks The quirks to check (varargs)
#
# :return: True if the quirk is required
# """
# if self.quirks is None:
# return False
#
# for quirk in quirks:
# if isinstance(self.quirks, (list, tuple)) and quirk in self.quirks:
# return True
# if self.quirks == quirk:
# return True
#
# return False
#
#
# @classmethod
# def get_type(cls, hw_type) -> 'Hardware':
# if hw_type is None:
# return None
#
# config_path = os.path.join(os.path.dirname(__file__), 'data')
# yaml_path = os.path.join(config_path, '%s.yaml' % hw_type.name.lower())
#
# config = cls.load_yaml(yaml_path)
# assert config is not None
#
# return config
#
#
# @classmethod
# def _get_device(cls, product_id: int, hw_type) -> 'Hardware':
# if product_id is None:
# return None
#
# config = cls.get_type(hw_type)
#
# result = config.search('product_id', product_id)
# if not result:
# return None
#
# if isinstance(result, list) and len(result) == 1:
# return result[0]
#
# return result
#
#
# @classmethod
# def get_device(cls, product_id: int, hw_type=None) -> 'Hardware':
# if hw_type is not None:
# return cls._get_device(product_id, hw_type)
#
# for hw in Hardware.Type:
# device = cls._get_device(product_id, hw)
# if device is not None:
# return device
#
# return None
#
#
# def _yaml_header(self) -> str:
# header = '#\n# uChroma device configuration\n#\n'
# if self.name is not None:
# header += '# Model: %s (%s)\n' % (self.name, self.type.value)
# elif self.type is not None:
# header += '# Type: %s\n' % self.type.name.title()
# header += '# Updated on: %s\n' % datetime.now().isoformat(' ')
# header += '#\n'
#
# return header
#
# class KeyMapping(OrderedDict):
# def __setitem__(self, key, value, **kwargs):
# super().__setitem__(key, PointList(value), **kwargs)
#
# class Point(_Point):
# def __repr__(self):
# return '(%s, %s)' % (self.y, self.x)
#
# class PointList(FlowSequence):
# def __new__(cls, args):
# if isinstance(args, list):
# if isinstance(args[0], int) and len(args) == 2:
# return Point(args[0], args[1])
# if isinstance(args[0], list):
# return cls([cls(arg) for arg in args])
# return super(PointList, cls).__new__(cls, args)
. Output only the next line. | assert isinstance(hw.key_mapping, KeyMapping) |
Here is a snippet: <|code_start|>
def test_config_loaded():
assert Hardware.get_device(0x210).product_id == 0x0210
def test_recursive_attribute():
assert Hardware.get_device(0x210).vendor_id == 0x1532
def test_type_coercion():
hw = Hardware.get_device(0x210)
assert isinstance(hw.key_mapping, KeyMapping)
space = hw.key_mapping['KEY_SPACE']
assert isinstance(space, PointList)
assert len(space) == 5
prev = 0
for coord in space:
<|code_end|>
. Write the next line using the current file imports:
import os
from uchroma.server.hardware import Hardware, KeyMapping, Point, PointList
and context from other files:
# Path: uchroma/server/hardware.py
# class Hardware(BaseHardware):
# """
# Static hardware configuration data
#
# Loaded by Configuration from YAML.
# """
#
# class Type(Enum):
# HEADSET = 'Headset'
# KEYBOARD = 'Keyboard'
# KEYPAD = 'Keypad'
# LAPTOP = 'Laptop'
# MOUSE = 'Mouse'
# MOUSEPAD = 'Mousepad'
#
# @property
# def has_matrix(self) -> bool:
# """
# True if the device has an addressable key matrix
# """
# return self.dimensions is not None and self.dimensions.x > 1 and self.dimensions.y > 1
#
#
# def has_quirk(self, *quirks) -> bool:
# """
# True if quirk is required for the device
#
# :param: quirks The quirks to check (varargs)
#
# :return: True if the quirk is required
# """
# if self.quirks is None:
# return False
#
# for quirk in quirks:
# if isinstance(self.quirks, (list, tuple)) and quirk in self.quirks:
# return True
# if self.quirks == quirk:
# return True
#
# return False
#
#
# @classmethod
# def get_type(cls, hw_type) -> 'Hardware':
# if hw_type is None:
# return None
#
# config_path = os.path.join(os.path.dirname(__file__), 'data')
# yaml_path = os.path.join(config_path, '%s.yaml' % hw_type.name.lower())
#
# config = cls.load_yaml(yaml_path)
# assert config is not None
#
# return config
#
#
# @classmethod
# def _get_device(cls, product_id: int, hw_type) -> 'Hardware':
# if product_id is None:
# return None
#
# config = cls.get_type(hw_type)
#
# result = config.search('product_id', product_id)
# if not result:
# return None
#
# if isinstance(result, list) and len(result) == 1:
# return result[0]
#
# return result
#
#
# @classmethod
# def get_device(cls, product_id: int, hw_type=None) -> 'Hardware':
# if hw_type is not None:
# return cls._get_device(product_id, hw_type)
#
# for hw in Hardware.Type:
# device = cls._get_device(product_id, hw)
# if device is not None:
# return device
#
# return None
#
#
# def _yaml_header(self) -> str:
# header = '#\n# uChroma device configuration\n#\n'
# if self.name is not None:
# header += '# Model: %s (%s)\n' % (self.name, self.type.value)
# elif self.type is not None:
# header += '# Type: %s\n' % self.type.name.title()
# header += '# Updated on: %s\n' % datetime.now().isoformat(' ')
# header += '#\n'
#
# return header
#
# class KeyMapping(OrderedDict):
# def __setitem__(self, key, value, **kwargs):
# super().__setitem__(key, PointList(value), **kwargs)
#
# class Point(_Point):
# def __repr__(self):
# return '(%s, %s)' % (self.y, self.x)
#
# class PointList(FlowSequence):
# def __new__(cls, args):
# if isinstance(args, list):
# if isinstance(args[0], int) and len(args) == 2:
# return Point(args[0], args[1])
# if isinstance(args[0], list):
# return cls([cls(arg) for arg in args])
# return super(PointList, cls).__new__(cls, args)
, which may include functions, classes, or code. Output only the next line. | assert isinstance(coord, Point) |
Next line prediction: <|code_start|>
def test_config_loaded():
assert Hardware.get_device(0x210).product_id == 0x0210
def test_recursive_attribute():
assert Hardware.get_device(0x210).vendor_id == 0x1532
def test_type_coercion():
hw = Hardware.get_device(0x210)
assert isinstance(hw.key_mapping, KeyMapping)
space = hw.key_mapping['KEY_SPACE']
<|code_end|>
. Use current file imports:
(import os
from uchroma.server.hardware import Hardware, KeyMapping, Point, PointList)
and context including class names, function names, or small code snippets from other files:
# Path: uchroma/server/hardware.py
# class Hardware(BaseHardware):
# """
# Static hardware configuration data
#
# Loaded by Configuration from YAML.
# """
#
# class Type(Enum):
# HEADSET = 'Headset'
# KEYBOARD = 'Keyboard'
# KEYPAD = 'Keypad'
# LAPTOP = 'Laptop'
# MOUSE = 'Mouse'
# MOUSEPAD = 'Mousepad'
#
# @property
# def has_matrix(self) -> bool:
# """
# True if the device has an addressable key matrix
# """
# return self.dimensions is not None and self.dimensions.x > 1 and self.dimensions.y > 1
#
#
# def has_quirk(self, *quirks) -> bool:
# """
# True if quirk is required for the device
#
# :param: quirks The quirks to check (varargs)
#
# :return: True if the quirk is required
# """
# if self.quirks is None:
# return False
#
# for quirk in quirks:
# if isinstance(self.quirks, (list, tuple)) and quirk in self.quirks:
# return True
# if self.quirks == quirk:
# return True
#
# return False
#
#
# @classmethod
# def get_type(cls, hw_type) -> 'Hardware':
# if hw_type is None:
# return None
#
# config_path = os.path.join(os.path.dirname(__file__), 'data')
# yaml_path = os.path.join(config_path, '%s.yaml' % hw_type.name.lower())
#
# config = cls.load_yaml(yaml_path)
# assert config is not None
#
# return config
#
#
# @classmethod
# def _get_device(cls, product_id: int, hw_type) -> 'Hardware':
# if product_id is None:
# return None
#
# config = cls.get_type(hw_type)
#
# result = config.search('product_id', product_id)
# if not result:
# return None
#
# if isinstance(result, list) and len(result) == 1:
# return result[0]
#
# return result
#
#
# @classmethod
# def get_device(cls, product_id: int, hw_type=None) -> 'Hardware':
# if hw_type is not None:
# return cls._get_device(product_id, hw_type)
#
# for hw in Hardware.Type:
# device = cls._get_device(product_id, hw)
# if device is not None:
# return device
#
# return None
#
#
# def _yaml_header(self) -> str:
# header = '#\n# uChroma device configuration\n#\n'
# if self.name is not None:
# header += '# Model: %s (%s)\n' % (self.name, self.type.value)
# elif self.type is not None:
# header += '# Type: %s\n' % self.type.name.title()
# header += '# Updated on: %s\n' % datetime.now().isoformat(' ')
# header += '#\n'
#
# return header
#
# class KeyMapping(OrderedDict):
# def __setitem__(self, key, value, **kwargs):
# super().__setitem__(key, PointList(value), **kwargs)
#
# class Point(_Point):
# def __repr__(self):
# return '(%s, %s)' % (self.y, self.x)
#
# class PointList(FlowSequence):
# def __new__(cls, args):
# if isinstance(args, list):
# if isinstance(args[0], int) and len(args) == 2:
# return Point(args[0], args[1])
# if isinstance(args[0], list):
# return cls([cls(arg) for arg in args])
# return super(PointList, cls).__new__(cls, args)
. Output only the next line. | assert isinstance(space, PointList) |
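These three rows all exercise the coercion in PointList.__new__: a flat [y, x] pair of ints becomes a Point, and a list of such pairs becomes a list of Points. A stripped-down functional sketch of those rules, leaving out FlowSequence and the YAML layer (the spacebar coordinates are invented):

    from collections import namedtuple

    Point = namedtuple('Point', ['y', 'x'])

    def point_list(args):
        # Simplified stand-in for PointList.__new__'s coercion rules.
        if isinstance(args, list):
            if len(args) == 2 and isinstance(args[0], int):
                return Point(*args)                     # [y, x] -> Point
            if isinstance(args[0], list):
                return [point_list(a) for a in args]    # nested -> Points
        return args

    space = point_list([[5, 9], [5, 10], [5, 11], [5, 12], [5, 13]])
    assert len(space) == 5 and all(isinstance(p, Point) for p in space)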
Based on the snippet: <|code_start|> def get_fx(self, fx_name) -> BaseFX:
"""
Get the requested effects implementation.
Returns the last active object if appropriate.
:param fx_name: The string name of the effect object
"""
if self.current_fx[0] == fx_name:
return self.current_fx[1]
return self._fxmod.create_fx(fx_name)
def disable(self) -> bool:
if 'disable' in self.available_fx:
return self.activate('disable')
return False
def _activate(self, fx_name, fx, future=None):
# need to do this as a callback if an animation
# is shutting down
if fx.apply():
if fx_name != self.current_fx[0]:
self.current_fx = (fx_name, fx)
if fx_name == CUSTOM:
return True
self._driver.preferences.fx = fx_name
<|code_end|>
, predict the immediate next line with the help of imports:
import functools
import inspect
import re
from abc import abstractmethod
from frozendict import frozendict
from traitlets import Bool, HasTraits, Instance, Tuple, Unicode
from uchroma.traits import get_args_dict
from uchroma.util import camel_to_snake
and context (classes, functions, sometimes code) from other files:
# Path: uchroma/traits.py
# def get_args_dict(obj: HasTraits, incl_all=False):
# """
# Return a dict of user-configurable traits for an object
#
# :param obj: an instance of HasTraits
# :param incl_all: If all items should be included, regardless of RO status
# :return: dict of arguments
# """
# argsdict = ArgsDict()
# for k in sorted(obj._trait_values.keys()):
# v = obj._trait_values[k]
# trait = obj.traits()[k]
# if incl_all or (not trait.get_metadata('hidden') and is_trait_writable(trait)):
# argsdict[k] = v
# return argsdict
#
# Path: uchroma/util.py
# def camel_to_snake(name: str) -> str:
# """
# Returns a snake_case_name from a CamelCaseName
# """
# s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
# return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
. Output only the next line. | argsdict = get_args_dict(fx) |
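get_args_dict, quoted above, walks an object's traitlets and keeps only the user-configurable ones. A simplified re-implementation that captures the filtering idea (the real version also consults is_trait_writable and returns an ArgsDict):

    from traitlets import HasTraits, Int, Unicode

    class DemoFX(HasTraits):
        speed = Int(1)
        description = Unicode('demo', read_only=True)

    def user_args(obj):
        # Keep writable, non-hidden traits only (simplified).
        out = {}
        for name in sorted(obj.trait_names()):
            trait = obj.traits()[name]
            if trait.read_only or trait.metadata.get('hidden'):
                continue
            out[name] = getattr(obj, name)
        return out

    print(user_args(DemoFX()))   # {'speed': 1}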
Given the following code snippet before the placeholder: <|code_start|>
class BaseFX(HasTraits):
# meta
hidden = Bool(default_value=False, read_only=True)
description = Unicode('_unimplemented_', read_only=True)
def __init__(self, fxmod, driver, *args, **kwargs):
super(BaseFX, self).__init__(*args, **kwargs)
self._fxmod = fxmod
self._driver = driver
@abstractmethod
def apply(self) -> bool:
return False
class FXModule:
def __init__(self, driver):
self._driver = driver
self._available_fx = frozendict(self._load_fx())
def _load_fx(self) -> dict:
fx = {}
for k, v in inspect.getmembers(self.__class__, \
lambda x: inspect.isclass(x) and issubclass(x, BaseFX)):
<|code_end|>
, predict the next line using imports from the current file:
import functools
import inspect
import re
from abc import abstractmethod
from frozendict import frozendict
from traitlets import Bool, HasTraits, Instance, Tuple, Unicode
from uchroma.traits import get_args_dict
from uchroma.util import camel_to_snake
and context including class names, function names, and sometimes code from other files:
# Path: uchroma/traits.py
# def get_args_dict(obj: HasTraits, incl_all=False):
# """
# Return a dict of user-configurable traits for an object
#
# :param obj: an instance of HasTraits
# :param incl_all: If all items should be included, regardless of RO status
# :return: dict of arguments
# """
# argsdict = ArgsDict()
# for k in sorted(obj._trait_values.keys()):
# v = obj._trait_values[k]
# trait = obj.traits()[k]
# if incl_all or (not trait.get_metadata('hidden') and is_trait_writable(trait)):
# argsdict[k] = v
# return argsdict
#
# Path: uchroma/util.py
# def camel_to_snake(name: str) -> str:
# """
# Returns a snake_case_name from a CamelCaseName
# """
# s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
# return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
. Output only the next line. | key = camel_to_snake(re.sub(r'FX$', '', k)) |
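_load_fx derives each registry key by stripping a trailing 'FX' from the class name and snake-casing what remains. That derivation is easy to check in isolation, reusing the quoted camel_to_snake (the class names below are illustrative):

    import re

    def camel_to_snake(name: str) -> str:
        # Two regex passes, as in uchroma.util.camel_to_snake.
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    for cls_name in ('DisableFX', 'RainbowWaveFX'):
        print(camel_to_snake(re.sub(r'FX$', '', cls_name)))
    # -> disable, rainbow_wave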
Continue the code snippet: <|code_start|>#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# pylint: disable=protected-access, invalid-name, no-member
class ColorTrait(TraitType):
"""
A traitlet which encapsulates a grapefruit.Color and performs
type coercion as needed.
"""
info_text = "a color"
allow_none = True
default_value = 'black'
def __init__(self, *args, **kwargs):
super(ColorTrait, self).__init__(*args, **kwargs)
def validate(self, obj, value):
try:
if value is not None:
<|code_end|>
. Use current file imports:
import enum
import importlib
import sys
from argparse import ArgumentParser
from typing import Iterable
from traitlets import CaselessStrEnum, Container, Dict, Enum, Int, HasTraits, \
List, TraitType, Undefined, UseEnum
from frozendict import frozendict
from uchroma.color import to_color
from uchroma.util import ArgsDict
and context (classes, functions, or code) from other files:
# Path: uchroma/color.py
# def to_color(*color_args) -> Color:
# """
# Convert various color representations to grapefruit.Color
#
# Handles RGB triplets, hexcodes, and html color names.
#
# :return: The color
# """
# colors = []
# for arg in color_args:
# value = None
# if arg is not None:
# if isinstance(arg, Color):
# value = arg
# elif isinstance(arg, str):
# if arg != '':
# # grapefruit's default str() spews a string repr of a tuple
# strtuple = COLOR_TUPLE_STR.match(arg)
# if strtuple:
# value = Color.NewFromRgb(*[float(x) \
# for x in strtuple.group(1).split(', ')])
# else:
# value = Color.NewFromHtml(arg)
# elif isinstance(arg, Iterable):
# value = rgb_from_tuple(arg)
# else:
# raise TypeError('Unable to parse color from \'%s\' (%s)' % (arg, type(arg)))
# colors.append(value)
#
# if len(colors) == 0:
# return None
# if len(colors) == 1:
# return colors[0]
#
# return colors
#
# Path: uchroma/util.py
# class ArgsDict(OrderedDict):
# """
# Extension of OrderedDict which does not allow empty keys
#
# FIXME: Get rid of this
# """
# def __init__(self, *args, **kwargs):
# super(ArgsDict, self).__init__(*args, **kwargs)
# empty_keys = []
# for k, v in self.items():
# if v is None:
# empty_keys.append(k)
# for empty_key in empty_keys:
# self.pop(empty_key)
. Output only the next line. | value = to_color(value) |
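ColorTrait.validate delegates all coercion to to_color, so HTML names, hex codes, and RGB tuples all normalize to grapefruit Colors. A small sketch of the happy path, assuming the grapefruit package is installed (to_color itself handles more input shapes, including the stringified-tuple case):

    from grapefruit import Color

    def coerce(value):
        # Simplified coercion mirroring to_color's common cases.
        if isinstance(value, Color):
            return value
        if isinstance(value, str):
            return Color.NewFromHtml(value)
        if isinstance(value, (tuple, list)):
            return Color.NewFromRgb(*value)
        raise TypeError('not a color: %r' % (value,))

    print(coerce('red').html)              # '#ff0000'
    print(coerce((0.0, 1.0, 0.0)).html)    # '#00ff00'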
Using the snippet: <|code_start|> raise TypeError("Object must be a dict (was: %s)" % obj)
traits = {}
values = {}
for k, v in obj.items():
if '__value__' in v:
values[k] = v.pop('__value__')
trait = dict_as_trait(v)
if trait is None:
continue
traits[k] = trait
cls = HasTraits()
cls.add_traits(**traits)
for k, v in values.items():
setattr(cls, k, v)
return cls
def get_args_dict(obj: HasTraits, incl_all=False):
"""
Return a dict of user-configurable traits for an object
:param obj: an instance of HasTraits
:param incl_all: If all items should be included, regardless of RO status
:return: dict of arguments
"""
<|code_end|>
, determine the next line of code. You have imports:
import enum
import importlib
import sys
from argparse import ArgumentParser
from typing import Iterable
from traitlets import CaselessStrEnum, Container, Dict, Enum, Int, HasTraits, \
List, TraitType, Undefined, UseEnum
from frozendict import frozendict
from uchroma.color import to_color
from uchroma.util import ArgsDict
and context (class names, function names, or code) available:
# Path: uchroma/color.py
# def to_color(*color_args) -> Color:
# """
# Convert various color representations to grapefruit.Color
#
# Handles RGB triplets, hexcodes, and html color names.
#
# :return: The color
# """
# colors = []
# for arg in color_args:
# value = None
# if arg is not None:
# if isinstance(arg, Color):
# value = arg
# elif isinstance(arg, str):
# if arg != '':
# # grapefruit's default str() spews a string repr of a tuple
# strtuple = COLOR_TUPLE_STR.match(arg)
# if strtuple:
# value = Color.NewFromRgb(*[float(x) \
# for x in strtuple.group(1).split(', ')])
# else:
# value = Color.NewFromHtml(arg)
# elif isinstance(arg, Iterable):
# value = rgb_from_tuple(arg)
# else:
# raise TypeError('Unable to parse color from \'%s\' (%s)' % (arg, type(arg)))
# colors.append(value)
#
# if len(colors) == 0:
# return None
# if len(colors) == 1:
# return colors[0]
#
# return colors
#
# Path: uchroma/util.py
# class ArgsDict(OrderedDict):
# """
# Extension of OrderedDict which does not allow empty keys
#
# FIXME: Get rid of this
# """
# def __init__(self, *args, **kwargs):
# super(ArgsDict, self).__init__(*args, **kwargs)
# empty_keys = []
# for k, v in self.items():
# if v is None:
# empty_keys.append(k)
# for empty_key in empty_keys:
# self.pop(empty_key)
. Output only the next line. | argsdict = ArgsDict() |
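ArgsDict, quoted in the context, is just an OrderedDict that discards None-valued keys at construction time, which keeps empty optional arguments out of serialized output. It behaves like this:

    from collections import OrderedDict

    class ArgsDict(OrderedDict):
        # Drop None values on construction, per uchroma.util.ArgsDict.
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            for key in [k for k, v in self.items() if v is None]:
                self.pop(key)

    print(ArgsDict({'speed': 2, 'color': None}))
    # ArgsDict([('speed', 2)])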
Given the code snippet: <|code_start|> self._logger = driver.logger
self._driver = driver
self._status = status
self._transaction_id = transaction_id
self._remaining_packets = remaining_packets
self._protocol_type = protocol_type
self._command_class = command_class
self._command_id = command_id
self._result = None
self._data = ByteArgs(RazerReport.DATA_BUF_SIZE, data=data)
self._buf = np.zeros(shape=(RazerReport.BUF_SIZE,), dtype=np.uint8)
if reserved is None:
self._reserved = 0
else:
self._reserved = reserved
if crc is None:
self._crc = 0
else:
self._crc = crc
def _hexdump(self, data, tag=""):
<|code_end|>
, generate the next line using the imports in this file:
import struct
import time
import numpy as np
from enum import Enum
from uchroma.log import LOG_PROTOCOL_TRACE
from uchroma.util import smart_delay
from .byte_args import ByteArgs
from ._crc import fast_crc
and context (functions, classes, or occasionally code) from other files:
# Path: uchroma/log.py
# LOG_PROTOCOL_TRACE = 4
#
# Path: uchroma/util.py
# def smart_delay(delay: float, last_cmd: float, remain: int=0) -> float:
# """
# A "smart" delay mechanism which tries to reduce the
# delay as much as possible based on the time the last
# delay happened.
#
# :param delay: delay in seconds
# :param last_cmd: time of last command
# :param remain: counter, skip delay unless it's zero
#
# :return: timestamp to feed to next invocation
# """
# now = time.monotonic()
#
# if remain == 0 and last_cmd is not None and delay > 0.0:
#
# delta = now - last_cmd
# if delta < delay:
# sleep = delay - delta
# time.sleep(sleep)
#
# return now
#
# Path: uchroma/server/byte_args.py
# class ByteArgs:
# """
# Helper class for assembling byte arrays from
# argument lists of varying types
# """
# def __init__(self, size, data=None):
# self._data_ptr = 0
#
# if data is None:
# self._data = np.zeros(shape=(size,), dtype=np.uint8)
# else:
# self._data = np.frombuffer(data, dtype=np.uint8)
#
#
# @property
# def data(self):
# """
# The byte array assembled from supplied arguments
# """
# return self._data
#
#
# @property
# def size(self):
# """
# Size of the byte array
# """
# return len(self._data)
#
#
# def _ensure_space(self, size):
# assert len(self._data) > size + self._data_ptr, \
# ('Additional argument (len=%d) would exceed size limit %d (cur=%d)'
# % (size, len(self._data), self._data_ptr))
#
#
# def clear(self):
# """
# Empty the contents of the array
#
# :return: The empty ByteArgs
# :rtype: ByteArgs
# """
# self._data.fill(0)
# self._data_ptr = 0
# return self
#
#
# def put(self, arg, packing=None):
# """
# Add an argument to this array
#
# :param arg: The argument to append
# :type arg: varies
#
# :param packing: The representation passed to struct.pack
# :type packing: str
#
# :return: This ByteArgs instance
# :rtype: ByteArgs
# """
# data = None
# if packing is not None:
# data = struct.pack(packing, arg)
# elif isinstance(arg, Color):
# data = struct.pack("=BBB", *arg.intTuple)
# elif isinstance(arg, Enum):
# if hasattr(arg, "opcode"):
# data = arg.opcode
# else:
# data = arg.value
# elif isinstance(arg, np.ndarray):
# data = arg.flatten()
# elif isinstance(arg, (bytearray, bytes)):
# data = arg
# else:
# data = struct.pack("=B", arg)
#
# if isinstance(data, int):
# if self._data_ptr + 1 > len(self._data):
# raise ValueError('No space left in argument list')
#
# self._ensure_space(1)
# self._data[self._data_ptr] = data
# self._data_ptr += 1
# else:
# datalen = len(data)
# if datalen > 0:
# if self._data_ptr + datalen > len(self._data):
# raise ValueError('No space left in argument list')
#
# if not isinstance(data, np.ndarray):
# data = np.frombuffer(data, dtype=np.uint8)
# self._data[self._data_ptr:self._data_ptr+datalen] = data
# self._data_ptr += datalen
#
# return self
#
#
# def put_all(self, args, packing=None):
# for arg in args:
# self.put(arg, packing=packing)
# return self
#
#
# def put_short(self, arg):
# """
# Convenience method to add an argument as a short to
# the array
#
# :param arg: The argument to append
# :type arg: varies
#
# :return: This ByteArgs instance
# :rtype: ByteArgs
# """
# return self.put(arg, '=H')
#
#
# def put_int(self, arg):
# """
# Convenience method to add an argument as an integer to
# the array
#
# :param arg: The argument to append
# :type arg: varies
#
# :return: This ByteArgs instance
# :rtype: ByteArgs
# """
# return self.put(arg, '=I')
. Output only the next line. | if self._logger.isEnabledFor(LOG_PROTOCOL_TRACE): |
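smart_delay, quoted above, sleeps only for whatever part of the delay window has not already elapsed since the previous command, and skips the sleep entirely while remain is non-zero (mid-transaction). A self-contained demonstration using the report class's 7 ms CMD_DELAY_TIME figure:

    import time

    def smart_delay(delay, last_cmd, remain=0):
        # Sleep for the unexpired remainder of the window only.
        now = time.monotonic()
        if remain == 0 and last_cmd is not None and delay > 0.0:
            delta = now - last_cmd
            if delta < delay:
                time.sleep(delay - delta)
        return now

    start = time.monotonic()
    last = smart_delay(0.007, None)     # first call never sleeps
    smart_delay(0.007, last)            # sleeps out the rest of the window
    print('%.3f s elapsed' % (time.monotonic() - start))   # ~0.007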
Continue the code snippet: <|code_start|>
def run(self, delay: float = None, timeout_cb=None) -> bool:
"""
Run this report and retrieve the result from the hardware.
Sends the feature report and parses the result. A small delay
is required between calls to the hardware or a BUSY status
will be returned. This delay may need to be adjusted on a per-model
basis.
If debug loglevel is enabled, the raw report data from both
the request and the response will be logged.
:param delay: Time to delay between requests (defaults to 0.005 sec)
:param timeout_cb: Callback to run when a TIMEOUT is returned
:return: The parsed result from the hardware
"""
if delay is None:
delay = RazerReport.CMD_DELAY_TIME
retry_count = 3
with self._driver.device_open():
while retry_count > 0:
try:
req = self._pack_request()
self._hexdump(req, '--> ')
if self._remaining_packets == 0:
<|code_end|>
. Use current file imports:
import struct
import time
import numpy as np
from enum import Enum
from uchroma.log import LOG_PROTOCOL_TRACE
from uchroma.util import smart_delay
from .byte_args import ByteArgs
from ._crc import fast_crc
and context (classes, functions, or code) from other files:
# Path: uchroma/log.py
# LOG_PROTOCOL_TRACE = 4
#
# Path: uchroma/util.py
# def smart_delay(delay: float, last_cmd: float, remain: int=0) -> float:
# """
# A "smart" delay mechanism which tries to reduce the
# delay as much as possible based on the time the last
# delay happened.
#
# :param delay: delay in seconds
# :param last_cmd: time of last command
# :param remain: counter, skip delay unless it's zero
#
# :return: timestamp to feed to next invocation
# """
# now = time.monotonic()
#
# if remain == 0 and last_cmd is not None and delay > 0.0:
#
# delta = now - last_cmd
# if delta < delay:
# sleep = delay - delta
# time.sleep(sleep)
#
# return now
#
# Path: uchroma/server/byte_args.py
# class ByteArgs:
# """
# Helper class for assembling byte arrays from
# argument lists of varying types
# """
# def __init__(self, size, data=None):
# self._data_ptr = 0
#
# if data is None:
# self._data = np.zeros(shape=(size,), dtype=np.uint8)
# else:
# self._data = np.frombuffer(data, dtype=np.uint8)
#
#
# @property
# def data(self):
# """
# The byte array assembled from supplied arguments
# """
# return self._data
#
#
# @property
# def size(self):
# """
# Size of the byte array
# """
# return len(self._data)
#
#
# def _ensure_space(self, size):
# assert len(self._data) > size + self._data_ptr, \
# ('Additional argument (len=%d) would exceed size limit %d (cur=%d)'
# % (size, len(self._data), self._data_ptr))
#
#
# def clear(self):
# """
# Empty the contents of the array
#
# :return: The empty ByteArgs
# :rtype: ByteArgs
# """
# self._data.fill(0)
# self._data_ptr = 0
# return self
#
#
# def put(self, arg, packing=None):
# """
# Add an argument to this array
#
# :param arg: The argument to append
# :type arg: varies
#
# :param packing: The representation passed to struct.pack
# :type packing: str
#
# :return: This ByteArgs instance
# :rtype: ByteArgs
# """
# data = None
# if packing is not None:
# data = struct.pack(packing, arg)
# elif isinstance(arg, Color):
# data = struct.pack("=BBB", *arg.intTuple)
# elif isinstance(arg, Enum):
# if hasattr(arg, "opcode"):
# data = arg.opcode
# else:
# data = arg.value
# elif isinstance(arg, np.ndarray):
# data = arg.flatten()
# elif isinstance(arg, (bytearray, bytes)):
# data = arg
# else:
# data = struct.pack("=B", arg)
#
# if isinstance(data, int):
# if self._data_ptr + 1 > len(self._data):
# raise ValueError('No space left in argument list')
#
# self._ensure_space(1)
# self._data[self._data_ptr] = data
# self._data_ptr += 1
# else:
# datalen = len(data)
# if datalen > 0:
# if self._data_ptr + datalen > len(self._data):
# raise ValueError('No space left in argument list')
#
# if not isinstance(data, np.ndarray):
# data = np.frombuffer(data, dtype=np.uint8)
# self._data[self._data_ptr:self._data_ptr+datalen] = data
# self._data_ptr += datalen
#
# return self
#
#
# def put_all(self, args, packing=None):
# for arg in args:
# self.put(arg, packing=packing)
# return self
#
#
# def put_short(self, arg):
# """
# Convenience method to add an argument as a short to
# the array
#
# :param arg: The argument to append
# :type arg: varies
#
# :return: This ByteArgs instance
# :rtype: ByteArgs
# """
# return self.put(arg, '=H')
#
#
# def put_int(self, arg):
# """
# Convenience method to add an argument as an integer to
# the array
#
# :param arg: The argument to append
# :type arg: varies
#
# :return: This ByteArgs instance
# :rtype: ByteArgs
# """
# return self.put(arg, '=I')
. Output only the next line. | self._driver.last_cmd_time = smart_delay(delay, self._driver.last_cmd_time, |
Predict the next line after this snippet: <|code_start|> REQ_HEADER = '=BHBBBB'
RSP_HEADER = '=BBHBBBB'
REQ_REPORT_ID = b'\x02'
RSP_REPORT_ID = b'\x00'
BUF_SIZE = 90
DATA_BUF_SIZE = 80
# Time to sleep between requests, needed to avoid BUSY replies
CMD_DELAY_TIME = 0.007
def __init__(self, driver, command_class, command_id, data_size,
status=0x00, transaction_id=0xFF, remaining_packets=0x00,
protocol_type=0x00, data=None, crc=None, reserved=None):
self._logger = driver.logger
self._driver = driver
self._status = status
self._transaction_id = transaction_id
self._remaining_packets = remaining_packets
self._protocol_type = protocol_type
self._command_class = command_class
self._command_id = command_id
self._result = None
<|code_end|>
using the current file's imports:
import struct
import time
import numpy as np
from enum import Enum
from uchroma.log import LOG_PROTOCOL_TRACE
from uchroma.util import smart_delay
from .byte_args import ByteArgs
from ._crc import fast_crc
and any relevant context from other files:
# Path: uchroma/log.py
# LOG_PROTOCOL_TRACE = 4
#
# Path: uchroma/util.py
# def smart_delay(delay: float, last_cmd: float, remain: int=0) -> float:
# """
# A "smart" delay mechanism which tries to reduce the
# delay as much as possible based on the time the last
# delay happened.
#
# :param delay: delay in seconds
# :param last_cmd: time of last command
# :param remain: counter, skip delay unless it's zero
#
# :return: timestamp to feed to next invocation
# """
# now = time.monotonic()
#
# if remain == 0 and last_cmd is not None and delay > 0.0:
#
# delta = now - last_cmd
# if delta < delay:
# sleep = delay - delta
# time.sleep(sleep)
#
# return now
#
# Path: uchroma/server/byte_args.py
# class ByteArgs:
# """
# Helper class for assembling byte arrays from
# argument lists of varying types
# """
# def __init__(self, size, data=None):
# self._data_ptr = 0
#
# if data is None:
# self._data = np.zeros(shape=(size,), dtype=np.uint8)
# else:
# self._data = np.frombuffer(data, dtype=np.uint8)
#
#
# @property
# def data(self):
# """
# The byte array assembled from supplied arguments
# """
# return self._data
#
#
# @property
# def size(self):
# """
# Size of the byte array
# """
# return len(self._data)
#
#
# def _ensure_space(self, size):
# assert len(self._data) > size + self._data_ptr, \
# ('Additional argument (len=%d) would exceed size limit %d (cur=%d)'
# % (size, len(self._data), self._data_ptr))
#
#
# def clear(self):
# """
# Empty the contents of the array
#
# :return: The empty ByteArgs
# :rtype: ByteArgs
# """
# self._data.fill(0)
# self._data_ptr = 0
# return self
#
#
# def put(self, arg, packing=None):
# """
# Add an argument to this array
#
# :param arg: The argument to append
# :type arg: varies
#
# :param packing: The representation passed to struct.pack
# :type packing: str
#
# :return: This ByteArgs instance
# :rtype: ByteArgs
# """
# data = None
# if packing is not None:
# data = struct.pack(packing, arg)
# elif isinstance(arg, Color):
# data = struct.pack("=BBB", *arg.intTuple)
# elif isinstance(arg, Enum):
# if hasattr(arg, "opcode"):
# data = arg.opcode
# else:
# data = arg.value
# elif isinstance(arg, np.ndarray):
# data = arg.flatten()
# elif isinstance(arg, (bytearray, bytes)):
# data = arg
# else:
# data = struct.pack("=B", arg)
#
# if isinstance(data, int):
# if self._data_ptr + 1 > len(self._data):
# raise ValueError('No space left in argument list')
#
# self._ensure_space(1)
# self._data[self._data_ptr] = data
# self._data_ptr += 1
# else:
# datalen = len(data)
# if datalen > 0:
# if self._data_ptr + datalen > len(self._data):
# raise ValueError('No space left in argument list')
#
# if not isinstance(data, np.ndarray):
# data = np.frombuffer(data, dtype=np.uint8)
# self._data[self._data_ptr:self._data_ptr+datalen] = data
# self._data_ptr += datalen
#
# return self
#
#
# def put_all(self, args, packing=None):
# for arg in args:
# self.put(arg, packing=packing)
# return self
#
#
# def put_short(self, arg):
# """
# Convenience method to add an argument as a short to
# the array
#
# :param arg: The argument to append
# :type arg: varies
#
# :return: This ByteArgs instance
# :rtype: ByteArgs
# """
# return self.put(arg, '=H')
#
#
# def put_int(self, arg):
# """
# Convenience method to add an argument as an integer to
# the array
#
# :param arg: The argument to append
# :type arg: varies
#
# :return: This ByteArgs instance
# :rtype: ByteArgs
# """
# return self.put(arg, '=I')
. Output only the next line. | self._data = ByteArgs(RazerReport.DATA_BUF_SIZE, data=data) |
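The two header constants are plain struct format strings: '=' selects native byte order with standard sizes, 'B' a uint8, and 'H' a uint16, so the request header is 7 bytes and the response header 8. This can be confirmed without any hardware (the packed field values below are placeholders, not a real Razer command):

    import struct

    REQ_HEADER = '=BHBBBB'
    RSP_HEADER = '=BBHBBBB'

    assert struct.calcsize(REQ_HEADER) == 7
    assert struct.calcsize(RSP_HEADER) == 8

    # Placeholder field values; meanings are defined by the report class.
    hdr = struct.pack(REQ_HEADER, 0xFF, 0, 0, 0x06, 0x0F, 0x02)
    print(hdr.hex())   # 'ff000000060f02' on a little-endian host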
Given the following code snippet before the placeholder: <|code_start|> if driver is None:
self._parser.error("Invalid device: %s" % self._args.device)
else:
dev_paths = self._client.get_device_paths()
if len(dev_paths) == 1:
driver = self._client.get_device(dev_paths[0])
else:
self._list_devices(self._args)
self._parser.error("Multiple devices found, select one with --device")
return driver
def set_property(self, target, name, value):
name = snake_to_camel(name)
if not hasattr(target, name):
raise ValueError("Invalid property: %s" % name)
cls_obj = getattr(target.__class__, name)
if hasattr(cls_obj, '_type'):
typespec = cls_obj._type
if typespec == 's':
value = str(value)
elif typespec == 'd':
value = float(value)
elif typespec == 'u' or typespec == 'i':
value = int(value)
elif typespec == 'b':
value = bool(value)
else:
<|code_end|>
, predict the next line using imports from the current file:
import logging
from argparse import ArgumentParser
from argcomplete import autocomplete
from uchroma.dbus_utils import dbus_prepare, snake_to_camel
from uchroma.version import __version__
from .dbus_client import UChromaClient
and context including class names, function names, and sometimes code from other files:
# Path: uchroma/dbus_utils.py
# def _check_variance(items: list):
# def dbus_prepare(obj, variant: bool=False, camel_keys: bool=False) -> tuple:
# def __init__(self, obj, interface_name, exclude=None):
# def add_property(self, name: str, signature: str, writable: bool=False):
# def add_method(self, method, *argspecs):
# def add_signal(self, signal, *argspecs):
# def _parse_traits(self):
# def build(self) -> str:
# def __init__(self, *args, **kwargs):
# def __getattribute__(self, name):
# def __setattr__(self, name, value):
# class DescriptorBuilder(object):
# class TraitsPropertiesMixin(object):
#
# Path: uchroma/version.py
#
# Path: uchroma/client/dbus_client.py
# class UChromaClient(object):
#
# def __init__(self):
# self._bus = pydbus.SessionBus()
#
#
# def get_device_paths(self) -> list:
# dm = self._bus.get(SERVICE)
# return dm.GetDevices()
#
#
# def get_device(self, identifier):
# if identifier is None:
# return None
#
# use_key = False
# if isinstance(identifier, str):
# if identifier.startswith(BASE_PATH):
# return self._bus.get(SERVICE, identifier)
#
# if re.match(r'\w{4}:\w{4}.\d{2}', identifier):
# use_key = True
# elif re.match(r'\d+', identifier):
# identifier = int(identifier)
# else:
# return None
#
# for dev_path in self.get_device_paths():
# dev = self.get_device(dev_path)
# if use_key and identifier == dev.Key:
# return dev
# elif identifier == dev.DeviceIndex:
# return dev
#
# return None
#
#
# def get_layer(self, device, layer_idx):
# layers = device.CurrentRenderers
# if layer_idx >= len(layers):
# raise ValueError("Layer index out of range")
#
# return self._bus.get(SERVICE, layers[layer_idx][1])
. Output only the next line. | value = dbus_prepare(value)[0] |
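set_property reads the D-Bus type code off the proxy property and coerces the incoming CLI string accordingly. The mapping in isolation (note that bool(value) on any non-empty string is True, so a literal 'false' would coerce to True; the quoted code shares that caveat):

    def coerce_for_dbus(typespec: str, value: str):
        # 's' str, 'd' double, 'u'/'i' ints, 'b' bool -- as in set_property.
        if typespec == 's':
            return str(value)
        if typespec == 'd':
            return float(value)
        if typespec in ('u', 'i'):
            return int(value)
        if typespec == 'b':
            return bool(value)
        raise ValueError('unsupported typespec: %s' % typespec)

    assert coerce_for_dbus('d', '0.5') == 0.5
    assert coerce_for_dbus('i', '42') == 42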
Given snippet: <|code_start|>
@property
def version(self):
return 'uchroma-%s' % __version__
def _add_subparsers(self, sub):
pass
def get_driver(self):
if hasattr(self._args, 'device') and self._args.device is not None:
driver = self._client.get_device(self._args.device)
if driver is None:
self._parser.error("Invalid device: %s" % self._args.device)
else:
dev_paths = self._client.get_device_paths()
if len(dev_paths) == 1:
driver = self._client.get_device(dev_paths[0])
else:
self._list_devices(self._args)
self._parser.error("Multiple devices found, select one with --device")
return driver
def set_property(self, target, name, value):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
from argparse import ArgumentParser
from argcomplete import autocomplete
from uchroma.dbus_utils import dbus_prepare, snake_to_camel
from uchroma.version import __version__
from .dbus_client import UChromaClient
and context:
# Path: uchroma/dbus_utils.py
# def _check_variance(items: list):
# def dbus_prepare(obj, variant: bool=False, camel_keys: bool=False) -> tuple:
# def __init__(self, obj, interface_name, exclude=None):
# def add_property(self, name: str, signature: str, writable: bool=False):
# def add_method(self, method, *argspecs):
# def add_signal(self, signal, *argspecs):
# def _parse_traits(self):
# def build(self) -> str:
# def __init__(self, *args, **kwargs):
# def __getattribute__(self, name):
# def __setattr__(self, name, value):
# class DescriptorBuilder(object):
# class TraitsPropertiesMixin(object):
#
# Path: uchroma/version.py
#
# Path: uchroma/client/dbus_client.py
# class UChromaClient(object):
#
# def __init__(self):
# self._bus = pydbus.SessionBus()
#
#
# def get_device_paths(self) -> list:
# dm = self._bus.get(SERVICE)
# return dm.GetDevices()
#
#
# def get_device(self, identifier):
# if identifier is None:
# return None
#
# use_key = False
# if isinstance(identifier, str):
# if identifier.startswith(BASE_PATH):
# return self._bus.get(SERVICE, identifier)
#
# if re.match(r'\w{4}:\w{4}.\d{2}', identifier):
# use_key = True
# elif re.match(r'\d+', identifier):
# identifier = int(identifier)
# else:
# return None
#
# for dev_path in self.get_device_paths():
# dev = self.get_device(dev_path)
# if use_key and identifier == dev.Key:
# return dev
# elif identifier == dev.DeviceIndex:
# return dev
#
# return None
#
#
# def get_layer(self, device, layer_idx):
# layers = device.CurrentRenderers
# if layer_idx >= len(layers):
# raise ValueError("Layer index out of range")
#
# return self._bus.get(SERVICE, layers[layer_idx][1])
which might include code, classes, or functions. Output only the next line. | name = snake_to_camel(name) |
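snake_to_camel's body is not quoted in this row's context; a typical implementation consistent with how it is used here would be (an assumption, not uchroma's verbatim code):

    def snake_to_camel(name: str) -> str:
        # Assumed implementation: title-case each underscore-separated part.
        return ''.join(part.title() for part in name.split('_'))

    assert snake_to_camel('firmware_version') == 'FirmwareVersion'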
Given the following code snippet before the placeholder: <|code_start|> autocomplete(self._parser)
self._args, self._unparsed = self._parser.parse_known_args(self._unparsed, self._args)
if not hasattr(self._args, 'func'):
self._parser.print_help()
self._parser.exit(1)
def _list_devices(self, args):
for dev_path in self._client.get_device_paths():
dev = self._client.get_device(dev_path)
serial_number = firmware_version = "Unknown"
try:
serial_number = dev.SerialNumber
firmware_version = dev.FirmwareVersion
except IOError as err:
if self._args.debug:
args.parser.error("Error opening device: %s" % err)
print('[%s]: %s (%s / %s)' % (dev.Key, dev.Name, serial_number, firmware_version))
@property
def description(self):
return 'Color control for Razer Chroma peripherals'
@property
def version(self):
<|code_end|>
, predict the next line using imports from the current file:
import logging
from argparse import ArgumentParser
from argcomplete import autocomplete
from uchroma.dbus_utils import dbus_prepare, snake_to_camel
from uchroma.version import __version__
from .dbus_client import UChromaClient
and context including class names, function names, and sometimes code from other files:
# Path: uchroma/dbus_utils.py
# def _check_variance(items: list):
# def dbus_prepare(obj, variant: bool=False, camel_keys: bool=False) -> tuple:
# def __init__(self, obj, interface_name, exclude=None):
# def add_property(self, name: str, signature: str, writable: bool=False):
# def add_method(self, method, *argspecs):
# def add_signal(self, signal, *argspecs):
# def _parse_traits(self):
# def build(self) -> str:
# def __init__(self, *args, **kwargs):
# def __getattribute__(self, name):
# def __setattr__(self, name, value):
# class DescriptorBuilder(object):
# class TraitsPropertiesMixin(object):
#
# Path: uchroma/version.py
#
# Path: uchroma/client/dbus_client.py
# class UChromaClient(object):
#
# def __init__(self):
# self._bus = pydbus.SessionBus()
#
#
# def get_device_paths(self) -> list:
# dm = self._bus.get(SERVICE)
# return dm.GetDevices()
#
#
# def get_device(self, identifier):
# if identifier is None:
# return None
#
# use_key = False
# if isinstance(identifier, str):
# if identifier.startswith(BASE_PATH):
# return self._bus.get(SERVICE, identifier)
#
# if re.match(r'\w{4}:\w{4}.\d{2}', identifier):
# use_key = True
# elif re.match(r'\d+', identifier):
# identifier = int(identifier)
# else:
# return None
#
# for dev_path in self.get_device_paths():
# dev = self.get_device(dev_path)
# if use_key and identifier == dev.Key:
# return dev
# elif identifier == dev.DeviceIndex:
# return dev
#
# return None
#
#
# def get_layer(self, device, layer_idx):
# layers = device.CurrentRenderers
# if layer_idx >= len(layers):
# raise ValueError("Layer index out of range")
#
# return self._bus.get(SERVICE, layers[layer_idx][1])
. Output only the next line. | return 'uchroma-%s' % __version__ |
Based on the snippet: <|code_start|>"""
Various helper functions that are used across the library.
"""
class UChromaConsoleUtil(object):
"""
A base class for command-line utilities
"""
def __init__(self):
self._parser = ArgumentParser(description=self.description, add_help=False)
self._parser.add_argument('-v', '--version', action='version', version=self.version)
self._parser.add_argument('-g', '--debug', action='store_true',
help="Enable debug output")
self._parser.add_argument('-d', '--device', type=str,
help="Select target device name or index")
self._parser.add_argument('-l', '--list', action='store_true',
help="List available devices")
self._args, self._unparsed = self._parser.parse_known_args()
if self._args.debug:
logging.getLogger().setLevel(logging.DEBUG)
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
from argparse import ArgumentParser
from argcomplete import autocomplete
from uchroma.dbus_utils import dbus_prepare, snake_to_camel
from uchroma.version import __version__
from .dbus_client import UChromaClient
and context (classes, functions, sometimes code) from other files:
# Path: uchroma/dbus_utils.py
# def _check_variance(items: list):
# def dbus_prepare(obj, variant: bool=False, camel_keys: bool=False) -> tuple:
# def __init__(self, obj, interface_name, exclude=None):
# def add_property(self, name: str, signature: str, writable: bool=False):
# def add_method(self, method, *argspecs):
# def add_signal(self, signal, *argspecs):
# def _parse_traits(self):
# def build(self) -> str:
# def __init__(self, *args, **kwargs):
# def __getattribute__(self, name):
# def __setattr__(self, name, value):
# class DescriptorBuilder(object):
# class TraitsPropertiesMixin(object):
#
# Path: uchroma/version.py
#
# Path: uchroma/client/dbus_client.py
# class UChromaClient(object):
#
# def __init__(self):
# self._bus = pydbus.SessionBus()
#
#
# def get_device_paths(self) -> list:
# dm = self._bus.get(SERVICE)
# return dm.GetDevices()
#
#
# def get_device(self, identifier):
# if identifier is None:
# return None
#
# use_key = False
# if isinstance(identifier, str):
# if identifier.startswith(BASE_PATH):
# return self._bus.get(SERVICE, identifier)
#
# if re.match(r'\w{4}:\w{4}.\d{2}', identifier):
# use_key = True
# elif re.match(r'\d+', identifier):
# identifier = int(identifier)
# else:
# return None
#
# for dev_path in self.get_device_paths():
# dev = self.get_device(dev_path)
# if use_key and identifier == dev.Key:
# return dev
# elif identifier == dev.DeviceIndex:
# return dev
#
# return None
#
#
# def get_layer(self, device, layer_idx):
# layers = device.CurrentRenderers
# if layer_idx >= len(layers):
# raise ValueError("Layer index out of range")
#
# return self._bus.get(SERVICE, layers[layer_idx][1])
. Output only the next line. | self._client = UChromaClient() |
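(Nothing in this row needs elaboration beyond the snippet itself: the base class parses global flags first with parse_known_args, and the leftover arguments are re-parsed later once subcommands are registered.)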
Predict the next line after this snippet: <|code_start|> for field in self.__slots__:
if field not in ['parent', '_children']:
tmp[field] = self.get(field)
return self.__class__(**tmp)
return tuple(flat)
def _asdict(self) -> OrderedDict:
od = OrderedDict()
for slot in self.__slots__:
if slot in ('parent', '_children'):
continue
value = getattr(self, slot)
if value is None:
continue
od[slot] = value
return od
def sparsedict(self, deep=True) -> OrderedDict:
"""
Returns a "sparse" ordereddict with the parent->child relationships
represented. This is used for serialization.
:return: The sparse dict representation
"""
self.__class__._traverse = False
fields = tuple([x for x in self.__slots__ if x not in ['parent', '_children']])
<|code_end|>
using the current file's imports:
import os
import sys
import tempfile
import ruamel.yaml as yaml
from collections import Iterable, OrderedDict
from contextlib import contextmanager
from itertools import chain
from enum import Enum
from uchroma.util import ArgsDict
and any relevant context from other files:
# Path: uchroma/util.py
# class ArgsDict(OrderedDict):
# """
# Extension of OrderedDict which does not allow empty keys
#
# FIXME: Get rid of this
# """
# def __init__(self, *args, **kwargs):
# super(ArgsDict, self).__init__(*args, **kwargs)
# empty_keys = []
# for k, v in self.items():
# if v is None:
# empty_keys.append(k)
# for empty_key in empty_keys:
# self.pop(empty_key)
. Output only the next line. | odict = ArgsDict({x: getattr(self, x) for x in fields}) |
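The pattern in this row -- iterate __slots__, skip bookkeeping fields, and drop unset values -- is a compact way to serialize slotted tree nodes. A minimal standalone node showing the same _asdict shape (field names are illustrative):

    from collections import OrderedDict

    class Node:
        __slots__ = ('name', 'value', 'parent', '_children')

        def __init__(self, name, value=None):
            self.name, self.value = name, value
            self.parent = None
            self._children = None

        def _asdict(self):
            # Skip relationship slots and None values, as in the snippet.
            od = OrderedDict()
            for slot in self.__slots__:
                if slot in ('parent', '_children'):
                    continue
                v = getattr(self, slot)
                if v is not None:
                    od[slot] = v
            return od

    print(Node('speed', 2)._asdict())
    # OrderedDict([('name', 'speed'), ('value', 2)])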
Predict the next line for this snippet: <|code_start|>
class EnumTest(Enum):
FIRST = 1
SECOND = 2
THIRD = 3
def test_primitives():
<|code_end|>
with the help of current file imports:
import sys
from enum import Enum
from gi.repository import GLib
from grapefruit import Color
from traitlets import Int
from uchroma.dbus_utils import dbus_prepare
and context from other files:
# Path: uchroma/dbus_utils.py
# def dbus_prepare(obj, variant: bool=False, camel_keys: bool=False) -> tuple:
# """
# Recursively walks obj and builds a D-Bus signature
# by inspecting types. Variant types are created as
# necessary, and the returned obj may have changed.
#
# :param obj: An arbitrary primitive or container type
# :param variant: Force wrapping contained objects with variants
# :param camel_keys: Convert dict keys to CamelCase
# """
# sig = ''
# use_variant = variant
#
# try:
# if isinstance(obj, Variant):
# sig = 'v'
#
# elif isinstance(obj, bool):
# sig = 'b'
#
# elif isinstance(obj, str):
# sig = 's'
#
# elif isinstance(obj, int):
# if obj < pow(2, 16):
# sig = 'n'
# elif obj < pow(2, 32):
# sig = 'i'
# else:
# sig = 'x'
#
# elif isinstance(obj, float):
# sig = 'd'
#
# elif isinstance(obj, Color):
# sig = 's'
# obj = obj.html
#
# elif isinstance(obj, TraitType):
# obj, sig = dbus_prepare(trait_as_dict(obj), variant=True)
#
# elif isinstance(obj, HasTraits):
# obj, sig = dbus_prepare(class_traits_as_dict(obj), variant=True)
#
# elif hasattr(obj, '_asdict') and hasattr(obj, '_field_types'):
# # typing.NamedTuple
# obj, sig = dbus_prepare(obj._asdict(), variant=True)
#
# elif isinstance(obj, type) and issubclass(obj, enum.Enum):
# # top level enum, tuple of string keys
# obj = tuple(obj.__members__.keys())
# sig = '(%s)' % ('s' * len(obj))
#
# elif isinstance(obj, enum.Enum):
# obj = obj.name
# sig = 's'
#
# elif isinstance(obj, np.ndarray):
# dtype = obj.dtype.kind
# if dtype == 'f':
# dtype = 'd'
# sig = 'a' * obj.ndim + dtype
# obj = obj.tolist()
#
# elif isinstance(obj, tuple):
# tmp = []
# sig = '('
#
# for item in obj:
# if item is None and use_variant:
# continue
# # struct of all items
# r_obj, r_sig = dbus_prepare(item)
# if r_obj is None:
# continue
# sig += r_sig
# tmp.append(r_obj)
# if len(tmp) > 0:
# sig += ')'
# obj = tuple(tmp)
# else:
# sig = ''
# obj = None
#
# elif isinstance(obj, list):
# tmp = []
# sig = 'a'
# is_variant = use_variant or _check_variance(obj)
#
# for item in obj:
# if item is None and is_variant:
# continue
# r_obj, r_sig = dbus_prepare(item, variant=is_variant)
# if r_obj is None:
# continue
#
# tmp.append(r_obj)
#
# if is_variant:
# sig += 'v'
# else:
# sig += dbus_prepare(tmp[0])[1]
#
# obj = tmp
#
# elif isinstance(obj, (dict, frozendict)):
# if isinstance(obj, frozendict):
# tmp = {}
# else:
# tmp = obj.__class__()
# sig = 'a{s'
# vals = [x for x in obj.values() if x is not None]
# is_variant = use_variant or _check_variance(vals)
#
# for k, v in obj.items():
# if v is None:
# continue
# r_obj, r_sig = dbus_prepare(v)
# if r_obj is None:
# continue
# if camel_keys:
# k = snake_to_camel(k)
# if is_variant:
# tmp[k] = Variant(r_sig, r_obj)
# else:
# tmp[k] = r_obj
#
# if is_variant:
# sig += 'v'
# else:
# sig += dbus_prepare(vals[0])[1]
#
# obj = tmp
# sig += '}'
#
# elif isinstance(obj, type):
# obj = obj.__name__
# sig = 's'
#
# except Exception as err:
# logger.exception('obj: %s sig: %s variant: %s', obj, sig, variant, exc_info=err)
# raise
#
# return obj, sig
, which may contain function names, class names, or code. Output only the next line. | assert dbus_prepare(23)[1] == 'n' |
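The expected signatures read straight off the quoted dbus_prepare branches: ints under 2**16 map to 'n', wider ints to 'i' or 'x', floats to 'd', strings to 's', and tuples to a struct of their members. A few more assertions implied by those branches (these need the uchroma package and GLib bindings installed to actually run):

    from uchroma.dbus_utils import dbus_prepare

    assert dbus_prepare(23)[1] == 'n'           # fits in 16 bits
    assert dbus_prepare(70000)[1] == 'i'        # fits in 32 bits
    assert dbus_prepare(2**40)[1] == 'x'        # wider still
    assert dbus_prepare('hello')[1] == 's'
    assert dbus_prepare(1.5)[1] == 'd'
    assert dbus_prepare((1, 'a'))[1] == '(ns)'  # struct of members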
Using the snippet: <|code_start|> if arg is None:
return (0, 0, 0)
if isinstance(arg, Color):
return arg.intTuple[:3]
if isinstance(arg, str):
return Color.NewFromHtml(arg).intTuple[:3]
if isinstance(arg, tuple) or isinstance(arg, list):
if arg[0] is None:
return (0, 0, 0)
if isinstance(arg[0], list) or isinstance(arg[0], tuple) \
or isinstance(arg[0], str) or isinstance(arg[0], Color):
return [to_rgb(item) for item in arg]
return rgb_to_int_tuple(arg)
raise TypeError('Unable to parse color from \'%s\' (%s)' % (arg, type(arg)))
"""
Decorator to parse various color representations
Invokes to_color on any arguments listed in decls. This will cause
the listed arguments to be resolved to grapefruit.Color objects from
the various different representations that might be in use.
Example:
@colorarg
def frobizzle(self, speed, color1: ColorType=None, color2: ColorType=None)
"""
<|code_end|>
, determine the next line of code. You have imports:
import itertools
import math
import random
import re
import numpy as np
from enum import Enum
from typing import Iterable, List, Union
from grapefruit import Color
from hsluv import hsluv_to_rgb, rgb_to_hsluv
from skimage.util import dtype
from uchroma.util import autocast_decorator, clamp, lerp, lerp_degrees
and context (class names, function names, or code) available:
# Path: uchroma/util.py
# def autocast_decorator(type_hint, fix_arg_func):
# """
# Decorator which will invoke fix_arg_func for any
# arguments annotated with type_hint. The decorated
# function will then be called with the result.
#
# :param type_hint: A PEP484 type hint
# :param fix_arg_func: Function to invoke
#
# :return: decorator
# """
# @decorator
# def wrapper(wrapped, instance, args, kwargs):
# hinted_args = names = None
# cache_key = '%s-%s-%s' % (wrapped.__class__.__name__,
# wrapped.__name__, str(type_hint))
#
# if cache_key in AUTOCAST_CACHE:
# hinted_args, names = AUTOCAST_CACHE[cache_key]
# else:
# sig = inspect.signature(wrapped)
# names = list(sig.parameters.keys())
# hinted_args = [x[0] for x in typing.get_type_hints(wrapped).items() \
# if x[1] == type_hint or x[1] == typing.Union[type_hint, None]]
# AUTOCAST_CACHE[cache_key] = hinted_args, names
#
# if len(hinted_args) == 0:
# raise ValueError("No arguments with %s hint found" % type_hint)
#
# new_args = list(args)
# for hinted_arg in hinted_args:
# if hinted_arg in kwargs:
# kwargs[hinted_arg] = fix_arg_func(kwargs[hinted_arg])
#
# elif hinted_arg in names:
# idx = names.index(hinted_arg)
# if idx < len(new_args):
# new_args[idx] = fix_arg_func(new_args[idx])
#
# return wrapped(*new_args, **kwargs)
#
# return wrapper
#
# def clamp(value, min_, max_):
# """
# Constrain a value to the specified range
#
# :param value: Input value
# :param min_: Range minimum
# :param max_: Range maximum
#
# :return: The constrained value
# """
# return max(min_, min(value, max_))
#
# def lerp(start: float, end: float, amount: float) -> float:
# """
# Linear interpolation
#
# Return a value between start and stop at the requested percentage
#
# :param start: Range start
# :param end: Range end
# :param amount: Position in range (0.0 - 1.0)
#
# :return: The interpolated value
# """
# return start + (end - start) * amount
#
# def lerp_degrees(start: float, end: float, amount: float) -> float:
# """
# Linear interpolation between angles
#
# :param start: Range start angle in degrees
# :param end: Range end angle in degrees
# :param amount: Angle in range (0.0 - 1.0)
#
# :return: The interpolated angle in degrees
# """
# start_r = math.radians(start)
# end_r = math.radians(end)
# delta = math.atan2(math.sin(end_r - start_r), math.cos(end_r - start_r))
# return (math.degrees(start_r + delta * amount) + 360.0) % 360.0
. Output only the next line. | colorarg = autocast_decorator(ColorType, to_color) |
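autocast_decorator inspects a function's PEP 484 hints, remembers which parameters carry the target hint, and rewrites those arguments through the fix function before calling through. A greatly simplified stand-in that shows the mechanism with a toy fix function instead of to_color (the quoted original uses a wrapping-decorator library and a per-function cache):

    import functools
    import inspect
    import typing

    ColorType = typing.Union[str, tuple]

    def autocast(type_hint, fix_arg_func):
        # Simplified: coerce parameters annotated with type_hint via fix_arg_func.
        def deco(fn):
            names = list(inspect.signature(fn).parameters)
            hinted = [n for n, h in typing.get_type_hints(fn).items()
                      if h in (type_hint, typing.Optional[type_hint])]

            @functools.wraps(fn)
            def wrapper(*args, **kwargs):
                args = list(args)
                for n in hinted:
                    if n in kwargs:
                        kwargs[n] = fix_arg_func(kwargs[n])
                    elif names.index(n) < len(args):
                        args[names.index(n)] = fix_arg_func(args[names.index(n)])
                return fn(*args, **kwargs)
            return wrapper
        return deco

    colorarg = autocast(ColorType, str.upper)   # toy fix function

    @colorarg
    def show(color1: ColorType = None):
        return color1

    assert show('red') == 'RED'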
Based on the snippet: <|code_start|>
def rgb_from_tuple(arg: tuple) -> Color:
"""
Convert a 3-tuple of ints or floats to a Grapefruit color
:param arg: The RGB tuple to convert
:return: The Color object
"""
if len(arg) >= 3:
if arg[0] is None:
return Color.NewFromRgb(0, 0, 0)
if all(isinstance(n, int) for n in arg):
return Color.NewFromRgb(*Color.IntTupleToRgb(arg))
if all(isinstance(n, float) for n in arg):
return Color.NewFromRgb(*arg)
raise TypeError('Unable to convert %s (%s) to color' % (arg, type(arg[0])))
def rgb_to_int_tuple(arg: tuple) -> tuple:
"""
Convert/sanitize a 3-tuple of ints or floats
:param arg: Tuple of RGB values
:return: Tuple of RGB ints
"""
if len(arg) >= 3:
<|code_end|>
, predict the immediate next line with the help of imports:
import itertools
import math
import random
import re
import numpy as np
from enum import Enum
from typing import Iterable, List, Union
from grapefruit import Color
from hsluv import hsluv_to_rgb, rgb_to_hsluv
from skimage.util import dtype
from uchroma.util import autocast_decorator, clamp, lerp, lerp_degrees
and context (classes, functions, sometimes code) from other files:
# Path: uchroma/util.py
# def autocast_decorator(type_hint, fix_arg_func):
# """
# Decorator which will invoke fix_arg_func for any
# arguments annotated with type_hint. The decorated
# function will then be called with the result.
#
# :param type_hint: A PEP484 type hint
# :param fix_arg_func: Function to invoke
#
# :return: decorator
# """
# @decorator
# def wrapper(wrapped, instance, args, kwargs):
# hinted_args = names = None
# cache_key = '%s-%s-%s' % (wrapped.__class__.__name__,
# wrapped.__name__, str(type_hint))
#
# if cache_key in AUTOCAST_CACHE:
# hinted_args, names = AUTOCAST_CACHE[cache_key]
# else:
# sig = inspect.signature(wrapped)
# names = list(sig.parameters.keys())
# hinted_args = [x[0] for x in typing.get_type_hints(wrapped).items() \
# if x[1] == type_hint or x[1] == typing.Union[type_hint, None]]
# AUTOCAST_CACHE[cache_key] = hinted_args, names
#
# if len(hinted_args) == 0:
# raise ValueError("No arguments with %s hint found" % type_hint)
#
# new_args = list(args)
# for hinted_arg in hinted_args:
# if hinted_arg in kwargs:
# kwargs[hinted_arg] = fix_arg_func(kwargs[hinted_arg])
#
# elif hinted_arg in names:
# idx = names.index(hinted_arg)
# if idx < len(new_args):
# new_args[idx] = fix_arg_func(new_args[idx])
#
# return wrapped(*new_args, **kwargs)
#
# return wrapper
#
# def clamp(value, min_, max_):
# """
# Constrain a value to the specified range
#
# :param value: Input value
# :param min_: Range minimum
# :param max_: Range maximum
#
# :return: The constrained value
# """
# return max(min_, min(value, max_))
#
# def lerp(start: float, end: float, amount: float) -> float:
# """
# Linear interpolation
#
# Return a value between start and stop at the requested percentage
#
# :param start: Range start
# :param end: Range end
# :param amount: Position in range (0.0 - 1.0)
#
# :return: The interpolated value
# """
# return start + (end - start) * amount
#
# def lerp_degrees(start: float, end: float, amount: float) -> float:
# """
# Linear interpolation between angles
#
# :param start: Range start angle in degrees
# :param end: Range end angle in degrees
# :param amount: Angle in range (0.0 - 1.0)
#
# :return: The interpolated angle in degrees
# """
# start_r = math.radians(start)
# end_r = math.radians(end)
# delta = math.atan2(math.sin(end_r - start_r), math.cos(end_r - start_r))
# return (math.degrees(start_r + delta * amount) + 360.0) % 360.0
. Output only the next line. | return tuple([clamp(round(x), 0, 255) for x in arg[:3]]) |
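The answer line sanitizes an RGB triple by rounding each channel and clamping it into byte range. A quick standalone check, with clamp copied from the quoted uchroma.util context:
<|code_start|>
def clamp(value, min_, max_):
    # Constrain a value to [min_, max_]
    return max(min_, min(value, max_))

def rgb_to_int_tuple(arg):
    # Round each channel, then clamp into the 0-255 byte range
    if len(arg) >= 3:
        return tuple(clamp(round(x), 0, 255) for x in arg[:3])
    raise TypeError('expected at least 3 channels, got %r' % (arg,))

print(rgb_to_int_tuple((255.7, -3, 128.2)))  # (255, 0, 128)
<|code_end|>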
Continue the code snippet: <|code_start|> return [to_rgb(item) for item in arg]
return rgb_to_int_tuple(arg)
raise TypeError('Unable to parse color from \'%s\' (%s)' % (arg, type(arg)))
"""
Decorator to parse various color representations
Invokes to_color on any arguments listed in decls. This will cause
the listed arguments to be resolved to grapefruit.Color objects from
the various different representations that might be in use.
Example:
@colorarg
def frobizzle(self, speed, color1: ColorType=None, color2: ColorType=None)
"""
colorarg = autocast_decorator(ColorType, to_color)
class ColorUtils(object):
"""
Various helpers and utilities for working with colors
"""
@staticmethod
def _circular_interp(start, end, amount: float) -> tuple:
h = lerp_degrees(start[0], end[0], amount)
<|code_end|>
. Use current file imports:
import itertools
import math
import random
import re
import numpy as np
from enum import Enum
from typing import Iterable, List, Union
from grapefruit import Color
from hsluv import hsluv_to_rgb, rgb_to_hsluv
from skimage.util import dtype
from uchroma.util import autocast_decorator, clamp, lerp, lerp_degrees
and context (classes, functions, or code) from other files:
# Path: uchroma/util.py
# def autocast_decorator(type_hint, fix_arg_func):
# """
# Decorator which will invoke fix_arg_func for any
# arguments annotated with type_hint. The decorated
# function will then be called with the result.
#
# :param type_hint: A PEP484 type hint
# :param fix_arg_func: Function to invoke
#
# :return: decorator
# """
# @decorator
# def wrapper(wrapped, instance, args, kwargs):
# hinted_args = names = None
# cache_key = '%s-%s-%s' % (wrapped.__class__.__name__,
# wrapped.__name__, str(type_hint))
#
# if cache_key in AUTOCAST_CACHE:
# hinted_args, names = AUTOCAST_CACHE[cache_key]
# else:
# sig = inspect.signature(wrapped)
# names = list(sig.parameters.keys())
# hinted_args = [x[0] for x in typing.get_type_hints(wrapped).items() \
# if x[1] == type_hint or x[1] == typing.Union[type_hint, None]]
# AUTOCAST_CACHE[cache_key] = hinted_args, names
#
# if len(hinted_args) == 0:
# raise ValueError("No arguments with %s hint found" % type_hint)
#
# new_args = list(args)
# for hinted_arg in hinted_args:
# if hinted_arg in kwargs:
# kwargs[hinted_arg] = fix_arg_func(kwargs[hinted_arg])
#
# elif hinted_arg in names:
# idx = names.index(hinted_arg)
# if idx < len(new_args):
# new_args[idx] = fix_arg_func(new_args[idx])
#
# return wrapped(*new_args, **kwargs)
#
# return wrapper
#
# def clamp(value, min_, max_):
# """
# Constrain a value to the specified range
#
# :param value: Input value
# :param min_: Range minimum
# :param max_: Range maximum
#
# :return: The constrained value
# """
# return max(min_, min(value, max_))
#
# def lerp(start: float, end: float, amount: float) -> float:
# """
# Linear interpolation
#
# Return a value between start and stop at the requested percentage
#
# :param start: Range start
# :param end: Range end
# :param amount: Position in range (0.0 - 1.0)
#
# :return: The interpolated value
# """
# return start + (end - start) * amount
#
# def lerp_degrees(start: float, end: float, amount: float) -> float:
# """
# Linear interpolation between angles
#
# :param start: Range start angle in degrees
# :param end: Range end angle in degrees
# :param amount: Angle in range (0.0 - 1.0)
#
# :return: The interpolated angle in degrees
# """
# start_r = math.radians(start)
# end_r = math.radians(end)
# delta = math.atan2(math.sin(end_r - start_r), math.cos(end_r - start_r))
# return (math.degrees(start_r + delta * amount) + 360.0) % 360.0
. Output only the next line. | s = lerp(start[1], end[1], amount) |
Based on the snippet: <|code_start|> or isinstance(arg[0], str) or isinstance(arg[0], Color):
return [to_rgb(item) for item in arg]
return rgb_to_int_tuple(arg)
raise TypeError('Unable to parse color from \'%s\' (%s)' % (arg, type(arg)))
"""
Decorator to parse various color representations
Invokes to_color on any arguments listed in decls. This will cause
the listed arguments to be resolved to grapefruit.Color objects from
the various different representations that might be in use.
Example:
@colorarg
def frobizzle(self, speed, color1: ColorType=None, color2: ColorType=None)
"""
colorarg = autocast_decorator(ColorType, to_color)
class ColorUtils(object):
"""
Various helpers and utilities for working with colors
"""
@staticmethod
def _circular_interp(start, end, amount: float) -> tuple:
<|code_end|>
, predict the immediate next line with the help of imports:
import itertools
import math
import random
import re
import numpy as np
from enum import Enum
from typing import Iterable, List, Union
from grapefruit import Color
from hsluv import hsluv_to_rgb, rgb_to_hsluv
from skimage.util import dtype
from uchroma.util import autocast_decorator, clamp, lerp, lerp_degrees
and context (classes, functions, sometimes code) from other files:
# Path: uchroma/util.py
# def autocast_decorator(type_hint, fix_arg_func):
# """
# Decorator which will invoke fix_arg_func for any
# arguments annotated with type_hint. The decorated
# function will then be called with the result.
#
# :param type_hint: A PEP484 type hint
# :param fix_arg_func: Function to invoke
#
# :return: decorator
# """
# @decorator
# def wrapper(wrapped, instance, args, kwargs):
# hinted_args = names = None
# cache_key = '%s-%s-%s' % (wrapped.__class__.__name__,
# wrapped.__name__, str(type_hint))
#
# if cache_key in AUTOCAST_CACHE:
# hinted_args, names = AUTOCAST_CACHE[cache_key]
# else:
# sig = inspect.signature(wrapped)
# names = list(sig.parameters.keys())
# hinted_args = [x[0] for x in typing.get_type_hints(wrapped).items() \
# if x[1] == type_hint or x[1] == typing.Union[type_hint, None]]
# AUTOCAST_CACHE[cache_key] = hinted_args, names
#
# if len(hinted_args) == 0:
# raise ValueError("No arguments with %s hint found" % type_hint)
#
# new_args = list(args)
# for hinted_arg in hinted_args:
# if hinted_arg in kwargs:
# kwargs[hinted_arg] = fix_arg_func(kwargs[hinted_arg])
#
# elif hinted_arg in names:
# idx = names.index(hinted_arg)
# if idx < len(new_args):
# new_args[idx] = fix_arg_func(new_args[idx])
#
# return wrapped(*new_args, **kwargs)
#
# return wrapper
#
# def clamp(value, min_, max_):
# """
# Constrain a value to the specified range
#
# :param value: Input value
# :param min_: Range minimum
# :param max_: Range maximum
#
# :return: The constrained value
# """
# return max(min_, min(value, max_))
#
# def lerp(start: float, end: float, amount: float) -> float:
# """
# Linear interpolation
#
# Return a value between start and stop at the requested percentage
#
# :param start: Range start
# :param end: Range end
# :param amount: Position in range (0.0 - 1.0)
#
# :return: The interpolated value
# """
# return start + (end - start) * amount
#
# def lerp_degrees(start: float, end: float, amount: float) -> float:
# """
# Linear interpolation between angles
#
# :param start: Range start angle in degrees
# :param end: Range end angle in degrees
# :param amount: Angle in range (0.0 - 1.0)
#
# :return: The interpolated angle in degrees
# """
# start_r = math.radians(start)
# end_r = math.radians(end)
# delta = math.atan2(math.sin(end_r - start_r), math.cos(end_r - start_r))
# return (math.degrees(start_r + delta * amount) + 360.0) % 360.0
. Output only the next line. | h = lerp_degrees(start[0], end[0], amount) |
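Both _circular_interp rows hinge on lerp_degrees taking the shortest arc, which matters when two hues straddle the 0/360 wrap. A self-contained check using the formula quoted in the context:
<|code_start|>
import math

def lerp_degrees(start, end, amount):
    # Shortest-path angular interpolation, normalized to [0, 360)
    start_r = math.radians(start)
    end_r = math.radians(end)
    delta = math.atan2(math.sin(end_r - start_r), math.cos(end_r - start_r))
    return (math.degrees(start_r + delta * amount) + 360.0) % 360.0

print(lerp_degrees(350.0, 10.0, 0.5))  # 0.0 -- crosses the wrap instead of 180.0
<|code_end|>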
Given the following code snippet before the placeholder: <|code_start|>GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
test_gwf.py
~~~~~~~~~~~
Tests to ensure that gwf works so we avoid all issues we've ran into
in the past.
"""
class MockProcess(object):
def __init__(self, stdout, stderr, rc):
self.stdout = stdout
self.stderr = stderr
self.returncode = rc
def communicate(self):
return self.stdout, self.stderr
def test_perform_command_and_validate_on_success():
stdout = "foo"
stderr = ""
proc = MockProcess(stdout, stderr, 0)
<|code_end|>
, predict the next line using imports from the current file:
import datetime
from mock import Mock, patch
from nose.tools import assert_raises, eq_
from raspi.scripts import gwf
and context including class names, function names, and sometimes code from other files:
# Path: raspi/scripts/gwf.py
# RANDOM_DELAY_PARAM = 300
# SYNC_TIMEOUT = 5
# ENV_TYPE = "prod"
# GET_SERIAL_FAILURE_WAIT = 60
# NETWORKING_RESTART_WAIT = 30
# def get_process(args):
# def perform_command_and_validate(process, err_msg):
# def check_if_wifi_available():
# def check_if_synced():
# def perform_ntp_sync():
# def execute_get_serial(env_type):
# def main():
. Output only the next line. | out, err = gwf.perform_command_and_validate(proc, "Shouldn't happen") |
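The row's answer feeds a canned MockProcess into perform_command_and_validate. The gwf implementation isn't quoted, so this sketch only assumes the plausible contract implied by the test: return the streams on success, fail loudly on a non-zero exit code:
<|code_start|>
class MockProcess(object):
    # Stand-in for a finished subprocess.Popen with canned output
    def __init__(self, stdout, stderr, rc):
        self.stdout = stdout
        self.stderr = stderr
        self.returncode = rc

    def communicate(self):
        return self.stdout, self.stderr

def perform_command_and_validate(process, err_msg):
    # Assumed behavior, not the real gwf body: raise on failure, else pass through
    out, err = process.communicate()
    if process.returncode != 0:
        raise RuntimeError('%s: %s' % (err_msg, err))
    return out, err

out, err = perform_command_and_validate(MockProcess("foo", "", 0), "Shouldn't happen")
assert (out, err) == ("foo", "")
<|code_end|>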
Given snippet: <|code_start|>MOCK_SERIAL_RESPONSE = "line"
vent_disc_iterator = iter([1, 1] + [0] * 10000)
def test_get_configuration_from_main():
with patch("raspi.scripts.get_serial.open") as mock_open:
with patch("raspi.scripts.get_serial.serial") as mock_serial:
with patch("raspi.scripts.get_serial.retrieve_data") as mock_retriever:
with NamedTemporaryFile() as tmp:
with open(tmp.name, "r+w") as mock_config_file:
mock_config_file.write(MOCK_CONFIG_FILE)
mock_config_file.flush()
mock_config_file.seek(0)
mock_open.return_value = mock_config_file
mock_serial_reader = MagicMock()
mock_serial.Serial.return_value = mock_serial_reader
mock_serial_reader.inWaiting.return_value = 0
get_serial("testing")
eq_(mock_serial_reader.inWaiting.call_count, 1)
eq_(mock_retriever.call_count, 0)
def test_get_configuration_unit():
with patch("raspi.scripts.get_serial.open") as mock_open:
with NamedTemporaryFile() as tmp:
with open(tmp.name, "r+w") as mock_config_file:
mock_config_file.write(MOCK_CONFIG_FILE)
mock_config_file.flush()
mock_config_file.seek(0)
mock_open.return_value = mock_config_file
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from copy import copy
from tempfile import NamedTemporaryFile
from mock import MagicMock, patch
from nose.tools import assert_dict_equal, eq_
from raspi.scripts.get_serial import get_configuration, get_serial
and context:
# Path: raspi/scripts/get_serial.py
# def get_configuration(filename):
# config_dict = {}
# with open(filename, "r") as file_:
# parser = ConfigParser.ConfigParser()
# parser.readfp(file_)
# config_dict["baudrate"] = parser.getint("config", "baudrate")
# config_dict["tty_file"] = parser.get("config", "tty_file")
# config_dict["data_path"] = parser.get("config", "data_path")
# config_dict["refresh_rate"] = parser.getint("config", "refresh_rate")
# return config_dict
#
# def get_serial(config_type):
# try:
# config_path = {
# "prod": "/etc/b2c/script_config",
# "testing": "/etc/b2c/script_testing_config",
# "testing_bg": "/etc/b2c/script_testing_config"
# }[config_type]
# except KeyError:
# raise Exception("Configuration type not valid. Choose one of {}".format(config_path.keys()))
# config = get_configuration(config_path)
# # open port, set baudrate
# ser = serial.Serial(port=config["tty_file"], baudrate=config["baudrate"])
# flush_serial_buffers(ser)
# while True:
# # Wait until we have something to collect. If not sleep momentarily
# if ser.inWaiting() == 0:
# time.sleep(0.2)
# continue
# now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S.%f")
# filename = str(socket.gethostname() + "-" + now)
# filename = os.path.join(os.path.abspath(config["data_path"]), filename)
# filename = filename + ".csv"
# retrieve_data(filename, config, ser)
# flush_serial_buffers(ser) # not too sure why we want to have this behavior but ok
# if config_type == "testing":
# break
which might include code, classes, or functions. Output only the next line. | config_dict = get_configuration("foo/bar") |
Next line prediction: <|code_start|> "tty_file": "foo/bar",
"data_path": "pi/mock/data",
"refresh_rate": 0.01,
"vent_disconnect_tolerance": 0.01,
}
MOCK_CONFIG_FILE = """
[config]
baudrate=38400
tty_file=foo/bar
data_path=pi/mock/data
refresh_rate=1000
vent_disconnect_tolerance=1
"""
MOCK_SERIAL_RESPONSE = "line"
vent_disc_iterator = iter([1, 1] + [0] * 10000)
def test_get_configuration_from_main():
with patch("raspi.scripts.get_serial.open") as mock_open:
with patch("raspi.scripts.get_serial.serial") as mock_serial:
with patch("raspi.scripts.get_serial.retrieve_data") as mock_retriever:
with NamedTemporaryFile() as tmp:
with open(tmp.name, "r+w") as mock_config_file:
mock_config_file.write(MOCK_CONFIG_FILE)
mock_config_file.flush()
mock_config_file.seek(0)
mock_open.return_value = mock_config_file
mock_serial_reader = MagicMock()
mock_serial.Serial.return_value = mock_serial_reader
mock_serial_reader.inWaiting.return_value = 0
<|code_end|>
. Use current file imports:
(from copy import copy
from tempfile import NamedTemporaryFile
from mock import MagicMock, patch
from nose.tools import assert_dict_equal, eq_
from raspi.scripts.get_serial import get_configuration, get_serial)
and context including class names, function names, or small code snippets from other files:
# Path: raspi/scripts/get_serial.py
# def get_configuration(filename):
# config_dict = {}
# with open(filename, "r") as file_:
# parser = ConfigParser.ConfigParser()
# parser.readfp(file_)
# config_dict["baudrate"] = parser.getint("config", "baudrate")
# config_dict["tty_file"] = parser.get("config", "tty_file")
# config_dict["data_path"] = parser.get("config", "data_path")
# config_dict["refresh_rate"] = parser.getint("config", "refresh_rate")
# return config_dict
#
# def get_serial(config_type):
# try:
# config_path = {
# "prod": "/etc/b2c/script_config",
# "testing": "/etc/b2c/script_testing_config",
# "testing_bg": "/etc/b2c/script_testing_config"
# }[config_type]
# except KeyError:
# raise Exception("Configuration type not valid. Choose one of {}".format(config_path.keys()))
# config = get_configuration(config_path)
# # open port, set baudrate
# ser = serial.Serial(port=config["tty_file"], baudrate=config["baudrate"])
# flush_serial_buffers(ser)
# while True:
# # Wait until we have something to collect. If not sleep momentarily
# if ser.inWaiting() == 0:
# time.sleep(0.2)
# continue
# now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S.%f")
# filename = str(socket.gethostname() + "-" + now)
# filename = os.path.join(os.path.abspath(config["data_path"]), filename)
# filename = filename + ".csv"
# retrieve_data(filename, config, ser)
# flush_serial_buffers(ser) # not too sure why we want to have this behavior but ok
# if config_type == "testing":
# break
. Output only the next line. | get_serial("testing") |
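The test above stacks mock.patch context managers so get_serial sees a temp config file and a fake serial port. The same nested-patching idea in miniature, using only the standard library's unittest.mock:
<|code_start|>
from unittest import mock

def read_greeting():
    # Toy function whose file access gets patched out, as in the test above
    with open("/etc/greeting") as fp:
        return fp.read().upper()

with mock.patch("builtins.open", mock.mock_open(read_data="hi")) as mock_file:
    assert read_greeting() == "HI"
    mock_file.assert_called_once_with("/etc/greeting")
<|code_end|>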
Predict the next line for this snippet: <|code_start|>
register = template.Library()
@register.filter
def as_html(invoice):
template = loader.get_template('invoicing/formatters/html.html')
<|code_end|>
with the help of current file imports:
from django import template
from django.template import loader, Context
from ..formatters.html import HTMLFormatter
and context from other files:
# Path: invoicing/formatters/html.py
# class HTMLFormatter(InvoiceFormatter):
# template_name = 'invoicing/formatters/html.html'
#
# def get_data(self):
# return {
# "invoice": self.invoice,
# "INVOICING_DATE_FORMAT_TAG": "d.m.Y" # TODO: move to settings
# }
#
# def get_response(self, context={}):
# template = loader.get_template(self.template_name)
# data = self.get_data()
# data.update(context)
#
# try:
# response_data = template.render(Context(data))
# except TypeError:
# response_data = template.render(data)
#
# return HttpResponse(response_data)
, which may contain function names, class names, or code. Output only the next line. | formatter = HTMLFormatter(invoice) |
Here is a snippet: <|code_start|>
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
def _test_load_store_instrument(source_lsdsng, lsdinst_path, original_index):
proj = load_lsdsng(source_lsdsng)
proj.song.instruments.import_from_file(0x2a, lsdinst_path)
target_instr = proj.song.instruments[0x2a]
original_instr = proj.song.instruments[original_index]
assert_equal(original_instr, target_instr)
<|code_end|>
. Write the next line using the current file imports:
import os
import json
from nose.tools import assert_equal
from .project import load_lsdsng
from .utils import temporary_file
and context from other files:
# Path: pylsdj/project.py
# def load_lsdsng(filename):
# """Load a Project from a ``.lsdsng`` file.
#
# :param filename: the name of the file from which to load
# :rtype: :py:class:`pylsdj.Project`
# """
#
# # Load preamble data so that we know the name and version of the song
# with open(filename, 'rb') as fp:
# preamble_data = bread.parse(fp, spec.lsdsng_preamble)
#
# with open(filename, 'rb') as fp:
# # Skip the preamble this time around
# fp.seek(int(len(preamble_data) / 8))
#
# # Load compressed data into a block map and use BlockReader to
# # decompress it
# factory = BlockFactory()
#
# while True:
# block_data = bytearray(fp.read(blockutils.BLOCK_SIZE))
#
# if len(block_data) == 0:
# break
#
# block = factory.new_block()
# block.data = block_data
#
# remapped_blocks = filepack.renumber_block_keys(factory.blocks)
#
# reader = BlockReader()
# compressed_data = reader.read(remapped_blocks)
#
# # Now, decompress the raw data and use it and the preamble to construct
# # a Project
# raw_data = filepack.decompress(compressed_data)
#
# name = preamble_data.name
# version = preamble_data.version
# size_blks = int(math.ceil(
# float(len(compressed_data)) / blockutils.BLOCK_SIZE))
#
# return Project(name, version, size_blks, raw_data)
#
# Path: pylsdj/utils.py
# class temporary_file:
# def __enter__(self):
# (tmp_handle, tmp_abspath) = tempfile.mkstemp()
# os.close(tmp_handle)
# self.abspath = tmp_abspath
# return self.abspath
#
# def __exit__(self, t, value, traceback):
# if hasattr(self, 'abspath') and self.abspath is not None:
# os.unlink(self.abspath)
, which may include functions, classes, or code. Output only the next line. | with temporary_file() as tmpfile: |
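temporary_file, quoted in full above, is a small context manager around tempfile.mkstemp that unlinks the file on exit. Reproduced and exercised standalone:
<|code_start|>
import os
import tempfile

class temporary_file:
    # Create a named temp file, yield its path, delete it on exit
    def __enter__(self):
        handle, self.abspath = tempfile.mkstemp()
        os.close(handle)
        return self.abspath

    def __exit__(self, exc_type, value, traceback):
        if getattr(self, 'abspath', None) is not None:
            os.unlink(self.abspath)

with temporary_file() as path:
    with open(path, 'w') as fp:
        fp.write('scratch data')
    assert os.path.exists(path)
assert not os.path.exists(path)  # cleaned up on exit
<|code_end|>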
Based on the snippet: <|code_start|> def command(self):
"""the effect's command"""
return self._params.fx[self._table_index][self._fx_index]
@command.setter
def command(self, value):
self._params.fx[self._table_index][self._fx_index] = value
@property
def value(self):
"""the command's parameter"""
return self._params.val[self._table_index][self._fx_index]
@value.setter
def value(self, val):
self._params.val[self._table_index][self._fx_index] = val
def __eq__(self, other):
return hasattr(other, '_params') and self._params == other._params
class Table(object):
"""Each table is a sequence of transposes, commands, and amplitude
changes that can be applied to any channel."""
def __init__(self, song, index):
self._song = song
self._index = index
self._fx1 = [TableFX(self._song.song_data.table_cmd1, self._index, i)
<|code_end|>
, predict the immediate next line with the help of imports:
from .bread_spec import STEPS_PER_TABLE
from .vendor.six.moves import range
and context (classes, functions, sometimes code) from other files:
# Path: pylsdj/bread_spec.py
# STEPS_PER_TABLE = 16
. Output only the next line. | for i in range(STEPS_PER_TABLE)] |
Based on the snippet: <|code_start|> def name(self):
"""the instrument's name (5 characters, zero-padded)"""
instr_name = self.song.song_data.instrument_names[self.index]
if type(instr_name) == bytes:
instr_name = instr_name.decode('utf-8')
return instr_name
@name.setter
def name(self, val):
if type(val) != bytes:
val = val.encode('utf-8')
self.song.song_data.instrument_names[self.index] = val
@property
def type(self):
"""the instrument's type (``pulse``, ``wave``, ``kit`` or ``noise``)"""
return self.data.instrument_type
@type.setter
def type(self, value):
self.data.instrument_type = value
@property
def table(self):
"""a ```pylsdj.Table``` referencing the instrument's table, or None
if the instrument doesn't have a table"""
if hasattr(self.data, 'table_on') and self.data.table_on:
<|code_end|>
, predict the immediate next line with the help of imports:
from .utils import assert_index_sane
import json
and context (classes, functions, sometimes code) from other files:
# Path: pylsdj/utils.py
# def assert_index_sane(index, upper_bound_exclusive):
# assert type(index) == int, "Indices should be integers; '%s' is not" % (
# index)
# assert 0 <= index < upper_bound_exclusive, (
# "Index %d out of range [%d, %d)" % (index, 0, upper_bound_exclusive))
. Output only the next line. | assert_index_sane(self.data.table, len(self.song.tables)) |
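assert_index_sane, also quoted above, is a plain bounds check; the predicted line uses it to validate an instrument's table number against the table list. Standalone:
<|code_start|>
def assert_index_sane(index, upper_bound_exclusive):
    # Reject non-integers and out-of-range indices with readable messages
    assert type(index) == int, "Indices should be integers; '%s' is not" % (index,)
    assert 0 <= index < upper_bound_exclusive, (
        "Index %d out of range [%d, %d)" % (index, 0, upper_bound_exclusive))

assert_index_sane(3, 32)           # fine
try:
    assert_index_sane(32, 32)      # one past the end
except AssertionError as exc:
    print(exc)                     # Index 32 out of range [0, 32)
<|code_end|>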
Predict the next line after this snippet: <|code_start|>
EMPTY_SAMPLE_NAME = '\0--'
# WAVs are 8 bits/sample PCM-encoded @ 11468 Hz
# These are parameters passed to Wave_write objects during writing
WAVE_NUM_CHANNELS = 1
WAVE_SAMPLE_WIDTH = 1
WAVE_FRAMERATE = 11468
WAVE_PARAMS = (WAVE_NUM_CHANNELS, WAVE_SAMPLE_WIDTH, WAVE_FRAMERATE, 0,
'NONE', 'not compressed')
class Kits(object):
"""A wrapper for an LSDJ ROM's kits"""
def __init__(self, rom_file):
"""Load kits from the provided LSDJ ROM
:param rom_file: path to the LSDJ ROM to load
"""
with open(rom_file, 'rb') as fp:
<|code_end|>
using the current file's imports:
import wave
import bread as b
from .vendor.six.moves import range
from .bread_spec import lsdj_rom_kits, KIT_SAMPLE_NAME_LENGTH, \
SAMPLES_PER_KIT, KIT_NAME_LENGTH, SAMPLE_START_ADDRESS, MAX_SAMPLE_LENGTH
from .utils import fixed_width_string
and any relevant context from other files:
# Path: pylsdj/bread_spec.py
# def padded_hex(pad_count):
# def hex_array(x):
# EMPTY_BLOCK = 0xff
# NUM_PHRASES = 255
# NUM_TABLES = 32
# NUM_SYNTHS = 16
# WAVES_PER_SYNTH = 16
# FRAMES_PER_WAVE = 32
# ENTRIES_PER_TABLE = 16
# NUM_INSTRUMENTS = 64
# NUM_SONG_CHAINS = 256
# NUM_CHAINS = 128
# PHRASES_PER_CHAIN = 16
# NUM_GROOVES = 32
# STEPS_PER_PHRASE = 16
# STEPS_PER_GROOVE = 16
# STEPS_PER_TABLE = 16
# WORD_LENGTH = 0x10
# NUM_WORDS = 42
# INSTRUMENT_TYPE_CODE = {
# "pulse": 0,
# "wave": 1,
# "kit": 2,
# "noise": 3
# }
# FX_COMMANDS = {
# 0: '-',
# 1: 'A',
# 2: 'C',
# 3: 'D',
# 4: 'E',
# 5: 'F',
# 6: 'G',
# 7: 'H',
# 8: 'K',
# 9: 'L',
# 10: 'M',
# 11: 'O',
# 12: 'P',
# 13: 'R',
# 14: 'S',
# 15: 'T',
# 16: 'V',
# 17: 'W',
# 18: 'Z',
# # Arduinoboy-specific
# 19: 'N',
# 20: 'X',
# 21: 'Q',
# 22: 'Y'
# }
# NOTES = ['---']
# NOTES_DICT = {}
# NUM_ROM_KITS = 21
# SAMPLES_PER_KIT = 15
# KIT_SAMPLE_NAME_LENGTH = 3
# KIT_NAME_LENGTH = 6
# SAMPLE_START_ADDRESS = 0x4060
# MAX_SAMPLE_LENGTH = 0x3fa0
#
# Path: pylsdj/utils.py
# def fixed_width_string(string, width, fill=' '):
#     return string[:width].ljust(width, fill)
. Output only the next line. | self._data = b.parse(fp, lsdj_rom_kits) |
Given the following code snippet before the placeholder: <|code_start|> else:
sample_start, sample_end = self._get_sample_data_bounds(index)
return (sample_end - sample_start + 1)
def _get_sample_data(self, index):
if not self._sample_used(index):
return None
sample_start, sample_end = self._get_sample_data_bounds(index)
return bytearray(self._data.sample_data[sample_start:sample_end])
@property
def force_loop(self):
"""true if the sample will loop, false otherwise"""
return self._data.force_loop[self.force_loop_index]
@force_loop.setter
def force_loop(self, value):
self._data.force_loop[self.force_loop_index] = value
@property
def name(self):
"""the sample's name"""
return self._data.sample_names[self.index]
@name.setter
def name(self, value):
self._data.sample_names[self.index] = fixed_width_string(
<|code_end|>
, predict the next line using imports from the current file:
import wave
import bread as b
from .vendor.six.moves import range
from .bread_spec import lsdj_rom_kits, KIT_SAMPLE_NAME_LENGTH, \
SAMPLES_PER_KIT, KIT_NAME_LENGTH, SAMPLE_START_ADDRESS, MAX_SAMPLE_LENGTH
from .utils import fixed_width_string
and context including class names, function names, and sometimes code from other files:
# Path: pylsdj/bread_spec.py
# def padded_hex(pad_count):
# def hex_array(x):
# EMPTY_BLOCK = 0xff
# NUM_PHRASES = 255
# NUM_TABLES = 32
# NUM_SYNTHS = 16
# WAVES_PER_SYNTH = 16
# FRAMES_PER_WAVE = 32
# ENTRIES_PER_TABLE = 16
# NUM_INSTRUMENTS = 64
# NUM_SONG_CHAINS = 256
# NUM_CHAINS = 128
# PHRASES_PER_CHAIN = 16
# NUM_GROOVES = 32
# STEPS_PER_PHRASE = 16
# STEPS_PER_GROOVE = 16
# STEPS_PER_TABLE = 16
# WORD_LENGTH = 0x10
# NUM_WORDS = 42
# INSTRUMENT_TYPE_CODE = {
# "pulse": 0,
# "wave": 1,
# "kit": 2,
# "noise": 3
# }
# FX_COMMANDS = {
# 0: '-',
# 1: 'A',
# 2: 'C',
# 3: 'D',
# 4: 'E',
# 5: 'F',
# 6: 'G',
# 7: 'H',
# 8: 'K',
# 9: 'L',
# 10: 'M',
# 11: 'O',
# 12: 'P',
# 13: 'R',
# 14: 'S',
# 15: 'T',
# 16: 'V',
# 17: 'W',
# 18: 'Z',
# # Arduinoboy-specific
# 19: 'N',
# 20: 'X',
# 21: 'Q',
# 22: 'Y'
# }
# NOTES = ['---']
# NOTES_DICT = {}
# NUM_ROM_KITS = 21
# SAMPLES_PER_KIT = 15
# KIT_SAMPLE_NAME_LENGTH = 3
# KIT_NAME_LENGTH = 6
# SAMPLE_START_ADDRESS = 0x4060
# MAX_SAMPLE_LENGTH = 0x3fa0
#
# Path: pylsdj/utils.py
# def fixed_width_string(string, width, fill=' '):
#     return string[:width].ljust(width, fill)
. Output only the next line. | value, KIT_SAMPLE_NAME_LENGTH, '-') |
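The continuation pads a kit sample name to three characters with '-'. With the two-argument ljust (as corrected in the quoted context above), the helper behaves like this:
<|code_start|>
KIT_SAMPLE_NAME_LENGTH = 3  # from pylsdj.bread_spec

def fixed_width_string(string, width, fill=' '):
    # Truncate to width, then right-pad with the fill character
    return string[:width].ljust(width, fill)

print(fixed_width_string('KICK', KIT_SAMPLE_NAME_LENGTH, '-'))  # 'KIC'
print(fixed_width_string('A', KIT_SAMPLE_NAME_LENGTH, '-'))     # 'A--'
<|code_end|>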
Continue the code snippet: <|code_start|> with open(rom_file, 'rb') as fp:
self._data = b.parse(fp, lsdj_rom_kits)
# Don't include the last four kits in the kit list, which are reserved
# for the speech synthesizer
self._kits = list(map(lambda k: Kit(k), self._data.kits[:-4]))
def __getitem__(self, i):
return self._kits[i]
def __str__(self):
return '\n'.join(map(str, self._kits))
def __iter__(self):
for kit in self._kits:
yield kit
class Kit(object):
"""An individual sample kit"""
def __init__(self, data):
self._data = data
if self._data.magic_number != [0x60, 0x40]:
raise Exception(
'Expected magic number to be 0x60, 0x40, '
'but was %s' % (', '.join(map(hex, self._data.magic_number))))
self._samples = list(map(lambda i: KitSample(self._data, i),
<|code_end|>
. Use current file imports:
import wave
import bread as b
from .vendor.six.moves import range
from .bread_spec import lsdj_rom_kits, KIT_SAMPLE_NAME_LENGTH, \
SAMPLES_PER_KIT, KIT_NAME_LENGTH, SAMPLE_START_ADDRESS, MAX_SAMPLE_LENGTH
from .utils import fixed_width_string
and context (classes, functions, or code) from other files:
# Path: pylsdj/bread_spec.py
# def padded_hex(pad_count):
# def hex_array(x):
# EMPTY_BLOCK = 0xff
# NUM_PHRASES = 255
# NUM_TABLES = 32
# NUM_SYNTHS = 16
# WAVES_PER_SYNTH = 16
# FRAMES_PER_WAVE = 32
# ENTRIES_PER_TABLE = 16
# NUM_INSTRUMENTS = 64
# NUM_SONG_CHAINS = 256
# NUM_CHAINS = 128
# PHRASES_PER_CHAIN = 16
# NUM_GROOVES = 32
# STEPS_PER_PHRASE = 16
# STEPS_PER_GROOVE = 16
# STEPS_PER_TABLE = 16
# WORD_LENGTH = 0x10
# NUM_WORDS = 42
# INSTRUMENT_TYPE_CODE = {
# "pulse": 0,
# "wave": 1,
# "kit": 2,
# "noise": 3
# }
# FX_COMMANDS = {
# 0: '-',
# 1: 'A',
# 2: 'C',
# 3: 'D',
# 4: 'E',
# 5: 'F',
# 6: 'G',
# 7: 'H',
# 8: 'K',
# 9: 'L',
# 10: 'M',
# 11: 'O',
# 12: 'P',
# 13: 'R',
# 14: 'S',
# 15: 'T',
# 16: 'V',
# 17: 'W',
# 18: 'Z',
# # Arduinoboy-specific
# 19: 'N',
# 20: 'X',
# 21: 'Q',
# 22: 'Y'
# }
# NOTES = ['---']
# NOTES_DICT = {}
# NUM_ROM_KITS = 21
# SAMPLES_PER_KIT = 15
# KIT_SAMPLE_NAME_LENGTH = 3
# KIT_NAME_LENGTH = 6
# SAMPLE_START_ADDRESS = 0x4060
# MAX_SAMPLE_LENGTH = 0x3fa0
#
# Path: pylsdj/utils.py
# def fixed_width_string(string, width, fill=' '):
#     return string[:width].ljust(width, fill)
. Output only the next line. | range(SAMPLES_PER_KIT))) |
Here is a snippet: <|code_start|>
def __str__(self):
return '\n'.join(map(str, self._kits))
def __iter__(self):
for kit in self._kits:
yield kit
class Kit(object):
"""An individual sample kit"""
def __init__(self, data):
self._data = data
if self._data.magic_number != [0x60, 0x40]:
raise Exception(
'Expected magic number to be 0x60, 0x40, '
'but was %s' % (', '.join(map(hex, self._data.magic_number))))
self._samples = list(map(lambda i: KitSample(self._data, i),
range(SAMPLES_PER_KIT)))
@property
def name(self):
"""the kit's name"""
return self._data.kit_name
@name.setter
def name(self, value):
<|code_end|>
. Write the next line using the current file imports:
import wave
import bread as b
from .vendor.six.moves import range
from .bread_spec import lsdj_rom_kits, KIT_SAMPLE_NAME_LENGTH, \
SAMPLES_PER_KIT, KIT_NAME_LENGTH, SAMPLE_START_ADDRESS, MAX_SAMPLE_LENGTH
from .utils import fixed_width_string
and context from other files:
# Path: pylsdj/bread_spec.py
# def padded_hex(pad_count):
# def hex_array(x):
# EMPTY_BLOCK = 0xff
# NUM_PHRASES = 255
# NUM_TABLES = 32
# NUM_SYNTHS = 16
# WAVES_PER_SYNTH = 16
# FRAMES_PER_WAVE = 32
# ENTRIES_PER_TABLE = 16
# NUM_INSTRUMENTS = 64
# NUM_SONG_CHAINS = 256
# NUM_CHAINS = 128
# PHRASES_PER_CHAIN = 16
# NUM_GROOVES = 32
# STEPS_PER_PHRASE = 16
# STEPS_PER_GROOVE = 16
# STEPS_PER_TABLE = 16
# WORD_LENGTH = 0x10
# NUM_WORDS = 42
# INSTRUMENT_TYPE_CODE = {
# "pulse": 0,
# "wave": 1,
# "kit": 2,
# "noise": 3
# }
# FX_COMMANDS = {
# 0: '-',
# 1: 'A',
# 2: 'C',
# 3: 'D',
# 4: 'E',
# 5: 'F',
# 6: 'G',
# 7: 'H',
# 8: 'K',
# 9: 'L',
# 10: 'M',
# 11: 'O',
# 12: 'P',
# 13: 'R',
# 14: 'S',
# 15: 'T',
# 16: 'V',
# 17: 'W',
# 18: 'Z',
# # Arduinoboy-specific
# 19: 'N',
# 20: 'X',
# 21: 'Q',
# 22: 'Y'
# }
# NOTES = ['---']
# NOTES_DICT = {}
# NUM_ROM_KITS = 21
# SAMPLES_PER_KIT = 15
# KIT_SAMPLE_NAME_LENGTH = 3
# KIT_NAME_LENGTH = 6
# SAMPLE_START_ADDRESS = 0x4060
# MAX_SAMPLE_LENGTH = 0x3fa0
#
# Path: pylsdj/utils.py
# def fixed_width_string(string, width, fill=' '):
#     return string[:width].ljust(width, fill)
, which may include functions, classes, or code. Output only the next line. | self._data.kit_name = fixed_width_string(value, KIT_NAME_LENGTH) |
Using the snippet: <|code_start|>
# Because of data layout, indices for bits are
#
# 7, 6, 5, ..., 0, unused, 14, 13, ..., 8
#
# so the indexing logic is a little funky.
if self.index < 8:
self.force_loop_index = 7 - self.index
else:
self.force_loop_index = 15 - (self.index - 8)
def _sample_used(self, index):
return (self._data.sample_ends[index] > 0)
def _get_sample_data_bounds(self, index=None, sample_ends=None):
if index is None:
index = self.index
if sample_ends is None:
sample_ends = self._data.sample_ends
if index == 0:
sample_start = 0
sample_end = sample_ends[0]
else:
sample_start = sample_ends[index - 1]
sample_end = sample_ends[index]
# Sample end addresses are relative to the start of the kit's sample memory
<|code_end|>
, determine the next line of code. You have imports:
import wave
import bread as b
from .vendor.six.moves import range
from .bread_spec import lsdj_rom_kits, KIT_SAMPLE_NAME_LENGTH, \
SAMPLES_PER_KIT, KIT_NAME_LENGTH, SAMPLE_START_ADDRESS, MAX_SAMPLE_LENGTH
from .utils import fixed_width_string
and context (class names, function names, or code) available:
# Path: pylsdj/bread_spec.py
# def padded_hex(pad_count):
# def hex_array(x):
# EMPTY_BLOCK = 0xff
# NUM_PHRASES = 255
# NUM_TABLES = 32
# NUM_SYNTHS = 16
# WAVES_PER_SYNTH = 16
# FRAMES_PER_WAVE = 32
# ENTRIES_PER_TABLE = 16
# NUM_INSTRUMENTS = 64
# NUM_SONG_CHAINS = 256
# NUM_CHAINS = 128
# PHRASES_PER_CHAIN = 16
# NUM_GROOVES = 32
# STEPS_PER_PHRASE = 16
# STEPS_PER_GROOVE = 16
# STEPS_PER_TABLE = 16
# WORD_LENGTH = 0x10
# NUM_WORDS = 42
# INSTRUMENT_TYPE_CODE = {
# "pulse": 0,
# "wave": 1,
# "kit": 2,
# "noise": 3
# }
# FX_COMMANDS = {
# 0: '-',
# 1: 'A',
# 2: 'C',
# 3: 'D',
# 4: 'E',
# 5: 'F',
# 6: 'G',
# 7: 'H',
# 8: 'K',
# 9: 'L',
# 10: 'M',
# 11: 'O',
# 12: 'P',
# 13: 'R',
# 14: 'S',
# 15: 'T',
# 16: 'V',
# 17: 'W',
# 18: 'Z',
# # Arduinoboy-specific
# 19: 'N',
# 20: 'X',
# 21: 'Q',
# 22: 'Y'
# }
# NOTES = ['---']
# NOTES_DICT = {}
# NUM_ROM_KITS = 21
# SAMPLES_PER_KIT = 15
# KIT_SAMPLE_NAME_LENGTH = 3
# KIT_NAME_LENGTH = 6
# SAMPLE_START_ADDRESS = 0x4060
# MAX_SAMPLE_LENGTH = 0x3fa0
#
# Path: pylsdj/utils.py
# def fixed_width_string(string, width, fill=' '):
#     return string[:width].ljust(width, fill)
. Output only the next line. | sample_start = sample_start - SAMPLE_START_ADDRESS |
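The bounds helper walks sample_ends as a running table of end offsets, and the predicted line rebases the start against SAMPLE_START_ADDRESS. Assuming the stored ends are absolute ROM addresses, which is what the subtraction implies, the arithmetic looks like this (the end values here are made up):
<|code_start|>
SAMPLE_START_ADDRESS = 0x4060  # from pylsdj.bread_spec

# Hypothetical absolute end addresses for a kit with two samples
sample_ends = [0x4160, 0x4260]

def to_relative(address):
    # Rebase an absolute address onto the start of the kit's sample memory
    return address - SAMPLE_START_ADDRESS

print([to_relative(end) for end in sample_ends])  # [256, 512]
<|code_end|>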
Given the code snippet: <|code_start|>
@sample_data.setter
def sample_data(self, sample_data):
# For simplicity, we'll just pack samples into their new locations and
# overwrite the sample memory for the kit.
new_sample_ends = []
new_sample_data = []
for i in range(SAMPLES_PER_KIT):
if not self._sample_used(i) and i != self.index:
# We've found the first unused sample; since samples are
# presumed to be contiguous, this means we're done moving
# samples
break
if i == self.index:
new_sample_data.extend(sample_data)
else:
current_sample_data = self._get_sample_data(i)
if current_sample_data is not None:
new_sample_data.extend(current_sample_data)
new_sample_ends.append(int(len(new_sample_data) / 2))
if len(new_sample_ends) < SAMPLES_PER_KIT:
new_sample_ends.extend([0] * (SAMPLES_PER_KIT - len(new_sample_ends)))
<|code_end|>
, generate the next line using the imports in this file:
import wave
import bread as b
from .vendor.six.moves import range
from .bread_spec import lsdj_rom_kits, KIT_SAMPLE_NAME_LENGTH, \
SAMPLES_PER_KIT, KIT_NAME_LENGTH, SAMPLE_START_ADDRESS, MAX_SAMPLE_LENGTH
from .utils import fixed_width_string
and context (functions, classes, or occasionally code) from other files:
# Path: pylsdj/bread_spec.py
# def padded_hex(pad_count):
# def hex_array(x):
# EMPTY_BLOCK = 0xff
# NUM_PHRASES = 255
# NUM_TABLES = 32
# NUM_SYNTHS = 16
# WAVES_PER_SYNTH = 16
# FRAMES_PER_WAVE = 32
# ENTRIES_PER_TABLE = 16
# NUM_INSTRUMENTS = 64
# NUM_SONG_CHAINS = 256
# NUM_CHAINS = 128
# PHRASES_PER_CHAIN = 16
# NUM_GROOVES = 32
# STEPS_PER_PHRASE = 16
# STEPS_PER_GROOVE = 16
# STEPS_PER_TABLE = 16
# WORD_LENGTH = 0x10
# NUM_WORDS = 42
# INSTRUMENT_TYPE_CODE = {
# "pulse": 0,
# "wave": 1,
# "kit": 2,
# "noise": 3
# }
# FX_COMMANDS = {
# 0: '-',
# 1: 'A',
# 2: 'C',
# 3: 'D',
# 4: 'E',
# 5: 'F',
# 6: 'G',
# 7: 'H',
# 8: 'K',
# 9: 'L',
# 10: 'M',
# 11: 'O',
# 12: 'P',
# 13: 'R',
# 14: 'S',
# 15: 'T',
# 16: 'V',
# 17: 'W',
# 18: 'Z',
# # Arduinoboy-specific
# 19: 'N',
# 20: 'X',
# 21: 'Q',
# 22: 'Y'
# }
# NOTES = ['---']
# NOTES_DICT = {}
# NUM_ROM_KITS = 21
# SAMPLES_PER_KIT = 15
# KIT_SAMPLE_NAME_LENGTH = 3
# KIT_NAME_LENGTH = 6
# SAMPLE_START_ADDRESS = 0x4060
# MAX_SAMPLE_LENGTH = 0x3fa0
#
# Path: pylsdj/utils.py
# def fixed_width_string(string, width, fill=' '):
#     return string[:width].ljust(width, fill)
. Output only the next line. | if len(new_sample_data) < MAX_SAMPLE_LENGTH * 2: |
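The setter repacks every used sample contiguously and records each running end; the division by two suggests end offsets count 2-byte units, and the predicted guard keeps the kit within its sample memory. A toy repacking sketch under those assumptions:
<|code_start|>
SAMPLES_PER_KIT = 15
MAX_SAMPLE_LENGTH = 0x3fa0  # from pylsdj.bread_spec

def repack(samples):
    # samples: list of bytearrays; returns (flat data, per-sample end offsets)
    data = bytearray()
    ends = []
    for sample in samples:
        data.extend(sample)
        ends.append(len(data) // 2)  # assumed: offsets stored in 2-byte units
    ends.extend([0] * (SAMPLES_PER_KIT - len(ends)))
    if len(data) >= MAX_SAMPLE_LENGTH * 2:
        raise ValueError('kit sample memory overrun')
    return data, ends

data, ends = repack([bytearray(32), bytearray(64)])
print(len(data), ends[:3])  # 96 [16, 48, 0]
<|code_end|>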
Given the code snippet: <|code_start|>
def __str__(self):
return '\n'.join(map(str, self._kits))
def __iter__(self):
for kit in self._kits:
yield kit
class Kit(object):
"""An individual sample kit"""
def __init__(self, data):
self._data = data
if self._data.magic_number != [0x60, 0x40]:
raise Exception(
'Expected magic number to be 0x60, 0x40, '
'but was %s' % (', '.join(map(hex, self._data.magic_number))))
self._samples = list(map(lambda i: KitSample(self._data, i),
range(SAMPLES_PER_KIT)))
@property
def name(self):
"""the kit's name"""
return self._data.kit_name
@name.setter
def name(self, value):
<|code_end|>
, generate the next line using the imports in this file:
import wave
import bread as b
from .vendor.six.moves import range
from .bread_spec import lsdj_rom_kits, KIT_SAMPLE_NAME_LENGTH, \
SAMPLES_PER_KIT, KIT_NAME_LENGTH, SAMPLE_START_ADDRESS, MAX_SAMPLE_LENGTH
from .utils import fixed_width_string
and context (functions, classes, or occasionally code) from other files:
# Path: pylsdj/bread_spec.py
# def padded_hex(pad_count):
# def hex_array(x):
# EMPTY_BLOCK = 0xff
# NUM_PHRASES = 255
# NUM_TABLES = 32
# NUM_SYNTHS = 16
# WAVES_PER_SYNTH = 16
# FRAMES_PER_WAVE = 32
# ENTRIES_PER_TABLE = 16
# NUM_INSTRUMENTS = 64
# NUM_SONG_CHAINS = 256
# NUM_CHAINS = 128
# PHRASES_PER_CHAIN = 16
# NUM_GROOVES = 32
# STEPS_PER_PHRASE = 16
# STEPS_PER_GROOVE = 16
# STEPS_PER_TABLE = 16
# WORD_LENGTH = 0x10
# NUM_WORDS = 42
# INSTRUMENT_TYPE_CODE = {
# "pulse": 0,
# "wave": 1,
# "kit": 2,
# "noise": 3
# }
# FX_COMMANDS = {
# 0: '-',
# 1: 'A',
# 2: 'C',
# 3: 'D',
# 4: 'E',
# 5: 'F',
# 6: 'G',
# 7: 'H',
# 8: 'K',
# 9: 'L',
# 10: 'M',
# 11: 'O',
# 12: 'P',
# 13: 'R',
# 14: 'S',
# 15: 'T',
# 16: 'V',
# 17: 'W',
# 18: 'Z',
# # Arduinoboy-specific
# 19: 'N',
# 20: 'X',
# 21: 'Q',
# 22: 'Y'
# }
# NOTES = ['---']
# NOTES_DICT = {}
# NUM_ROM_KITS = 21
# SAMPLES_PER_KIT = 15
# KIT_SAMPLE_NAME_LENGTH = 3
# KIT_NAME_LENGTH = 6
# SAMPLE_START_ADDRESS = 0x4060
# MAX_SAMPLE_LENGTH = 0x3fa0
#
# Path: pylsdj/utils.py
# def fixed_width_string(string, width, fill=' '):
#     return string[:width].ljust(width, fill)
. Output only the next line. | self._data.kit_name = fixed_width_string(value, KIT_NAME_LENGTH) |
Predict the next line for this snippet: <|code_start|> @phase_amount.setter
def phase_amount(self, value):
self._params.phase_amount = value
self._overwrite_lock.disable()
@property
def vertical_shift(self):
"""the amount to shift the waveform vertically"""
return self._params.vertical_shift
@vertical_shift.setter
def vertical_shift(self, value):
self._params.vertical_shift = value
self._overwrite_lock.disable()
class WaveFrames(object):
def __init__(self, song, synth_index, wave_index, overwrite_lock):
self._frames = song.song_data.wave_frames[synth_index][wave_index]
self._overwrite_lock = overwrite_lock
def __getitem__(self, index):
return self._frames[index]
def __setitem__(self, index, value):
self._frames[index] = value
self._overwrite_lock.enable()
class Waves(object):
def __init__(self, song, synth_index, overwrite_lock):
self._waves = [WaveFrames(song, synth_index, wave_index, overwrite_lock)
<|code_end|>
with the help of current file imports:
import json
from .bread_spec import NUM_SYNTHS, WAVES_PER_SYNTH
and context from other files:
# Path: pylsdj/bread_spec.py
# NUM_SYNTHS = 16
#
# WAVES_PER_SYNTH = 16
, which may contain function names, class names, or code. Output only the next line. | for wave_index in range(WAVES_PER_SYNTH)] |
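WaveFrames flips an overwrite lock whenever a frame is written, so hand-edited waves aren't regenerated by the synth. The lock class itself isn't quoted; this sketch assumes it is just a flag behind the enable()/disable() calls seen above:
<|code_start|>
class OverwriteLock(object):
    # Assumed shape: a boolean flag with enable()/disable()
    def __init__(self):
        self.enabled = False

    def enable(self):
        self.enabled = True

    def disable(self):
        self.enabled = False

class WaveFrames(object):
    def __init__(self, frames, overwrite_lock):
        self._frames = frames
        self._overwrite_lock = overwrite_lock

    def __getitem__(self, index):
        return self._frames[index]

    def __setitem__(self, index, value):
        self._frames[index] = value
        self._overwrite_lock.enable()  # hand edits lock out regeneration

lock = OverwriteLock()
frames = WaveFrames([0] * 32, lock)
frames[0] = 0xF
print(lock.enabled)  # True
<|code_end|>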
Predict the next line for this snippet: <|code_start|>
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
def test_wave_synth_overwrite_locks():
test_project = os.path.join(SCRIPT_DIR, 'test_data', 'UNTOLDST.lsdsng')
<|code_end|>
with the help of current file imports:
import os
from nose.tools import assert_false, assert_true
from .project import load_lsdsng
and context from other files:
# Path: pylsdj/project.py
# def load_lsdsng(filename):
# """Load a Project from a ``.lsdsng`` file.
#
# :param filename: the name of the file from which to load
# :rtype: :py:class:`pylsdj.Project`
# """
#
# # Load preamble data so that we know the name and version of the song
# with open(filename, 'rb') as fp:
# preamble_data = bread.parse(fp, spec.lsdsng_preamble)
#
# with open(filename, 'rb') as fp:
# # Skip the preamble this time around
# fp.seek(int(len(preamble_data) / 8))
#
# # Load compressed data into a block map and use BlockReader to
# # decompress it
# factory = BlockFactory()
#
# while True:
# block_data = bytearray(fp.read(blockutils.BLOCK_SIZE))
#
# if len(block_data) == 0:
# break
#
# block = factory.new_block()
# block.data = block_data
#
# remapped_blocks = filepack.renumber_block_keys(factory.blocks)
#
# reader = BlockReader()
# compressed_data = reader.read(remapped_blocks)
#
# # Now, decompress the raw data and use it and the preamble to construct
# # a Project
# raw_data = filepack.decompress(compressed_data)
#
# name = preamble_data.name
# version = preamble_data.version
# size_blks = int(math.ceil(
# float(len(compressed_data)) / blockutils.BLOCK_SIZE))
#
# return Project(name, version, size_blks, raw_data)
, which may contain function names, class names, or code. Output only the next line. | project = load_lsdsng(test_project) |
Given snippet: <|code_start|>
class Chain(object):
"""A chain is a sequence of phrases for a single channel. Each phrase can be
transposed by a number of semitones.
"""
def __init__(self, song, index):
self.song = song
self.index = index
self.transposes = self.song.song_data.chain_transposes[index]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .utils import ObjectLookupDict
and context:
# Path: pylsdj/utils.py
# class ObjectLookupDict(object):
#
# def __init__(self, id_list, object_list):
# self.id_list = id_list
# self.object_list = object_list
#
# def __getitem__(self, index):
# assert_index_sane(index, len(self.id_list))
#
# return self.object_list[self.id_list[index]]
#
# def __setitem__(self, index, value):
# assert_index_sane(index, len(self.id_list))
#
# self.id_list[index] = value.index
which might include code, classes, or functions. Output only the next line. | self.phrases = ObjectLookupDict( |
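ObjectLookupDict, fully quoted above, presents a list of integer ids as if it were a list of objects, which is how a chain exposes its phrases. A standalone round trip:
<|code_start|>
def assert_index_sane(index, upper_bound_exclusive):
    assert type(index) == int and 0 <= index < upper_bound_exclusive

class ObjectLookupDict(object):
    # Reads map id -> object; writes store the object's index back as the id
    def __init__(self, id_list, object_list):
        self.id_list = id_list
        self.object_list = object_list

    def __getitem__(self, index):
        assert_index_sane(index, len(self.id_list))
        return self.object_list[self.id_list[index]]

    def __setitem__(self, index, value):
        assert_index_sane(index, len(self.id_list))
        self.id_list[index] = value.index

class Phrase(object):
    def __init__(self, index):
        self.index = index

phrases = [Phrase(i) for i in range(4)]
ids = [2, 0, 3]
lookup = ObjectLookupDict(ids, phrases)
print(lookup[0].index)  # 2
lookup[1] = phrases[3]
print(ids)              # [2, 3, 3]
<|code_end|>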
Given snippet: <|code_start|>
class EnvelopeMixin(object):
@property
def envelope(self):
"""the noise instrument's volume envelope (8-bit integer)"""
return self.data.envelope
@envelope.setter
def envelope(self, value):
self.data.envelope = value
@staticmethod
def import_lsdinst(obj, struct_data):
obj.envelope = struct_data['data']['envelope']
@staticmethod
def equal(a, b):
return (isinstance(a, EnvelopeMixin) and isinstance(b, EnvelopeMixin)
and a.envelope == b.envelope)
class VibratoMixin(object):
@property
def vibrato(self):
"""instrument's vibrato settings"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .vibrato import Vibrato
and context:
# Path: pylsdj/vibrato.py
# class Vibrato(object):
#
# def __init__(self, data):
# self._data = data
#
# @property
# def type(self):
# """ ``hf`` (for high frequency sine), ``sawtooth``,
# ``saw`` or ``square``"""
# return self._data.type
#
# @type.setter
# def type(self, value):
# self._data.type = value
#
# @property
# def direction(self):
# """'down' or 'up'"""
# return self._data.direction
#
# @direction.setter
# def direction(self, value):
# self._data.direction = value
#
# def import_lsdinst(self, struct_data):
# self.direction = struct_data['data']['vibrato']['direction']
# self.type = struct_data['data']['vibrato']['type']
#
# def __eq__(self, other):
# return (isinstance(other, Vibrato)
# and self.type == other.type
# and self.direction == other.direction)
which might include code, classes, or functions. Output only the next line. | return Vibrato(self.data.vibrato) |
Continue the code snippet: <|code_start|>
def test_kit_used():
test_rom = os.path.join(os.path.dirname(__file__), "test_data",
"lsdj_onlykits.gb")
<|code_end|>
. Use current file imports:
import os
import tempfile
from nose.tools import assert_equal, raises
from .kits import Kits
from .utils import name_without_zeroes
and context (classes, functions, or code) from other files:
# Path: pylsdj/kits.py
# class Kits(object):
# """A wrapper for an LSDJ ROM's kits"""
#
# def __init__(self, rom_file):
# """Load kits from the provided LSDJ ROM
#
# :param rom_file: path to the LSDJ ROM to load
# """
# with open(rom_file, 'rb') as fp:
# self._data = b.parse(fp, lsdj_rom_kits)
#
# # Don't include the last four kits in the kit list, which are reserved
# # for the speech synthesizer
# self._kits = list(map(lambda k: Kit(k), self._data.kits[:-4]))
#
# def __getitem__(self, i):
# return self._kits[i]
#
# def __str__(self):
# return '\n'.join(map(str, self._kits))
#
# def __iter__(self):
# for kit in self._kits:
# yield kit
#
# Path: pylsdj/utils.py
# def name_without_zeroes(name):
# """
# Return a human-readable name without LSDJ's trailing zeroes.
#
# :param name: the name from which to strip zeroes
# :rtype: the name, without trailing zeroes
# """
# first_zero = name.find(b'\0')
#
# if first_zero == -1:
# return name
# else:
# return str(name[:first_zero])
. Output only the next line. | kits = Kits(test_rom) |
Continue the code snippet: <|code_start|> sigma_position : float
Parameter of the Generic String Kernel controlling the penalty incurred when two n-grams are not sharing the
same position.
max_n_gram_count : int
The number of n-grams in the training string of highest length.
feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
Sparse matrix representation of the n-grams in each training string, where n_samples is the number of training
samples.
"""
def __init__(self, alphabet, n, Y, sigma_position, is_normalized):
"""Create the output feature space for the Generic String kernel
Parameters
----------
alphabet : list
list of letters
n : int
n-gram length
Y : array, [n_samples, ]
The training strings.
sigma_position : float
Parameter of the Generic String Kernel controlling the penalty incurred when two n-grams are not sharing the
same position.
is_normalized : bool
True if the feature space should be normalized, False otherwise.
"""
self.n = int(n)
self.sigma_position = sigma_position
self._alphabet_n_gram_count = len(alphabet) ** n
<|code_end|>
. Use current file imports:
import numpy
from preimage.features.string_feature_space import build_feature_space_with_positions
from preimage.utils.position import compute_position_weights
from preimage.kernels.generic_string import element_wise_kernel
and context (classes, functions, or code) from other files:
# Path: preimage/features/string_feature_space.py
# def build_feature_space_with_positions(alphabet, n, Y):
# """Create the feature space by considering the position of the n-gram in the strings
#
# Parameters
# ----------
# alphabet : list
# list of letters
# n : int
# n-gram length
# Y : array, [n_samples, ]
# The training strings.
#
# Returns
# -------
# feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
# Sparse matrix representation of the n-grams in each string of Y, where n_samples is the number of training
# samples and max_n_gram_count is the number of n-gram in the highest length string of Y.
# """
# n = int(n)
# n_examples = numpy.array(Y).shape[0]
# n_gram_to_index = get_n_gram_to_index(alphabet, n)
# index_pointers, indexes, data = __initialize_pointers_indexes_data(n_examples)
# __build_pointers_indexes_data_with_positions(index_pointers, indexes, data, n, n_gram_to_index, Y)
# n_columns = __get_n_columns(n, len(n_gram_to_index), Y)
# feature_space = __build_csr_matrix(index_pointers, indexes, data, n_examples, n_columns)
# return feature_space
#
# Path: preimage/utils/position.py
# def compute_position_weights(position_index, max_position, sigma_position):
# position_penalties = numpy.array([(position_index - j) ** 2 for j in range(max_position)], dtype=numpy.float)
# position_penalties /= -2. * (sigma_position ** 2)
# return numpy.exp(position_penalties)
#
# Path: preimage/kernels/generic_string.py
# def element_wise_kernel(X, sigma_position, n, alphabet):
# """Compute the similarity of each string in X with itself in the Generic String kernel.
#
# Takes only in account the position penalties and the n-gram of length n. No n-gram penalties (no sigma_c).
#
# Parameters
# ----------
# X : array, shape = [n_samples]
# Strings, where n_samples is the number of examples in X.
# sigma_position : float
# Controls the penalty incurred when two n-grams are not sharing the same position.
# n : int
# N-gram length.
# alphabet : list
# List of letters.
#
# Returns
# -------
# kernel : array, shape = [n_samples]
# Similarity of each string with itself in the GS kernel, where n_samples is the number of examples in X.
# """
# X = numpy.array(X)
# x_lengths = numpy.array([len(x) for x in X], dtype=numpy.int64)
# max_length = numpy.max(x_lengths) - n + 1
# position_matrix = compute_position_weights_matrix(max_length, sigma_position)
# X_int = transform_strings_to_integer_lists(X, alphabet)
# kernel = element_wise_generic_string_kernel(X_int, x_lengths, position_matrix, n)
# return kernel
. Output only the next line. | self.feature_space = build_feature_space_with_positions(alphabet, self.n, Y) |
Given snippet: <|code_start|> def _get_n_gram_count_in_each_y(self, n, Y):
y_n_gram_counts = numpy.array([len(y) - n + 1 for y in Y])
return y_n_gram_counts
def compute_weights(self, y_weights, y_length):
"""Compute the inference graph weights
Parameters
----------
y_weights : array, [n_samples]
Weight of each training example.
y_length : int
Length of the string to predict.
Returns
-------
gs_weights : [len(alphabet)**n, y_n_gram_count * len(alphabet)**n]
Weight of each n-gram at each position.
"""
y_n_gram_count = y_length - self.n + 1
data_copy = numpy.copy(self.feature_space.data)
self.feature_space.data *= self._repeat_each_y_weight_by_y_column_count(y_weights)
weighted_degree_weights = numpy.array(self.feature_space.sum(axis=0))[0].reshape(self.max_n_gram_count, -1)
self.feature_space.data = data_copy
gs_weights = self._transform_in_gs_weights(y_n_gram_count, weighted_degree_weights)
return gs_weights
def _transform_in_gs_weights(self, y_n_gram_count, weighted_degree_weights):
gs_weights = numpy.empty((y_n_gram_count, self._alphabet_n_gram_count))
for i in range(y_n_gram_count):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy
from preimage.features.string_feature_space import build_feature_space_with_positions
from preimage.utils.position import compute_position_weights
from preimage.kernels.generic_string import element_wise_kernel
and context:
# Path: preimage/features/string_feature_space.py
# def build_feature_space_with_positions(alphabet, n, Y):
# """Create the feature space by considering the position of the n-gram in the strings
#
# Parameters
# ----------
# alphabet : list
# list of letters
# n : int
# n-gram length
# Y : array, [n_samples, ]
# The training strings.
#
# Returns
# -------
# feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
# Sparse matrix representation of the n-grams in each string of Y, where n_samples is the number of training
# samples and max_n_gram_count is the number of n-gram in the highest length string of Y.
# """
# n = int(n)
# n_examples = numpy.array(Y).shape[0]
# n_gram_to_index = get_n_gram_to_index(alphabet, n)
# index_pointers, indexes, data = __initialize_pointers_indexes_data(n_examples)
# __build_pointers_indexes_data_with_positions(index_pointers, indexes, data, n, n_gram_to_index, Y)
# n_columns = __get_n_columns(n, len(n_gram_to_index), Y)
# feature_space = __build_csr_matrix(index_pointers, indexes, data, n_examples, n_columns)
# return feature_space
#
# Path: preimage/utils/position.py
# def compute_position_weights(position_index, max_position, sigma_position):
# position_penalties = numpy.array([(position_index - j) ** 2 for j in range(max_position)], dtype=numpy.float)
# position_penalties /= -2. * (sigma_position ** 2)
# return numpy.exp(position_penalties)
#
# Path: preimage/kernels/generic_string.py
# def element_wise_kernel(X, sigma_position, n, alphabet):
# """Compute the similarity of each string in X with itself in the Generic String kernel.
#
# Takes only in account the position penalties and the n-gram of length n. No n-gram penalties (no sigma_c).
#
# Parameters
# ----------
# X : array, shape = [n_samples]
# Strings, where n_samples is the number of examples in X.
# sigma_position : float
# Controls the penalty incurred when two n-grams are not sharing the same position.
# n : int
# N-gram length.
# alphabet : list
# List of letters.
#
# Returns
# -------
# kernel : array, shape = [n_samples]
# Similarity of each string with itself in the GS kernel, where n_samples is the number of examples in X.
# """
# X = numpy.array(X)
# x_lengths = numpy.array([len(x) for x in X], dtype=numpy.int64)
# max_length = numpy.max(x_lengths) - n + 1
# position_matrix = compute_position_weights_matrix(max_length, sigma_position)
# X_int = transform_strings_to_integer_lists(X, alphabet)
# kernel = element_wise_generic_string_kernel(X_int, x_lengths, position_matrix, n)
# return kernel
which might include code, classes, or functions. Output only the next line. | position_weights = compute_position_weights(i, self.max_n_gram_count, self.sigma_position).reshape(-1, 1) |
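compute_position_weights, quoted above, is a Gaussian penalty over the distance between n-gram positions. Standalone, with dtype=float replacing the numpy.float alias that modern NumPy has removed:
<|code_start|>
import numpy

def compute_position_weights(position_index, max_position, sigma_position):
    # exp(-(i - j)^2 / (2 * sigma^2)) for every candidate position j
    penalties = numpy.array([(position_index - j) ** 2 for j in range(max_position)],
                            dtype=float)
    penalties /= -2. * (sigma_position ** 2)
    return numpy.exp(penalties)

print(compute_position_weights(2, 5, 1.0).round(3))
# [0.135 0.607 1.    0.607 0.135]
<|code_end|>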
Predict the next line for this snippet: <|code_start|> """Create the output feature space for the Generic String kernel
Parameters
----------
alphabet : list
list of letters
n : int
n-gram length
Y : array, [n_samples, ]
The training strings.
sigma_position : float
Parameter of the Generic String Kernel controlling the penalty incurred when two n-grams are not sharing the
same position.
is_normalized : bool
True if the feature space should be normalized, False otherwise.
"""
self.n = int(n)
self.sigma_position = sigma_position
self._alphabet_n_gram_count = len(alphabet) ** n
self.feature_space = build_feature_space_with_positions(alphabet, self.n, Y)
self.max_n_gram_count = self._get_max_n_gram_count(self._alphabet_n_gram_count, self.feature_space)
self._normalize(self.feature_space, self.n, Y, sigma_position, is_normalized, alphabet)
def _get_max_n_gram_count(self, alphabet_n_gram_count, feature_space):
n_columns = feature_space.shape[1]
max_n_gram_count = int(n_columns / alphabet_n_gram_count)
return max_n_gram_count
def _normalize(self, feature_space, n, Y, sigma_position, is_normalized, alphabet):
if is_normalized:
<|code_end|>
with the help of current file imports:
import numpy
from preimage.features.string_feature_space import build_feature_space_with_positions
from preimage.utils.position import compute_position_weights
from preimage.kernels.generic_string import element_wise_kernel
and context from other files:
# Path: preimage/features/string_feature_space.py
# def build_feature_space_with_positions(alphabet, n, Y):
# """Create the feature space by considering the position of the n-gram in the strings
#
# Parameters
# ----------
# alphabet : list
# list of letters
# n : int
# n-gram length
# Y : array, [n_samples, ]
# The training strings.
#
# Returns
# -------
# feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
# Sparse matrix representation of the n-grams in each string of Y, where n_samples is the number of training
# samples and max_n_gram_count is the number of n-gram in the highest length string of Y.
# """
# n = int(n)
# n_examples = numpy.array(Y).shape[0]
# n_gram_to_index = get_n_gram_to_index(alphabet, n)
# index_pointers, indexes, data = __initialize_pointers_indexes_data(n_examples)
# __build_pointers_indexes_data_with_positions(index_pointers, indexes, data, n, n_gram_to_index, Y)
# n_columns = __get_n_columns(n, len(n_gram_to_index), Y)
# feature_space = __build_csr_matrix(index_pointers, indexes, data, n_examples, n_columns)
# return feature_space
#
# Path: preimage/utils/position.py
# def compute_position_weights(position_index, max_position, sigma_position):
# position_penalties = numpy.array([(position_index - j) ** 2 for j in range(max_position)], dtype=numpy.float)
# position_penalties /= -2. * (sigma_position ** 2)
# return numpy.exp(position_penalties)
#
# Path: preimage/kernels/generic_string.py
# def element_wise_kernel(X, sigma_position, n, alphabet):
# """Compute the similarity of each string in X with itself in the Generic String kernel.
#
#     Takes into account only the position penalties and the n-grams of length n. No n-gram penalties (no sigma_c).
#
# Parameters
# ----------
# X : array, shape = [n_samples]
# Strings, where n_samples is the number of examples in X.
# sigma_position : float
# Controls the penalty incurred when two n-grams are not sharing the same position.
# n : int
# N-gram length.
# alphabet : list
# List of letters.
#
# Returns
# -------
# kernel : array, shape = [n_samples]
# Similarity of each string with itself in the GS kernel, where n_samples is the number of examples in X.
# """
# X = numpy.array(X)
# x_lengths = numpy.array([len(x) for x in X], dtype=numpy.int64)
# max_length = numpy.max(x_lengths) - n + 1
# position_matrix = compute_position_weights_matrix(max_length, sigma_position)
# X_int = transform_strings_to_integer_lists(X, alphabet)
# kernel = element_wise_generic_string_kernel(X_int, x_lengths, position_matrix, n)
# return kernel
, which may contain function names, class names, or code. Output only the next line. | y_y_similarity = element_wise_kernel(Y, sigma_position, n, alphabet) |
Using the snippet: <|code_start|>__author__ = 'amelie'
class Alphabet:
latin = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z']
def get_n_gram_to_index(alphabet, n):
n_grams = get_n_grams(alphabet, n)
indexes = numpy.arange(len(n_grams))
n_gram_to_index = dict(zip(n_grams, indexes))
return n_gram_to_index
def get_index_to_n_gram(alphabet, n):
n_grams = get_n_grams(alphabet, n)
indexes = numpy.arange(len(n_grams))
index_to_n_gram = dict(zip(indexes, n_grams))
return index_to_n_gram
def get_n_grams(alphabet, n):
n = int(n)
if n <= 0:
<|code_end|>
, determine the next line of code. You have imports:
from itertools import product
from preimage.exceptions.n_gram import InvalidNGramLengthError
import numpy
and context (class names, function names, or code) available:
# Path: preimage/exceptions/n_gram.py
# class InvalidNGramLengthError(ValueError):
# def __init__(self, n, min_n=0):
# self.n = n
# self.min_n = min_n
#
# def __str__(self):
# error_message = 'n must be greater than {:d}. Got: n={:d}'.format(self.min_n, self.n)
# return error_message
. Output only the next line. | raise InvalidNGramLengthError(n) |
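For a concrete feel of these alphabet helpers: with the two-letter alphabet used throughout the tests below, the 2-grams come out in lexicographic order and the index dictionaries are straightforward. A short sketch of what get_n_grams and get_n_gram_to_index produce, matching the two_gram_to_index fixture that appears further down:

from itertools import product
import numpy

alphabet = ['a', 'b']
n_grams = [''.join(n_gram) for n_gram in product(alphabet, repeat=2)]
# ['aa', 'ab', 'ba', 'bb'] -- len(alphabet)**n entries in lexicographic order
n_gram_to_index = dict(zip(n_grams, numpy.arange(len(n_grams))))
# {'aa': 0, 'ab': 1, 'ba': 2, 'bb': 3}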
Continue the code snippet: <|code_start|>__author__ = 'amelie'
class TestPolynomialKernel(unittest2.TestCase):
def setUp(self):
self.X_one = [[1, 2]]
self.X_two = [[1, 0], [1, 3]]
self.gram_matrix_degree_one_x_one_x_one = [[5.]]
self.gram_matrix_degree_two_x_one_x_one = [[25.]]
self.gram_matrix_degree_one_bias_one_x_one_x_one = [[6.]]
self.gram_matrix_normalized_x_one_x_one = [[1.]]
self.gram_matrix_normalized_x_one_x_two = [[0.447213595, 0.989949494]]
def test_degree_one_x_one_x_one_polynomial_kernel_returns_expected_value(self):
<|code_end|>
. Use current file imports:
import unittest2
import numpy.testing
from preimage.kernels.polynomial import PolynomialKernel
and context (classes, functions, or code) from other files:
# Path: preimage/kernels/polynomial.py
# class PolynomialKernel(BaseEstimator):
# """Polynomial kernel.
#
# Attributes
# ----------
# degree : int
# Degree.
# bias : float
# Bias.
# is_normalized : bool
# True if the kernel should be normalized, False otherwise.
# """
# def __init__(self, degree=2, bias=1., is_normalized=True):
# self.degree = degree
# self.bias = bias
# self.is_normalized = is_normalized
#
# def __call__(self, X_one, X_two):
# """Compute the similarity of all the vectors in X1 with all the vectors in X2.
#
# Parameters
# ----------
# X1 : array, shape=[n_samples, n_features]
# Vectors, where n_samples is the number of samples in X1 and n_features is the number of features.
# X2 : array, shape=[n_samples, n_features]
# Vectors, where n_samples is the number of samples in X2 and n_features is the number of features.
#
# Returns
# -------
# gram_matrix : array, shape = [n_samples_x1, n_samples_x2]
# Similarity of each vector of X1 with each vector of X2, where n_samples_x1 is the number of samples in X1
# and n_samples_x2 is the number of samples in X2.
# """
# X_one = numpy.array(X_one)
# X_two = numpy.array(X_two)
# gram_matrix = (numpy.dot(X_one, X_two.T) + self.bias) ** self.degree
# if self.is_normalized:
# gram_matrix = self._normalize_gram_matrix(X_one, X_two, gram_matrix)
# return gram_matrix
#
# def _normalize_gram_matrix(self, X_one, X_two, gram_matrix):
# x_one_diagonal = self._compute_element_wise_similarity(X_one)
# x_two_diagonal = self._compute_element_wise_similarity(X_two)
# gram_matrix = ((gram_matrix / numpy.sqrt(x_one_diagonal)).T / numpy.sqrt(x_two_diagonal)).T
# return gram_matrix
#
# def _compute_element_wise_similarity(self, X):
# x_x_similarity = ((X * X).sum(axis=1) + self.bias) ** self.degree
# x_x_similarity = x_x_similarity.reshape(-1, 1)
# return x_x_similarity
. Output only the next line. | kernel = PolynomialKernel(degree=1, bias=0, is_normalized=False) |
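The normalized fixtures in this setUp can be checked by hand. The numbers only come out for degree=1 and bias=0, so that is presumably what the corresponding test configures; a short numpy verification following the _normalize_gram_matrix logic quoted above:

import numpy

X_one = numpy.array([[1., 2.]])
X_two = numpy.array([[1., 0.], [1., 3.]])
degree, bias = 1, 0.

gram = (numpy.dot(X_one, X_two.T) + bias) ** degree          # [[1., 7.]]
x_one_self = ((X_one * X_one).sum(axis=1) + bias) ** degree  # [5.]
x_two_self = ((X_two * X_two).sum(axis=1) + bias) ** degree  # [1., 10.]
print(gram / numpy.sqrt(numpy.outer(x_one_self, x_two_self)))
# [[0.4472136  0.98994949]] -- the gram_matrix_normalized_x_one_x_two fixture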
Based on the snippet: <|code_start|>
def get_gs_node_creator(n, graph, graph_weights, y_length, n_gram_to_index, n_grams, sigma_position):
"""Create the bounds and the node creator for the branch and bound search of the generic string kernel.
    Only takes into account the position penalties when comparing strings, with no n-gram similarity (no sigma_c).
Parameters
----------
n : int
N-gram length.
graph : array, shape = [n_partitions, len(alphabet)**n]
Array representation of the graph. graph[i, j] represents the maximum value of a string of length i + n ending
with the jth n-gram.
graph_weights : array, shape = [n_partitions, len(alphabet)**n]
Weight of each n-gram.
y_length : int
Length of the string to predict.
n_gram_to_index : dict
Dictionary of n-grams and their corresponding index.
n_grams : list
List of n-grams.
sigma_position : float
Parameter of the Generic String Kernel controlling the penalty incurred when two n-grams are not sharing the
same position.
Returns
-------
node_creator : NodeCreator
Node creator for the branch and bound search instantiated with the generic string bounds
"""
<|code_end|>
, predict the immediate next line with the help of imports:
import numpy
from preimage.inference.bound_calculator import OCRMinBoundCalculator, MaxBoundCalculator, PeptideMinBoundCalculator
from preimage.inference.node_creator import NodeCreator
from preimage.utils.position import compute_position_weights
from preimage.utils.alphabet import get_n_gram_to_index, get_n_grams
and context (classes, functions, sometimes code) from other files:
# Path: preimage/utils/position.py
# def compute_position_weights(position_index, max_position, sigma_position):
# position_penalties = numpy.array([(position_index - j) ** 2 for j in range(max_position)], dtype=numpy.float)
# position_penalties /= -2. * (sigma_position ** 2)
# return numpy.exp(position_penalties)
#
# Path: preimage/utils/alphabet.py
# def get_n_gram_to_index(alphabet, n):
# n_grams = get_n_grams(alphabet, n)
# indexes = numpy.arange(len(n_grams))
# n_gram_to_index = dict(zip(n_grams, indexes))
# return n_gram_to_index
#
# def get_n_grams(alphabet, n):
# n = int(n)
# if n <= 0:
# raise InvalidNGramLengthError(n)
# n_grams = [''.join(n_gram) for n_gram in product(alphabet, repeat=n)]
# return n_grams
. Output only the next line. | position_weights = compute_position_weights(0, y_length, sigma_position) |
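The position_weights seeded into these bound calculators are the 1-D variant of the position matrix: a Gaussian-shaped vector centred on position_index. A quick check against the compute_position_weights source quoted in the context, for position_index=0, max_position=4, sigma_position=1:

import numpy

position_penalties = numpy.array([(0 - j) ** 2 for j in range(4)], dtype=float)
print(numpy.exp(position_penalties / (-2. * 1. ** 2)))
# [1.         0.60653066 0.13533528 0.011109  ]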
Using the snippet: <|code_start|> return node_creator
def get_gs_similarity_node_creator(alphabet, n, graph, graph_weights, y_length, gs_kernel):
"""Create the bounds and the node creator for the branch and bound search of the generic string kernel.
    Takes into account the position and n-gram penalties when comparing strings (sigma_p and sigma_c in the
    GS kernel).
Parameters
----------
alphabet : list
List of letters.
n : int
N-gram length.
graph : array, shape = [n_partitions, len(alphabet)**n]
Array representation of the graph. graph[i, j] represents the maximum value of a string of length i + n ending
with the jth n-gram.
graph_weights : array, shape = [n_partitions, len(alphabet)**n]
Weight of each n-gram.
y_length : int
Length of the string to predict.
gs_kernel : GenericStringKernel
Generic String Kernel with position and n-gram penalties.
Returns
-------
node_creator : NodeCreator
Node creator for the branch and bound search instantiated with the generic string bounds
"""
<|code_end|>
, determine the next line of code. You have imports:
import numpy
from preimage.inference.bound_calculator import OCRMinBoundCalculator, MaxBoundCalculator, PeptideMinBoundCalculator
from preimage.inference.node_creator import NodeCreator
from preimage.utils.position import compute_position_weights
from preimage.utils.alphabet import get_n_gram_to_index, get_n_grams
and context (class names, function names, or code) available:
# Path: preimage/utils/position.py
# def compute_position_weights(position_index, max_position, sigma_position):
# position_penalties = numpy.array([(position_index - j) ** 2 for j in range(max_position)], dtype=numpy.float)
# position_penalties /= -2. * (sigma_position ** 2)
# return numpy.exp(position_penalties)
#
# Path: preimage/utils/alphabet.py
# def get_n_gram_to_index(alphabet, n):
# n_grams = get_n_grams(alphabet, n)
# indexes = numpy.arange(len(n_grams))
# n_gram_to_index = dict(zip(n_grams, indexes))
# return n_gram_to_index
#
# def get_n_grams(alphabet, n):
# n = int(n)
# if n <= 0:
# raise InvalidNGramLengthError(n)
# n_grams = [''.join(n_gram) for n_gram in product(alphabet, repeat=n)]
# return n_grams
. Output only the next line. | n_gram_to_index = get_n_gram_to_index(alphabet, n) |
Continue the code snippet: <|code_start|>
def get_gs_similarity_node_creator(alphabet, n, graph, graph_weights, y_length, gs_kernel):
"""Create the bounds and the node creator for the branch and bound search of the generic string kernel.
    Takes into account the position and n-gram penalties when comparing strings (sigma_p and sigma_c in the
    GS kernel).
Parameters
----------
alphabet : list
List of letters.
n : int
N-gram length.
graph : array, shape = [n_partitions, len(alphabet)**n]
Array representation of the graph. graph[i, j] represents the maximum value of a string of length i + n ending
with the jth n-gram.
graph_weights : array, shape = [n_partitions, len(alphabet)**n]
Weight of each n-gram.
y_length : int
Length of the string to predict.
gs_kernel : GenericStringKernel
Generic String Kernel with position and n-gram penalties.
Returns
-------
node_creator : NodeCreator
Node creator for the branch and bound search instantiated with the generic string bounds
"""
n_gram_to_index = get_n_gram_to_index(alphabet, n)
letter_to_index = get_n_gram_to_index(alphabet, 1)
<|code_end|>
. Use current file imports:
import numpy
from preimage.inference.bound_calculator import OCRMinBoundCalculator, MaxBoundCalculator, PeptideMinBoundCalculator
from preimage.inference.node_creator import NodeCreator
from preimage.utils.position import compute_position_weights
from preimage.utils.alphabet import get_n_gram_to_index, get_n_grams
and context (classes, functions, or code) from other files:
# Path: preimage/utils/position.py
# def compute_position_weights(position_index, max_position, sigma_position):
# position_penalties = numpy.array([(position_index - j) ** 2 for j in range(max_position)], dtype=numpy.float)
# position_penalties /= -2. * (sigma_position ** 2)
# return numpy.exp(position_penalties)
#
# Path: preimage/utils/alphabet.py
# def get_n_gram_to_index(alphabet, n):
# n_grams = get_n_grams(alphabet, n)
# indexes = numpy.arange(len(n_grams))
# n_gram_to_index = dict(zip(n_grams, indexes))
# return n_gram_to_index
#
# def get_n_grams(alphabet, n):
# n = int(n)
# if n <= 0:
# raise InvalidNGramLengthError(n)
# n_grams = [''.join(n_gram) for n_gram in product(alphabet, repeat=n)]
# return n_grams
. Output only the next line. | n_grams = get_n_grams(alphabet, n) |
Given the code snippet: <|code_start|>__author__ = 'amelie'
class TestStructuredKernelRidgeRegression(unittest2.TestCase):
def setUp(self):
self.setup_x_and_y()
self.setup_kernel()
self.setup_learners()
def setup_x_and_y(self):
self.X_train = [[1., 0], [0.5, 1]]
self.X_test = [[0, 0], [1, 0], [0, 1]]
self.Y_train = ['abc', 'ba']
self.Y_test = ['ab', 'baa', 'a']
self.y_test_lengths = [2, 3, 1]
self.y_train_lengths = [1, 2]
def setup_kernel(self):
self.gram_matrix = numpy.array([[1., 0.5], [0.5, 1.]])
self.gram_matrix_copy = numpy.array([[1., 0.5], [0.5, 1.]])
self.gram_matrix_inverse = [[1.33333333, -0.66666667], [-0.66666667, 1.33333333]]
self.gram_matrix_plus_one_half_diagonal_inverse = [[0.75, -0.25], [-0.25, 0.75]]
self.gram_matrix_x_train_x_test = [[0, 1, 0], [0, 0, 1]]
self.Y_weights = [[0, 0], [1.33333333, -0.66666667], [-0.66666667, 1.33333333]]
self.kernel_mock = Mock(side_effect=[self.gram_matrix, self.gram_matrix_x_train_x_test])
def setup_learners(self):
self.model_mock = Mock()
<|code_end|>
, generate the next line using the imports in this file:
import unittest2
import numpy
import numpy.testing
from mock import Mock
from preimage.learners.structured_krr import StructuredKernelRidgeRegression, InferenceFitParameters
and context (functions, classes, or occasionally code) from other files:
# Path: preimage/learners/structured_krr.py
# class StructuredKernelRidgeRegression(BaseEstimator):
# """Structured Kernel Ridge Regression.
#
# Attributes
# ----------
# alpha : float
# Regularization term.
# kernel : Callable
# Kernel function that computes the similarity between the samples.
# inference_model : Model
# Inference model used to solve the pre-image problem.
# weights_ : array, shape=[n_samples, n_samples]
# Learned weights, where n_samples is the number of training samples.
# X_train_ : array, shape=[n_samples, n_features]
# Training samples.
# """
# def __init__(self, alpha, kernel, inference_model):
# self.alpha = alpha
# self.kernel = kernel
# self.inference_model = inference_model
# self.weights_ = None
# self.X_train_ = None
#
# def fit(self, X, Y, y_lengths=None):
# """Learn the weights.
#
# Parameters
# ----------
# X : array, shape=[n_samples, n_features]
#             Training vectors, where n_samples is the number of samples and n_features is the number of features
# in X.
# Y : array, shape=[n_samples, ]
# Target strings, where n_samples is the number of training samples.
# y_lengths : array, shape=[n_samples]
# Length of the training strings.
#
# Returns
# -------
#         self : StructuredKernelRidgeRegression
#             The fitted estimator.
# """
# gram_matrix = self.kernel(X, X)
# self.weights_ = self._solve(gram_matrix)
# self.X_train_ = X
# inference_parameters = InferenceFitParameters(self.weights_, gram_matrix, Y, y_lengths)
# self.inference_model.fit(inference_parameters)
# return self
#
# def _solve(self, gram_matrix):
# diagonal = numpy.copy(gram_matrix.diagonal())
# numpy.fill_diagonal(gram_matrix, diagonal + self.alpha)
# weights = linalg.inv(gram_matrix)
# numpy.fill_diagonal(gram_matrix, diagonal)
# return weights
#
# def predict(self, X, y_lengths=None):
# """Predict the target strings.
#
# Parameters
# ----------
# X : array, shape=[n_samples, n_features]
#             Testing vectors, where n_samples is the number of samples and n_features is the number of features
# in X.
# y_lengths : array, shape=[n_samples]
# Length of the strings to predict, where n_samples is the number of testing samples.
#
# Returns
# -------
# Y_predicted : array, shape = [n_samples]
# Predicted strings, where n_samples is the number of testing samples.
# """
# if self.weights_ is None:
# raise ValueError("The fit function must be called before predict")
# gram_matrix = self.kernel(self.X_train_, X)
# Y_weights = numpy.dot(self.weights_, gram_matrix).T
# Y_predicted = self.inference_model.predict(Y_weights, y_lengths)
# return Y_predicted
#
# class InferenceFitParameters:
# """Parameters for the inference model.
#
#     That way inference_model.fit(parameters) doesn't receive unused parameters but only accesses the ones it needs.
# Attributes
# ----------
# weights : array, shape = [n_samples, n_samples]
# Learned weights, where n_samples is the number of training samples.
# gram_matrix : array, shape = [n_samples, n_samples]
#         Gram matrix of the training samples.
# Y_train : array, shape = [n_samples, ]
# Training strings.
# y_lengths : array, shape = [n_samples]
# Length of each training string in Y_train.
# """
# def __init__(self, weights, gram_matrix, Y, y_lengths):
# self.weights = weights
# self.gram_matrix = gram_matrix
# self.Y_train = Y
# self.y_lengths = y_lengths
. Output only the next line. | self.structured_krr = StructuredKernelRidgeRegression(alpha=0, kernel=self.kernel_mock, |
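The Gram-matrix fixtures in this setUp follow directly from the _solve method quoted above, which adds alpha to the diagonal before inverting; a quick numerical check using nothing beyond numpy.linalg:

import numpy

gram_matrix = numpy.array([[1., 0.5], [0.5, 1.]])
print(numpy.linalg.inv(gram_matrix))                        # alpha = 0
# [[ 1.33333333 -0.66666667]
#  [-0.66666667  1.33333333]]
print(numpy.linalg.inv(gram_matrix + 0.5 * numpy.eye(2)))   # alpha = 0.5
# [[ 0.75 -0.25]
#  [-0.25  0.75]]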
Here is a snippet: <|code_start|> self.gram_matrix_plus_one_half_diagonal_inverse = [[0.75, -0.25], [-0.25, 0.75]]
self.gram_matrix_x_train_x_test = [[0, 1, 0], [0, 0, 1]]
self.Y_weights = [[0, 0], [1.33333333, -0.66666667], [-0.66666667, 1.33333333]]
self.kernel_mock = Mock(side_effect=[self.gram_matrix, self.gram_matrix_x_train_x_test])
def setup_learners(self):
self.model_mock = Mock()
self.structured_krr = StructuredKernelRidgeRegression(alpha=0, kernel=self.kernel_mock,
inference_model=self.model_mock)
def test_alpha_zero_structured_krr_fit_weights_is_gram_matrix_inverse(self):
self.structured_krr.fit(self.X_train, self.Y_train)
numpy.testing.assert_almost_equal(self.structured_krr.weights_, self.gram_matrix_inverse)
def test_alpha_one_half_structured_krr_fit_weights_is_inverse_of_gram_matrix_plus_one_half_diagonal(self):
structured_krr = StructuredKernelRidgeRegression(alpha=0.5, kernel=self.kernel_mock,
inference_model=self.model_mock)
structured_krr.fit(self.X_train, self.Y_train)
numpy.testing.assert_almost_equal(structured_krr.weights_, self.gram_matrix_plus_one_half_diagonal_inverse)
def test_structured_krr_fit_x_train_is_equal_to_x(self):
self.structured_krr.fit(self.X_train, self.Y_train)
numpy.testing.assert_almost_equal(self.structured_krr.X_train_, self.X_train)
def test_structured_krr_fit_creates_correct_inference_fit_parameters(self):
inference_parameter_mock = Mock(return_value=None)
<|code_end|>
. Write the next line using the current file imports:
import unittest2
import numpy
import numpy.testing
from mock import Mock
from preimage.learners.structured_krr import StructuredKernelRidgeRegression, InferenceFitParameters
and context from other files:
# Path: preimage/learners/structured_krr.py
# class StructuredKernelRidgeRegression(BaseEstimator):
# """Structured Kernel Ridge Regression.
#
# Attributes
# ----------
# alpha : float
# Regularization term.
# kernel : Callable
# Kernel function that computes the similarity between the samples.
# inference_model : Model
# Inference model used to solve the pre-image problem.
# weights_ : array, shape=[n_samples, n_samples]
# Learned weights, where n_samples is the number of training samples.
# X_train_ : array, shape=[n_samples, n_features]
# Training samples.
# """
# def __init__(self, alpha, kernel, inference_model):
# self.alpha = alpha
# self.kernel = kernel
# self.inference_model = inference_model
# self.weights_ = None
# self.X_train_ = None
#
# def fit(self, X, Y, y_lengths=None):
# """Learn the weights.
#
# Parameters
# ----------
# X : array, shape=[n_samples, n_features]
#             Training vectors, where n_samples is the number of samples and n_features is the number of features
# in X.
# Y : array, shape=[n_samples, ]
# Target strings, where n_samples is the number of training samples.
# y_lengths : array, shape=[n_samples]
# Length of the training strings.
#
# Returns
# -------
# gram_matrix : array, shape = [n_samples_x1, n_samples_x2]
#         self : StructuredKernelRidgeRegression
#             The fitted estimator.
# gram_matrix = self.kernel(X, X)
# self.weights_ = self._solve(gram_matrix)
# self.X_train_ = X
# inference_parameters = InferenceFitParameters(self.weights_, gram_matrix, Y, y_lengths)
# self.inference_model.fit(inference_parameters)
# return self
#
# def _solve(self, gram_matrix):
# diagonal = numpy.copy(gram_matrix.diagonal())
# numpy.fill_diagonal(gram_matrix, diagonal + self.alpha)
# weights = linalg.inv(gram_matrix)
# numpy.fill_diagonal(gram_matrix, diagonal)
# return weights
#
# def predict(self, X, y_lengths=None):
# """Predict the target strings.
#
# Parameters
# ----------
# X : array, shape=[n_samples, n_features]
#             Testing vectors, where n_samples is the number of samples and n_features is the number of features
# in X.
# y_lengths : array, shape=[n_samples]
# Length of the strings to predict, where n_samples is the number of testing samples.
#
# Returns
# -------
# Y_predicted : array, shape = [n_samples]
# Predicted strings, where n_samples is the number of testing samples.
# """
# if self.weights_ is None:
# raise ValueError("The fit function must be called before predict")
# gram_matrix = self.kernel(self.X_train_, X)
# Y_weights = numpy.dot(self.weights_, gram_matrix).T
# Y_predicted = self.inference_model.predict(Y_weights, y_lengths)
# return Y_predicted
#
# class InferenceFitParameters:
# """Parameters for the inference model.
#
#     That way inference_model.fit(parameters) doesn't receive unused parameters but only accesses the ones it needs.
# Attributes
# ----------
# weights : array, shape = [n_samples, n_samples]
# Learned weights, where n_samples is the number of training samples.
# gram_matrix : array, shape = [n_samples, n_samples]
#         Gram matrix of the training samples.
# Y_train : array, shape = [n_samples, ]
# Training strings.
# y_lengths : array, shape = [n_samples]
# Length of each training string in Y_train.
# """
# def __init__(self, weights, gram_matrix, Y, y_lengths):
# self.weights = weights
# self.gram_matrix = gram_matrix
# self.Y_train = Y
# self.y_lengths = y_lengths
, which may include functions, classes, or code. Output only the next line. | InferenceFitParameters.__init__ = inference_parameter_mock |
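The Y_weights fixture above is likewise reproducible from the predict method quoted in the context: weights_ (here the inverse of the Gram matrix) multiplied by the train/test Gram matrix, then transposed:

import numpy

weights = numpy.array([[1.33333333, -0.66666667],
                       [-0.66666667, 1.33333333]])
gram_train_test = numpy.array([[0., 1., 0.], [0., 0., 1.]])
print(numpy.dot(weights, gram_train_test).T)
# [[ 0.          0.        ]
#  [ 1.33333333 -0.66666667]
#  [-0.66666667  1.33333333]]  -- the self.Y_weights fixture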
Predict the next line after this snippet: <|code_start|> self.setup_alphabet()
self.setup_feature_space()
self.setup_patch()
def setup_alphabet(self):
self.alphabet = ['a', 'b']
self.b = ['b']
self.abb = ['abb']
self.abaaa = ['abaaa']
self.abb_abaaa = self.abb + self.abaaa
def setup_feature_space(self):
self.feature_space_one_gram_b = [[0, 1]]
self.feature_space_one_gram_abb = [[1, 2]]
self.feature_space_two_gram_abaaa = [[2, 1, 1, 0]]
self.feature_space_two_gram_abb_abaaa = [[0, 1, 0, 1], [2, 1, 1, 0]]
self.weighted_degree_one_gram_abb = [[1, 0, 0, 1, 0, 1]]
self.weighted_degree_two_gram_abaaa = [[0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0]]
self.weighted_degree_two_gram_abb_abaaa = [[0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0]]
def setup_patch(self):
self.one_gram_to_index = {'a': 0, 'b': 1}
self.two_gram_to_index = {'aa': 0, 'ab': 1, 'ba': 2, 'bb': 3}
self.two_gram_to_index_without_bb = {'aa': 0, 'ab': 1, 'ba': 2}
self.n_gram_to_index_patch = patch('preimage.features.string_feature_space.get_n_gram_to_index')
def test_one_gram_one_letter_y_n_gram_feature_space_has_one_n_gram(self):
self.n_gram_to_index_patch.start().return_value = self.one_gram_to_index
<|code_end|>
using the current file's imports:
import unittest2
import numpy.testing
from mock import patch
from preimage.features.string_feature_space import build_feature_space_without_positions
from preimage.features.string_feature_space import build_feature_space_with_positions
from preimage.exceptions.n_gram import InvalidNGramError
and any relevant context from other files:
# Path: preimage/features/string_feature_space.py
# def build_feature_space_without_positions(alphabet, n, Y):
# """Create the feature space without considering the position of the n-gram in the strings
#
# Parameters
# ----------
# alphabet : list
# List of letters.
# n : int
# N-gram length.
# Y : array, [n_samples, ]
# Training strings.
#
# Returns
# -------
# feature_space : sparse matrix, shape = [n_samples, len(alphabet)**n]
# Sparse matrix representation of the n-grams in each training string, where n_samples is the number of samples
# in Y.
# """
# n = int(n)
# n_examples = numpy.array(Y).shape[0]
# n_gram_to_index = get_n_gram_to_index(alphabet, n)
# index_pointers, indexes, data = __initialize_pointers_indexes_data(n_examples)
# __build_y_pointers_indexes_data(index_pointers, indexes, data, n, n_gram_to_index, Y)
# feature_space = __build_csr_matrix(index_pointers, indexes, data, n_examples, len(n_gram_to_index))
# return feature_space
#
# Path: preimage/features/string_feature_space.py
# def build_feature_space_with_positions(alphabet, n, Y):
# """Create the feature space by considering the position of the n-gram in the strings
#
# Parameters
# ----------
# alphabet : list
# list of letters
# n : int
# n-gram length
# Y : array, [n_samples, ]
# The training strings.
#
# Returns
# -------
# feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
# Sparse matrix representation of the n-grams in each string of Y, where n_samples is the number of training
#         samples and max_n_gram_count is the number of n-grams in the longest string of Y.
# """
# n = int(n)
# n_examples = numpy.array(Y).shape[0]
# n_gram_to_index = get_n_gram_to_index(alphabet, n)
# index_pointers, indexes, data = __initialize_pointers_indexes_data(n_examples)
# __build_pointers_indexes_data_with_positions(index_pointers, indexes, data, n, n_gram_to_index, Y)
# n_columns = __get_n_columns(n, len(n_gram_to_index), Y)
# feature_space = __build_csr_matrix(index_pointers, indexes, data, n_examples, n_columns)
# return feature_space
#
# Path: preimage/exceptions/n_gram.py
# class InvalidNGramError(ValueError):
# def __init__(self, n, n_gram):
# self.n = n
# self.n_gram = n_gram
#
# def __str__(self):
# error_message = "{} is not a possible {:d}_gram for this alphabet".format(self.n_gram, self.n)
# return error_message
. Output only the next line. | feature_space = build_feature_space_without_positions(alphabet=self.alphabet, n=1, Y=self.b) |
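The position-free fixtures in these tests are plain n-gram counts. Below is a dense, single-string stand-in for build_feature_space_without_positions (the real builder returns a CSR matrix over all of Y and raises InvalidNGramError for n-grams outside the alphabet, where this sketch would raise a bare KeyError; the helper name n_gram_counts is illustrative):

import numpy
from itertools import product

def n_gram_counts(y, alphabet, n):
    n_grams = [''.join(g) for g in product(alphabet, repeat=n)]
    index = {gram: i for i, gram in enumerate(n_grams)}
    counts = numpy.zeros(len(n_grams))
    for start in range(len(y) - n + 1):
        counts[index[y[start:start + n]]] += 1  # count each sliding n-gram
    return counts

print(n_gram_counts('abb', ['a', 'b'], 1))    # [1. 2.]
print(n_gram_counts('abaaa', ['a', 'b'], 2))  # [2. 1. 1. 0.]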
Given the code snippet: <|code_start|> def test_one_gram_three_letter_y_n_gram_feature_space_has_three_n_grams(self):
self.n_gram_to_index_patch.start().return_value = self.one_gram_to_index
feature_space = build_feature_space_without_positions(alphabet=self.alphabet, n=1, Y=self.abb)
numpy.testing.assert_array_equal(feature_space.toarray(), self.feature_space_one_gram_abb)
def test_two_gram_five_letter_y_n_gram_feature_space_has_four_two_grams(self):
self.n_gram_to_index_patch.start().return_value = self.two_gram_to_index
feature_space = build_feature_space_without_positions(alphabet=self.alphabet, n=2, Y=self.abaaa)
numpy.testing.assert_array_equal(feature_space.toarray(), self.feature_space_two_gram_abaaa)
def test_two_gram_two_y_n_gram_feature_space_builds_expected_feature_space(self):
self.n_gram_to_index_patch.start().return_value = self.two_gram_to_index
feature_space = build_feature_space_without_positions(alphabet=self.alphabet, n=2, Y=self.abb_abaaa)
numpy.testing.assert_array_equal(feature_space.toarray(), self.feature_space_two_gram_abb_abaaa)
def test_two_gram_not_in_alphabet_n_gram_feature_space_raises_error(self):
self.n_gram_to_index_patch.start().return_value = self.two_gram_to_index_without_bb
with self.assertRaises(InvalidNGramError):
build_feature_space_without_positions(alphabet=self.alphabet, n=2, Y=self.abb)
def test_one_gram_one_letter_y_weighted_degree_feature_space_has_one_n_gram(self):
self.n_gram_to_index_patch.start().return_value = self.one_gram_to_index
<|code_end|>
, generate the next line using the imports in this file:
import unittest2
import numpy.testing
from mock import patch
from preimage.features.string_feature_space import build_feature_space_without_positions
from preimage.features.string_feature_space import build_feature_space_with_positions
from preimage.exceptions.n_gram import InvalidNGramError
and context (functions, classes, or occasionally code) from other files:
# Path: preimage/features/string_feature_space.py
# def build_feature_space_without_positions(alphabet, n, Y):
# """Create the feature space without considering the position of the n-gram in the strings
#
# Parameters
# ----------
# alphabet : list
# List of letters.
# n : int
# N-gram length.
# Y : array, [n_samples, ]
# Training strings.
#
# Returns
# -------
# feature_space : sparse matrix, shape = [n_samples, len(alphabet)**n]
# Sparse matrix representation of the n-grams in each training string, where n_samples is the number of samples
# in Y.
# """
# n = int(n)
# n_examples = numpy.array(Y).shape[0]
# n_gram_to_index = get_n_gram_to_index(alphabet, n)
# index_pointers, indexes, data = __initialize_pointers_indexes_data(n_examples)
# __build_y_pointers_indexes_data(index_pointers, indexes, data, n, n_gram_to_index, Y)
# feature_space = __build_csr_matrix(index_pointers, indexes, data, n_examples, len(n_gram_to_index))
# return feature_space
#
# Path: preimage/features/string_feature_space.py
# def build_feature_space_with_positions(alphabet, n, Y):
# """Create the feature space by considering the position of the n-gram in the strings
#
# Parameters
# ----------
# alphabet : list
# list of letters
# n : int
# n-gram length
# Y : array, [n_samples, ]
# The training strings.
#
# Returns
# -------
# feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
# Sparse matrix representation of the n-grams in each string of Y, where n_samples is the number of training
#         samples and max_n_gram_count is the number of n-grams in the longest string of Y.
# """
# n = int(n)
# n_examples = numpy.array(Y).shape[0]
# n_gram_to_index = get_n_gram_to_index(alphabet, n)
# index_pointers, indexes, data = __initialize_pointers_indexes_data(n_examples)
# __build_pointers_indexes_data_with_positions(index_pointers, indexes, data, n, n_gram_to_index, Y)
# n_columns = __get_n_columns(n, len(n_gram_to_index), Y)
# feature_space = __build_csr_matrix(index_pointers, indexes, data, n_examples, n_columns)
# return feature_space
#
# Path: preimage/exceptions/n_gram.py
# class InvalidNGramError(ValueError):
# def __init__(self, n, n_gram):
# self.n = n
# self.n_gram = n_gram
#
# def __str__(self):
# error_message = "{} is not a possible {:d}_gram for this alphabet".format(self.n_gram, self.n)
# return error_message
. Output only the next line. | feature_space = build_feature_space_with_positions(n=1, alphabet=self.alphabet, Y=self.b) |
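With positions, each n-gram position gets its own block of len(alphabet)**n columns, which is exactly how the weighted-degree fixtures above decode. A dense single-string sketch of that layout (positional_features is an illustrative name, not the library's):

import numpy
from itertools import product

def positional_features(y, alphabet, n):
    n_grams = [''.join(g) for g in product(alphabet, repeat=n)]
    index = {gram: i for i, gram in enumerate(n_grams)}
    n_positions = len(y) - n + 1
    features = numpy.zeros(n_positions * len(n_grams))
    for start in range(n_positions):
        # block `start` of the row marks which n-gram occupies that position
        features[start * len(n_grams) + index[y[start:start + n]]] = 1
    return features

print(positional_features('abb', ['a', 'b'], 1))
# [1. 0. 0. 1. 0. 1.]  -- the weighted_degree_one_gram_abb fixture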
Using the snippet: <|code_start|> self.n_gram_to_index_patch.start().return_value = self.one_gram_to_index
feature_space = build_feature_space_without_positions(alphabet=self.alphabet, n=1, Y=self.b)
numpy.testing.assert_array_equal(feature_space.toarray(), self.feature_space_one_gram_b)
def test_one_gram_three_letter_y_n_gram_feature_space_has_three_n_grams(self):
self.n_gram_to_index_patch.start().return_value = self.one_gram_to_index
feature_space = build_feature_space_without_positions(alphabet=self.alphabet, n=1, Y=self.abb)
numpy.testing.assert_array_equal(feature_space.toarray(), self.feature_space_one_gram_abb)
def test_two_gram_five_letter_y_n_gram_feature_space_has_four_two_grams(self):
self.n_gram_to_index_patch.start().return_value = self.two_gram_to_index
feature_space = build_feature_space_without_positions(alphabet=self.alphabet, n=2, Y=self.abaaa)
numpy.testing.assert_array_equal(feature_space.toarray(), self.feature_space_two_gram_abaaa)
def test_two_gram_two_y_n_gram_feature_space_builds_expected_feature_space(self):
self.n_gram_to_index_patch.start().return_value = self.two_gram_to_index
feature_space = build_feature_space_without_positions(alphabet=self.alphabet, n=2, Y=self.abb_abaaa)
numpy.testing.assert_array_equal(feature_space.toarray(), self.feature_space_two_gram_abb_abaaa)
def test_two_gram_not_in_alphabet_n_gram_feature_space_raises_error(self):
self.n_gram_to_index_patch.start().return_value = self.two_gram_to_index_without_bb
<|code_end|>
, determine the next line of code. You have imports:
import unittest2
import numpy.testing
from mock import patch
from preimage.features.string_feature_space import build_feature_space_without_positions
from preimage.features.string_feature_space import build_feature_space_with_positions
from preimage.exceptions.n_gram import InvalidNGramError
and context (class names, function names, or code) available:
# Path: preimage/features/string_feature_space.py
# def build_feature_space_without_positions(alphabet, n, Y):
# """Create the feature space without considering the position of the n-gram in the strings
#
# Parameters
# ----------
# alphabet : list
# List of letters.
# n : int
# N-gram length.
# Y : array, [n_samples, ]
# Training strings.
#
# Returns
# -------
# feature_space : sparse matrix, shape = [n_samples, len(alphabet)**n]
# Sparse matrix representation of the n-grams in each training string, where n_samples is the number of samples
# in Y.
# """
# n = int(n)
# n_examples = numpy.array(Y).shape[0]
# n_gram_to_index = get_n_gram_to_index(alphabet, n)
# index_pointers, indexes, data = __initialize_pointers_indexes_data(n_examples)
# __build_y_pointers_indexes_data(index_pointers, indexes, data, n, n_gram_to_index, Y)
# feature_space = __build_csr_matrix(index_pointers, indexes, data, n_examples, len(n_gram_to_index))
# return feature_space
#
# Path: preimage/features/string_feature_space.py
# def build_feature_space_with_positions(alphabet, n, Y):
# """Create the feature space by considering the position of the n-gram in the strings
#
# Parameters
# ----------
# alphabet : list
# list of letters
# n : int
# n-gram length
# Y : array, [n_samples, ]
# The training strings.
#
# Returns
# -------
# feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
# Sparse matrix representation of the n-grams in each string of Y, where n_samples is the number of training
#         samples and max_n_gram_count is the number of n-grams in the longest string of Y.
# """
# n = int(n)
# n_examples = numpy.array(Y).shape[0]
# n_gram_to_index = get_n_gram_to_index(alphabet, n)
# index_pointers, indexes, data = __initialize_pointers_indexes_data(n_examples)
# __build_pointers_indexes_data_with_positions(index_pointers, indexes, data, n, n_gram_to_index, Y)
# n_columns = __get_n_columns(n, len(n_gram_to_index), Y)
# feature_space = __build_csr_matrix(index_pointers, indexes, data, n_examples, n_columns)
# return feature_space
#
# Path: preimage/exceptions/n_gram.py
# class InvalidNGramError(ValueError):
# def __init__(self, n, n_gram):
# self.n = n
# self.n_gram = n_gram
#
# def __str__(self):
# error_message = "{} is not a possible {:d}_gram for this alphabet".format(self.n_gram, self.n)
# return error_message
. Output only the next line. | with self.assertRaises(InvalidNGramError): |
Based on the snippet: <|code_start|> Training dataset.
"""
return __load_peptide_dataset('camps.pickle')
def load_bpps_dataset():
"""Load the BPPs dataset consisting of 31 bradykinin-potentiating pentapeptides.
Returns
-------
train_dataset: StandardDataset
Training dataset.
"""
return __load_peptide_dataset('bpps.pickle')
def __load_peptide_dataset(file_name):
data = __load_pickle_file(file_name)
X = numpy.array(data['X'], dtype=numpy.str)
train_dataset = StandardDataset(X, data['y'])
return train_dataset
def __load_pickle_file(file_name):
module_path = dirname(__file__)
data_file = open(join(module_path, file_name), 'rb')
data = pickle.load(data_file)
return data
<|code_end|>
, predict the immediate next line with the help of imports:
import pickle
import gzip
import numpy
from os.path import dirname, join
from preimage.datasets.amino_acid_file import AminoAcidFile
and context (classes, functions, sometimes code) from other files:
# Path: preimage/datasets/amino_acid_file.py
# class AminoAcidFile:
# """Contain file name of amino acid property matrix from the literature.
#
# For more amino acid files see: http://graal.ift.ulaval.ca/bioinformatics/gs-kernel/.
#
# Attributes
# ----------
# blosum62_natural : string
# file name of an amino acid matrix.
# """
# blosum62_natural = 'AA.blosum62.natural.dat'
. Output only the next line. | def load_amino_acids_and_descriptors(file_name=AminoAcidFile.blosum62_natural): |
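The amino-acid loader this row predicts is quoted in full further down; its parsing amounts to splitting whitespace-delimited rows into a letter column and a float matrix. A sketch on two hypothetical rows (the values below are made up for illustration, not taken from AA.blosum62.natural.dat):

import numpy

lines = ['A 4.0 0.0\n', 'R 0.0 5.0\n']            # hypothetical file contents
splitted_lines = numpy.array([line.split() for line in lines])
amino_acids = [str(letter) for letter in splitted_lines[:, 0]]  # ['A', 'R']
descriptors = numpy.array(splitted_lines[:, 1:], dtype=float)   # shape (2, 2)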
Here is a snippet: <|code_start|> Creates a sparse matrix representation of the n-grams in each training string. The representation takes into account
the positions of the n-grams in the strings. This is used to compute the weights of the graph during the inference
phase.
Attributes
----------
n : int
N-gram length.
max_n_gram_count : int
        Number of n-grams in the longest training string.
feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
Sparse matrix representation of the n-grams in each training string, where n_samples is the number of training
samples.
"""
def __init__(self, alphabet, n, Y, is_normalized):
"""Create the output feature space for the Weighted Degree kernel
Parameters
----------
alphabet : list
list of letters
n : int
n-gram length
Y : array, [n_samples, ]
The training strings.
is_normalized : bool
True if the feature space should be normalized, False otherwise.
"""
self.n = int(n)
self._alphabet_n_gram_count = len(alphabet) ** n
<|code_end|>
. Write the next line using the current file imports:
import numpy
from preimage.features.string_feature_space import build_feature_space_with_positions
and context from other files:
# Path: preimage/features/string_feature_space.py
# def build_feature_space_with_positions(alphabet, n, Y):
# """Create the feature space by considering the position of the n-gram in the strings
#
# Parameters
# ----------
# alphabet : list
# list of letters
# n : int
# n-gram length
# Y : array, [n_samples, ]
# The training strings.
#
# Returns
# -------
# feature_space : sparse matrix, shape = [n_samples, max_n_gram_count * len(alphabet)**n]
# Sparse matrix representation of the n-grams in each string of Y, where n_samples is the number of training
#         samples and max_n_gram_count is the number of n-grams in the longest string of Y.
# """
# n = int(n)
# n_examples = numpy.array(Y).shape[0]
# n_gram_to_index = get_n_gram_to_index(alphabet, n)
# index_pointers, indexes, data = __initialize_pointers_indexes_data(n_examples)
# __build_pointers_indexes_data_with_positions(index_pointers, indexes, data, n, n_gram_to_index, Y)
# n_columns = __get_n_columns(n, len(n_gram_to_index), Y)
# feature_space = __build_csr_matrix(index_pointers, indexes, data, n_examples, n_columns)
# return feature_space
, which may include functions, classes, or code. Output only the next line. | self.feature_space = build_feature_space_with_positions(alphabet, self.n, Y) |
Next line prediction: <|code_start|> max_length : int
Maximum position.
Returns
-------
position_matrix : array, shape = [max_length, max_length]
Similarity of each position with all the other positions.
"""
position_matrix = compute_position_weights_matrix(max_length, self.sigma_position)
return position_matrix
def get_alphabet_similarity_matrix(self):
"""Compute the alphabet similarity weights
Returns
-------
similarity_matrix : array, shape = [len(alphabet), len(alphabet)]
Similarity of each amino acid (letter) with all the other amino acids.
"""
distance_matrix = numpy.zeros((len(self.alphabet), len(self.alphabet)))
numpy.fill_diagonal(distance_matrix, 0)
for index_one, descriptor_one in enumerate(self.descriptors):
for index_two, descriptor_two in enumerate(self.descriptors):
distance = descriptor_one - descriptor_two
squared_distance = numpy.dot(distance, distance)
distance_matrix[index_one, index_two] = squared_distance
distance_matrix /= 2. * (self.sigma_amino_acid ** 2)
return numpy.exp(-distance_matrix)
def _load_amino_acids_and_normalized_descriptors(self):
<|code_end|>
. Use current file imports:
(import numpy
from preimage.datasets.loader import load_amino_acids_and_descriptors
from preimage.kernels._generic_string import element_wise_generic_string_kernel, generic_string_kernel_with_sigma_c
from preimage.kernels._generic_string import element_wise_generic_string_kernel_with_sigma_c
from preimage.datasets.amino_acid_file import AminoAcidFile
from preimage.utils.position import compute_position_weights_matrix
from preimage.utils.alphabet import transform_strings_to_integer_lists)
and context including class names, function names, or small code snippets from other files:
# Path: preimage/datasets/loader.py
# def load_amino_acids_and_descriptors(file_name=AminoAcidFile.blosum62_natural):
# """Load amino acids and descriptors
#
# Parameters
# ----------
# file_name : string
# file name of the amino acid matrix.
#
# Returns
# -------
# amino_acids: list
# A list of amino acids (letters).
# descriptors: array, shape = [n_amino_acids, n_amino_acids]
# Substitution cost of each amino acid with all the other amino acids, where n_amino_acids is the number of
# amino acids.
# """
# path_to_file = join(dirname(__file__), 'amino_acid_matrix', file_name)
# with open(path_to_file, 'r') as data_file:
# lines = data_file.readlines()
# splitted_lines = numpy.array([line.split() for line in lines])
# amino_acids = [str(letter) for letter in splitted_lines[:, 0]]
# descriptors = numpy.array(splitted_lines[:, 1:], dtype=numpy.float)
# return amino_acids, descriptors
#
# Path: preimage/datasets/amino_acid_file.py
# class AminoAcidFile:
# """Contain file name of amino acid property matrix from the literature.
#
# For more amino acid files see: http://graal.ift.ulaval.ca/bioinformatics/gs-kernel/.
#
# Attributes
# ----------
# blosum62_natural : string
# file name of an amino acid matrix.
# """
# blosum62_natural = 'AA.blosum62.natural.dat'
#
# Path: preimage/utils/position.py
# def compute_position_weights_matrix(max_position, sigma_position):
# position_penalties = numpy.array([(i - j) for i in range(max_position) for j in range(max_position)],
# dtype=numpy.float)
# position_penalties = position_penalties.reshape(max_position, max_position)
# position_penalties = numpy.square(position_penalties) / (-2 * (sigma_position ** 2))
# return numpy.exp(position_penalties)
#
# Path: preimage/utils/alphabet.py
# def transform_strings_to_integer_lists(Y, alphabet):
# letter_to_int = get_n_gram_to_index(alphabet, 1)
# n_examples = numpy.array(Y).shape[0]
# max_length = numpy.max([len(y) for y in Y])
# Y_int = numpy.zeros((n_examples, max_length), dtype=numpy.int8) - 1
# for y_index, y in enumerate(Y):
# for letter_index, letter in enumerate(y):
# Y_int[y_index, letter_index] = letter_to_int[letter]
# return Y_int
. Output only the next line. | amino_acids, descriptors = load_amino_acids_and_descriptors(self.amino_acid_file_name) |
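get_alphabet_similarity_matrix above is a Gaussian over squared Euclidean distances between descriptor vectors. A vectorized equivalent on two hypothetical 2-D descriptors (the descriptor values are made up for illustration):

import numpy

descriptors = numpy.array([[0., 0.], [1., 1.]])
sigma_amino_acid = 1.

differences = descriptors[:, None, :] - descriptors[None, :, :]
squared_distances = (differences ** 2).sum(axis=2)  # [[0., 2.], [2., 0.]]
print(numpy.exp(-squared_distances / (2. * sigma_amino_acid ** 2)))
# [[1.         0.36787944]
#  [0.36787944 1.        ]]  -- exp(-1) off the diagonal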
Predict the next line for this snippet: <|code_start|> Computes the similarity between two strings by comparing each of their l-grams of length 1 to n. Each l-gram
comparison yields a score that depends on the similarity of their respective amino acids (letters) and a shifting
contribution term that decays exponentially with the distance between the starting positions of the two
substrings. The sigma_position parameter controls the shifting contribution term. The sigma_amino_acid parameter
controls the amount of penalty incurred when the encoding vectors differ as measured by the squared Euclidean
distance between these two vectors. The GS kernel outputs the sum of all the l-gram-comparison scores.
Attributes
----------
amino_acid_file_name : string
Name of the file containing the amino acid substitution matrix.
sigma_position : float
Controls the penalty incurred when two n-grams are not sharing the same position.
sigma_amino_acid : float
Controls the penalty incurred when the encoding vectors of two amino acids differ.
n : int
N-gram length.
is_normalized : bool
True if the kernel should be normalized, False otherwise.
Notes
-----
See http://graal.ift.ulaval.ca/bioinformatics/gs-kernel/ for the original code developed by Sebastien Giguere [1]_.
References
----------
.. [1] Sebastien Giguere, Mario Marchand, Francois Laviolette, Alexandre Drouin, and Jacques Corbeil. "Learning a
peptide-protein binding affinity predictor with kernel ridge regression." BMC bioinformatics 14, no. 1 (2013):
82.
"""
<|code_end|>
with the help of current file imports:
import numpy
from preimage.datasets.loader import load_amino_acids_and_descriptors
from preimage.kernels._generic_string import element_wise_generic_string_kernel, generic_string_kernel_with_sigma_c
from preimage.kernels._generic_string import element_wise_generic_string_kernel_with_sigma_c
from preimage.datasets.amino_acid_file import AminoAcidFile
from preimage.utils.position import compute_position_weights_matrix
from preimage.utils.alphabet import transform_strings_to_integer_lists
and context from other files:
# Path: preimage/datasets/loader.py
# def load_amino_acids_and_descriptors(file_name=AminoAcidFile.blosum62_natural):
# """Load amino acids and descriptors
#
# Parameters
# ----------
# file_name : string
# file name of the amino acid matrix.
#
# Returns
# -------
# amino_acids: list
# A list of amino acids (letters).
# descriptors: array, shape = [n_amino_acids, n_amino_acids]
# Substitution cost of each amino acid with all the other amino acids, where n_amino_acids is the number of
# amino acids.
# """
# path_to_file = join(dirname(__file__), 'amino_acid_matrix', file_name)
# with open(path_to_file, 'r') as data_file:
# lines = data_file.readlines()
# splitted_lines = numpy.array([line.split() for line in lines])
# amino_acids = [str(letter) for letter in splitted_lines[:, 0]]
# descriptors = numpy.array(splitted_lines[:, 1:], dtype=numpy.float)
# return amino_acids, descriptors
#
# Path: preimage/datasets/amino_acid_file.py
# class AminoAcidFile:
# """Contain file name of amino acid property matrix from the literature.
#
# For more amino acid files see: http://graal.ift.ulaval.ca/bioinformatics/gs-kernel/.
#
# Attributes
# ----------
# blosum62_natural : string
# file name of an amino acid matrix.
# """
# blosum62_natural = 'AA.blosum62.natural.dat'
#
# Path: preimage/utils/position.py
# def compute_position_weights_matrix(max_position, sigma_position):
# position_penalties = numpy.array([(i - j) for i in range(max_position) for j in range(max_position)],
# dtype=numpy.float)
# position_penalties = position_penalties.reshape(max_position, max_position)
# position_penalties = numpy.square(position_penalties) / (-2 * (sigma_position ** 2))
# return numpy.exp(position_penalties)
#
# Path: preimage/utils/alphabet.py
# def transform_strings_to_integer_lists(Y, alphabet):
# letter_to_int = get_n_gram_to_index(alphabet, 1)
# n_examples = numpy.array(Y).shape[0]
# max_length = numpy.max([len(y) for y in Y])
# Y_int = numpy.zeros((n_examples, max_length), dtype=numpy.int8) - 1
# for y_index, y in enumerate(Y):
# for letter_index, letter in enumerate(y):
# Y_int[y_index, letter_index] = letter_to_int[letter]
# return Y_int
, which may contain function names, class names, or code. Output only the next line. | def __init__(self, amino_acid_file_name=AminoAcidFile.blosum62_natural, sigma_position=1.0, sigma_amino_acid=1.0, |
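Before any kernel computation, the strings are packed into fixed-width integer arrays; transform_strings_to_integer_lists (quoted above) pads short strings with -1. A self-contained sketch of the same convention:

import numpy

alphabet = ['a', 'b']
letter_to_int = {letter: i for i, letter in enumerate(alphabet)}
Y = ['ab', 'b']
Y_int = numpy.zeros((len(Y), max(len(y) for y in Y)), dtype=numpy.int8) - 1
for y_index, y in enumerate(Y):
    for letter_index, letter in enumerate(y):
        Y_int[y_index, letter_index] = letter_to_int[letter]
print(Y_int)
# [[ 0  1]
#  [ 1 -1]]  -- -1 marks positions past the end of the shorter string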
Given snippet: <|code_start|>__author__ = 'amelie'
def element_wise_kernel(X, sigma_position, n, alphabet):
"""Compute the similarity of each string in X with itself in the Generic String kernel.
Takes into account only the position penalties and the n-grams of length n. No n-gram penalties (no sigma_c).
Parameters
----------
X : array, shape = [n_samples]
Strings, where n_samples is the number of examples in X.
sigma_position : float
Controls the penalty incurred when two n-grams are not sharing the same position.
n : int
N-gram length.
alphabet : list
List of letters.
Returns
-------
kernel : array, shape = [n_samples]
Similarity of each string with itself in the GS kernel, where n_samples is the number of examples in X.
"""
X = numpy.array(X)
x_lengths = numpy.array([len(x) for x in X], dtype=numpy.int64)
max_length = numpy.max(x_lengths) - n + 1
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy
from preimage.datasets.loader import load_amino_acids_and_descriptors
from preimage.kernels._generic_string import element_wise_generic_string_kernel, generic_string_kernel_with_sigma_c
from preimage.kernels._generic_string import element_wise_generic_string_kernel_with_sigma_c
from preimage.datasets.amino_acid_file import AminoAcidFile
from preimage.utils.position import compute_position_weights_matrix
from preimage.utils.alphabet import transform_strings_to_integer_lists
and context:
# Path: preimage/datasets/loader.py
# def load_amino_acids_and_descriptors(file_name=AminoAcidFile.blosum62_natural):
# """Load amino acids and descriptors
#
# Parameters
# ----------
# file_name : string
# file name of the amino acid matrix.
#
# Returns
# -------
# amino_acids: list
# A list of amino acids (letters).
# descriptors: array, shape = [n_amino_acids, n_amino_acids]
# Substitution cost of each amino acid with all the other amino acids, where n_amino_acids is the number of
# amino acids.
# """
# path_to_file = join(dirname(__file__), 'amino_acid_matrix', file_name)
# with open(path_to_file, 'r') as data_file:
# lines = data_file.readlines()
# splitted_lines = numpy.array([line.split() for line in lines])
# amino_acids = [str(letter) for letter in splitted_lines[:, 0]]
# descriptors = numpy.array(splitted_lines[:, 1:], dtype=numpy.float)
# return amino_acids, descriptors
#
# Path: preimage/datasets/amino_acid_file.py
# class AminoAcidFile:
# """Contain file name of amino acid property matrix from the literature.
#
# For more amino acid files see: http://graal.ift.ulaval.ca/bioinformatics/gs-kernel/.
#
# Attributes
# ----------
# blosum62_natural : string
# file name of an amino acid matrix.
# """
# blosum62_natural = 'AA.blosum62.natural.dat'
#
# Path: preimage/utils/position.py
# def compute_position_weights_matrix(max_position, sigma_position):
# position_penalties = numpy.array([(i - j) for i in range(max_position) for j in range(max_position)],
# dtype=numpy.float)
# position_penalties = position_penalties.reshape(max_position, max_position)
# position_penalties = numpy.square(position_penalties) / (-2 * (sigma_position ** 2))
# return numpy.exp(position_penalties)
#
# Path: preimage/utils/alphabet.py
# def transform_strings_to_integer_lists(Y, alphabet):
# letter_to_int = get_n_gram_to_index(alphabet, 1)
# n_examples = numpy.array(Y).shape[0]
# max_length = numpy.max([len(y) for y in Y])
# Y_int = numpy.zeros((n_examples, max_length), dtype=numpy.int8) - 1
# for y_index, y in enumerate(Y):
# for letter_index, letter in enumerate(y):
# Y_int[y_index, letter_index] = letter_to_int[letter]
# return Y_int
which might include code, classes, or functions. Output only the next line. | position_matrix = compute_position_weights_matrix(max_length, sigma_position) |
Here is a snippet: <|code_start|>__author__ = 'amelie'
def element_wise_kernel(X, sigma_position, n, alphabet):
"""Compute the similarity of each string in X with itself in the Generic String kernel.
Takes into account only the position penalties and the n-grams of length n. No n-gram penalties (no sigma_c).
Parameters
----------
X : array, shape = [n_samples]
Strings, where n_samples is the number of examples in X.
sigma_position : float
Controls the penalty incurred when two n-grams are not sharing the same position.
n : int
N-gram length.
alphabet : list
List of letters.
Returns
-------
kernel : array, shape = [n_samples]
Similarity of each string with itself in the GS kernel, where n_samples is the number of examples in X.
"""
X = numpy.array(X)
x_lengths = numpy.array([len(x) for x in X], dtype=numpy.int64)
max_length = numpy.max(x_lengths) - n + 1
position_matrix = compute_position_weights_matrix(max_length, sigma_position)
<|code_end|>
. Write the next line using the current file imports:
import numpy
from preimage.datasets.loader import load_amino_acids_and_descriptors
from preimage.kernels._generic_string import element_wise_generic_string_kernel, generic_string_kernel_with_sigma_c
from preimage.kernels._generic_string import element_wise_generic_string_kernel_with_sigma_c
from preimage.datasets.amino_acid_file import AminoAcidFile
from preimage.utils.position import compute_position_weights_matrix
from preimage.utils.alphabet import transform_strings_to_integer_lists
and context from other files:
# Path: preimage/datasets/loader.py
# def load_amino_acids_and_descriptors(file_name=AminoAcidFile.blosum62_natural):
# """Load amino acids and descriptors
#
# Parameters
# ----------
# file_name : string
# file name of the amino acid matrix.
#
# Returns
# -------
# amino_acids: list
# A list of amino acids (letters).
# descriptors: array, shape = [n_amino_acids, n_amino_acids]
# Substitution cost of each amino acid with all the other amino acids, where n_amino_acids is the number of
# amino acids.
# """
# path_to_file = join(dirname(__file__), 'amino_acid_matrix', file_name)
# with open(path_to_file, 'r') as data_file:
# lines = data_file.readlines()
# splitted_lines = numpy.array([line.split() for line in lines])
# amino_acids = [str(letter) for letter in splitted_lines[:, 0]]
# descriptors = numpy.array(splitted_lines[:, 1:], dtype=numpy.float)
# return amino_acids, descriptors
#
# Path: preimage/datasets/amino_acid_file.py
# class AminoAcidFile:
# """Contain file name of amino acid property matrix from the literature.
#
# For more amino acid files see: http://graal.ift.ulaval.ca/bioinformatics/gs-kernel/.
#
# Attributes
# ----------
# blosum62_natural : string
# file name of an amino acid matrix.
# """
# blosum62_natural = 'AA.blosum62.natural.dat'
#
# Path: preimage/utils/position.py
# def compute_position_weights_matrix(max_position, sigma_position):
# position_penalties = numpy.array([(i - j) for i in range(max_position) for j in range(max_position)],
# dtype=numpy.float)
# position_penalties = position_penalties.reshape(max_position, max_position)
# position_penalties = numpy.square(position_penalties) / (-2 * (sigma_position ** 2))
# return numpy.exp(position_penalties)
#
# Path: preimage/utils/alphabet.py
# def transform_strings_to_integer_lists(Y, alphabet):
# letter_to_int = get_n_gram_to_index(alphabet, 1)
# n_examples = numpy.array(Y).shape[0]
# max_length = numpy.max([len(y) for y in Y])
# Y_int = numpy.zeros((n_examples, max_length), dtype=numpy.int8) - 1
# for y_index, y in enumerate(Y):
# for letter_index, letter in enumerate(y):
# Y_int[y_index, letter_index] = letter_to_int[letter]
# return Y_int
, which may include functions, classes, or code. Output only the next line. | X_int = transform_strings_to_integer_lists(X, alphabet) |
Given the following code snippet before the placeholder: <|code_start|>__author__ = 'amelie'
class GraphBuilder:
"""Graph builder for the pre-image of multiple string kernels.
Solves the pre-image problem of string kernels with constant norms (Hamming, Weighted Degree). For string kernel
where the norm is not constant, it builds a graph that can be used to compute the bounds of partial solutions in
a branch and bound search. The graph is constructed by dynamic programming.
Attributes
----------
alphabet : list
List of letters.
n : int
N-gram length.
"""
def __init__(self, alphabet, n):
self.alphabet = alphabet
self.n = int(n)
self._n_gram_count = len(self.alphabet) ** self.n
self._entering_edges = self._get_entering_edge_indexes(self._n_gram_count, alphabet, n)
<|code_end|>
, predict the next line using imports from the current file:
import numpy
from preimage.utils.alphabet import get_index_to_n_gram
from preimage.exceptions.shape import InvalidShapeError
from preimage.exceptions.n_gram import InvalidYLengthError, InvalidMinLengthError
and context including class names, function names, and sometimes code from other files:
# Path: preimage/utils/alphabet.py
# def get_index_to_n_gram(alphabet, n):
# n_grams = get_n_grams(alphabet, n)
# indexes = numpy.arange(len(n_grams))
# index_to_n_gram = dict(zip(indexes, n_grams))
# return index_to_n_gram
#
# Path: preimage/exceptions/shape.py
# class InvalidShapeError(ValueError):
# def __init__(self, parameter_name, parameter_shape, valid_shapes):
# self.parameter_name = parameter_name
# self.parameter_shape = parameter_shape
# self.valid_shapes = [str(valid_shape) for valid_shape in valid_shapes]
#
# def __str__(self):
# valid_shapes_string = ' or '.join(self.valid_shapes)
# error_message = "{} wrong shape: Expected: {} Got: {}".format(self.parameter_name, valid_shapes_string,
# str(self.parameter_shape))
# return error_message
#
# Path: preimage/exceptions/n_gram.py
# class InvalidYLengthError(ValueError):
# def __init__(self, n, y_length):
# self.n = n
# self.y_length = y_length
#
# def __str__(self):
# error_message = 'y_length must be >= n. Got: y_length={:d}, n={:d}'.format(self.y_length, self.n)
# return error_message
#
# class InvalidMinLengthError(ValueError):
# def __init__(self, min_length, max_length):
# self.min_length = min_length
# self.max_length = max_length
#
# def __str__(self):
# error_message = 'min_length must be <= max_length. ' \
# 'Got: min_length={:d}, max_length={:d}'.format(self.min_length, self.max_length)
# return error_message
. Output only the next line. | self._index_to_n_gram = get_index_to_n_gram(alphabet, self.n) |
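self._index_to_n_gram = get_index_to_n_gram(alphabet, self.n)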
Given the code snippet: <|code_start|> partition_weights = graph_weights
else:
partition_weights = graph_weights[i, :]
return partition_weights
def _get_max_string_end_indexes_in_range(self, graph, min_partition, n_partitions, is_normalized):
norm = [n_gram_count for n_gram_count in range(min_partition + 1, n_partitions + 1)]
norm = numpy.array(norm).reshape(-1, 1)
if is_normalized:
graph[min_partition:, :] *= 1. / numpy.sqrt(norm)
end_indexes = numpy.unravel_index(numpy.argmax(graph[min_partition:, :]), graph[min_partition:, :].shape)
else:
graph[min_partition:, :] = norm - 2 * graph[min_partition:, :]
end_indexes = numpy.unravel_index(numpy.argmin(graph[min_partition:, :]), graph[min_partition:, :].shape)
partition_index = end_indexes[0] + min_partition - 1
return partition_index, end_indexes[1]
def _build_max_string(self, partition_index, n_gram_index, predecessors):
max_string = self._index_to_n_gram[n_gram_index]
best_index = n_gram_index
for i in range(partition_index, -1, -1):
best_index = predecessors[i, best_index]
max_string = self._index_to_n_gram[best_index][0] + max_string
return max_string
def _verify_graph_weights_and_y_length(self, graph_weights, n_partitions):
if n_partitions <= 0:
raise InvalidYLengthError(self.n, n_partitions + self.n - 1)
valid_shapes = [(self._n_gram_count,), (n_partitions, self._n_gram_count)]
if graph_weights.shape not in valid_shapes:
<|code_end|>
, generate the next line using the imports in this file:
import numpy
from preimage.utils.alphabet import get_index_to_n_gram
from preimage.exceptions.shape import InvalidShapeError
from preimage.exceptions.n_gram import InvalidYLengthError, InvalidMinLengthError
and context (functions, classes, or occasionally code) from other files:
# Path: preimage/utils/alphabet.py
# def get_index_to_n_gram(alphabet, n):
# n_grams = get_n_grams(alphabet, n)
# indexes = numpy.arange(len(n_grams))
# index_to_n_gram = dict(zip(indexes, n_grams))
# return index_to_n_gram
#
# Path: preimage/exceptions/shape.py
# class InvalidShapeError(ValueError):
# def __init__(self, parameter_name, parameter_shape, valid_shapes):
# self.parameter_name = parameter_name
# self.parameter_shape = parameter_shape
# self.valid_shapes = [str(valid_shape) for valid_shape in valid_shapes]
#
# def __str__(self):
# valid_shapes_string = ' or '.join(self.valid_shapes)
# error_message = "{} wrong shape: Expected: {} Got: {}".format(self.parameter_name, valid_shapes_string,
# str(self.parameter_shape))
# return error_message
#
# Path: preimage/exceptions/n_gram.py
# class InvalidYLengthError(ValueError):
# def __init__(self, n, y_length):
# self.n = n
# self.y_length = y_length
#
# def __str__(self):
# error_message = 'y_length must be >= n. Got: y_length={:d}, n={:d}'.format(self.y_length, self.n)
# return error_message
#
# class InvalidMinLengthError(ValueError):
# def __init__(self, min_length, max_length):
# self.min_length = min_length
# self.max_length = max_length
#
# def __str__(self):
# error_message = 'min_length must be <= max_length. ' \
# 'Got: min_length={:d}, max_length={:d}'.format(self.min_length, self.max_length)
# return error_message
. Output only the next line. | raise InvalidShapeError('graph_weights', graph_weights.shape, valid_shapes) |
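raise InvalidShapeError('graph_weights', graph_weights.shape, valid_shapes)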
Next line prediction: <|code_start|>
def _get_weights(self, i, graph_weights):
if graph_weights.ndim == 1:
partition_weights = graph_weights
else:
partition_weights = graph_weights[i, :]
return partition_weights
def _get_max_string_end_indexes_in_range(self, graph, min_partition, n_partitions, is_normalized):
norm = [n_gram_count for n_gram_count in range(min_partition + 1, n_partitions + 1)]
norm = numpy.array(norm).reshape(-1, 1)
if is_normalized:
graph[min_partition:, :] *= 1. / numpy.sqrt(norm)
end_indexes = numpy.unravel_index(numpy.argmax(graph[min_partition:, :]), graph[min_partition:, :].shape)
else:
graph[min_partition:, :] = norm - 2 * graph[min_partition:, :]
end_indexes = numpy.unravel_index(numpy.argmin(graph[min_partition:, :]), graph[min_partition:, :].shape)
partition_index = end_indexes[0] + min_partition - 1
return partition_index, end_indexes[1]
def _build_max_string(self, partition_index, n_gram_index, predecessors):
max_string = self._index_to_n_gram[n_gram_index]
best_index = n_gram_index
for i in range(partition_index, -1, -1):
best_index = predecessors[i, best_index]
max_string = self._index_to_n_gram[best_index][0] + max_string
return max_string
def _verify_graph_weights_and_y_length(self, graph_weights, n_partitions):
if n_partitions <= 0:
<|code_end|>
. Use current file imports:
(import numpy
from preimage.utils.alphabet import get_index_to_n_gram
from preimage.exceptions.shape import InvalidShapeError
from preimage.exceptions.n_gram import InvalidYLengthError, InvalidMinLengthError)
and context including class names, function names, or small code snippets from other files:
# Path: preimage/utils/alphabet.py
# def get_index_to_n_gram(alphabet, n):
# n_grams = get_n_grams(alphabet, n)
# indexes = numpy.arange(len(n_grams))
# index_to_n_gram = dict(zip(indexes, n_grams))
# return index_to_n_gram
#
# Path: preimage/exceptions/shape.py
# class InvalidShapeError(ValueError):
# def __init__(self, parameter_name, parameter_shape, valid_shapes):
# self.parameter_name = parameter_name
# self.parameter_shape = parameter_shape
# self.valid_shapes = [str(valid_shape) for valid_shape in valid_shapes]
#
# def __str__(self):
# valid_shapes_string = ' or '.join(self.valid_shapes)
# error_message = "{} wrong shape: Expected: {} Got: {}".format(self.parameter_name, valid_shapes_string,
# str(self.parameter_shape))
# return error_message
#
# Path: preimage/exceptions/n_gram.py
# class InvalidYLengthError(ValueError):
# def __init__(self, n, y_length):
# self.n = n
# self.y_length = y_length
#
# def __str__(self):
# error_message = 'y_length must be >= n. Got: y_length={:d}, n={:d}'.format(self.y_length, self.n)
# return error_message
#
# class InvalidMinLengthError(ValueError):
# def __init__(self, min_length, max_length):
# self.min_length = min_length
# self.max_length = max_length
#
# def __str__(self):
# error_message = 'min_length must be <= max_length. ' \
# 'Got: min_length={:d}, max_length={:d}'.format(self.min_length, self.max_length)
# return error_message
. Output only the next line. | raise InvalidYLengthError(self.n, n_partitions + self.n - 1) |
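raise InvalidYLengthError(self.n, n_partitions + self.n - 1)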
Given the code snippet: <|code_start|>
def _get_max_string_end_indexes_in_range(self, graph, min_partition, n_partitions, is_normalized):
norm = [n_gram_count for n_gram_count in range(min_partition + 1, n_partitions + 1)]
norm = numpy.array(norm).reshape(-1, 1)
if is_normalized:
graph[min_partition:, :] *= 1. / numpy.sqrt(norm)
end_indexes = numpy.unravel_index(numpy.argmax(graph[min_partition:, :]), graph[min_partition:, :].shape)
else:
graph[min_partition:, :] = norm - 2 * graph[min_partition:, :]
end_indexes = numpy.unravel_index(numpy.argmin(graph[min_partition:, :]), graph[min_partition:, :].shape)
partition_index = end_indexes[0] + min_partition - 1
return partition_index, end_indexes[1]
def _build_max_string(self, partition_index, n_gram_index, predecessors):
max_string = self._index_to_n_gram[n_gram_index]
best_index = n_gram_index
for i in range(partition_index, -1, -1):
best_index = predecessors[i, best_index]
max_string = self._index_to_n_gram[best_index][0] + max_string
return max_string
def _verify_graph_weights_and_y_length(self, graph_weights, n_partitions):
if n_partitions <= 0:
raise InvalidYLengthError(self.n, n_partitions + self.n - 1)
valid_shapes = [(self._n_gram_count,), (n_partitions, self._n_gram_count)]
if graph_weights.shape not in valid_shapes:
raise InvalidShapeError('graph_weights', graph_weights.shape, valid_shapes)
def _verify_min_max_length(self, min_length, max_length):
if min_length > max_length:
<|code_end|>
, generate the next line using the imports in this file:
import numpy
from preimage.utils.alphabet import get_index_to_n_gram
from preimage.exceptions.shape import InvalidShapeError
from preimage.exceptions.n_gram import InvalidYLengthError, InvalidMinLengthError
and context (functions, classes, or occasionally code) from other files:
# Path: preimage/utils/alphabet.py
# def get_index_to_n_gram(alphabet, n):
# n_grams = get_n_grams(alphabet, n)
# indexes = numpy.arange(len(n_grams))
# index_to_n_gram = dict(zip(indexes, n_grams))
# return index_to_n_gram
#
# Path: preimage/exceptions/shape.py
# class InvalidShapeError(ValueError):
# def __init__(self, parameter_name, parameter_shape, valid_shapes):
# self.parameter_name = parameter_name
# self.parameter_shape = parameter_shape
# self.valid_shapes = [str(valid_shape) for valid_shape in valid_shapes]
#
# def __str__(self):
# valid_shapes_string = ' or '.join(self.valid_shapes)
# error_message = "{} wrong shape: Expected: {} Got: {}".format(self.parameter_name, valid_shapes_string,
# str(self.parameter_shape))
# return error_message
#
# Path: preimage/exceptions/n_gram.py
# class InvalidYLengthError(ValueError):
# def __init__(self, n, y_length):
# self.n = n
# self.y_length = y_length
#
# def __str__(self):
# error_message = 'y_length must be >= n. Got: y_length={:d}, n={:d}'.format(self.y_length, self.n)
# return error_message
#
# class InvalidMinLengthError(ValueError):
# def __init__(self, min_length, max_length):
# self.min_length = min_length
# self.max_length = max_length
#
# def __str__(self):
# error_message = 'min_length must be <= max_length. ' \
# 'Got: min_length={:d}, max_length={:d}'.format(self.min_length, self.max_length)
# return error_message
. Output only the next line. | raise InvalidMinLengthError(min_length, max_length) |
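raise InvalidMinLengthError(min_length, max_length)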
Next line prediction: <|code_start|>__author__ = 'amelie'
class TestWeightedDegreeModel(unittest2.TestCase):
def setUp(self):
self.setup_feature_space()
self.setup_fit_parameters()
self.setup_graph_builder()
self.alphabet = ['a', 'b', 'c']
<|code_end|>
. Use current file imports:
(import unittest2
import numpy
import numpy.testing
from mock import patch, Mock
from preimage.models.weighted_degree_model import WeightedDegreeModel
from preimage.learners.structured_krr import InferenceFitParameters
from preimage.exceptions.n_gram import NoYLengthsError)
and context including class names, function names, or small code snippets from other files:
# Path: preimage/models/weighted_degree_model.py
# class WeightedDegreeModel(Model):
# def __init__(self, alphabet, n, is_using_length=True):
# self._graph_builder = GraphBuilder(alphabet, n)
# self._is_normalized = True
# Model.__init__(self, alphabet, n, is_using_length)
#
# def fit(self, inference_parameters):
# Model.fit(self, inference_parameters)
# self._feature_space_ = WeightedDegreeFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
# self._is_normalized)
#
# def predict(self, Y_weights, y_lengths):
# if self._is_using_length:
# self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
# Y_predictions = self._predict_with_length(Y_weights, y_lengths)
# else:
# Y_predictions = self._predict_without_length(Y_weights)
# return Y_predictions
#
# def _predict_with_length(self, Y_weights, y_lengths):
# Y_predictions = []
# for y_weights, y_length in zip(Y_weights, y_lengths):
# n_gram_weights = self._feature_space_.compute_weights(y_weights, y_length)
# y_predicted = self._graph_builder.find_max_string(n_gram_weights, y_length)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# def _predict_without_length(self, Y_weights):
# Y_predictions = []
# for y_weights in Y_weights:
# n_gram_weights = self._feature_space_.compute_weights(y_weights, self._max_length_)
# y_predicted = self._graph_builder.find_max_string_in_length_range(n_gram_weights, self._min_length_,
# self._max_length_, self._is_normalized)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# Path: preimage/learners/structured_krr.py
# class InferenceFitParameters:
# """Parameters for the inference model.
#
# That way inference_model.fit(parameters) doesn't have unused parameters but only access the one it needs
# .
# Attributes
# ----------
# weights : array, shape = [n_samples, n_samples]
# Learned weights, where n_samples is the number of training samples.
# gram_matrix : array, shape = [n_samples, n_samples]
# Gram_matrix of the training samples.
# Y_train : array, shape = [n_samples, ]
# Training strings.
# y_lengths : array, shape = [n_samples]
# Length of each training string in Y_train.
# """
# def __init__(self, weights, gram_matrix, Y, y_lengths):
# self.weights = weights
# self.gram_matrix = gram_matrix
# self.Y_train = Y
# self.y_lengths = y_lengths
#
# Path: preimage/exceptions/n_gram.py
# class NoYLengthsError(ValueError):
# def __str__(self):
# error_message = "y_lengths can't be None if is_using_length = True"
# return error_message
. Output only the next line. | self.model_with_length = WeightedDegreeModel(self.alphabet, n=2, is_using_length=True) |
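self.model_with_length = WeightedDegreeModel(self.alphabet, n=2, is_using_length=True)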
Given the following code snippet before the placeholder: <|code_start|>__author__ = 'amelie'
class TestWeightedDegreeModel(unittest2.TestCase):
def setUp(self):
self.setup_feature_space()
self.setup_fit_parameters()
self.setup_graph_builder()
self.alphabet = ['a', 'b', 'c']
self.model_with_length = WeightedDegreeModel(self.alphabet, n=2, is_using_length=True)
self.model_no_length = WeightedDegreeModel(self.alphabet, n=2, is_using_length=False)
self.Y_weights = [[1, 2, 3], [4, 5, 6]]
self.y_lengths = [3, 4]
def setup_fit_parameters(self):
self.max_train_length = 2
self.min_train_length = 1
self.weights = numpy.array([[1, 2]])
self.gram_matrix = numpy.array([[1, 0], [0, 1]])
<|code_end|>
, predict the next line using imports from the current file:
import unittest2
import numpy
import numpy.testing
from mock import patch, Mock
from preimage.models.weighted_degree_model import WeightedDegreeModel
from preimage.learners.structured_krr import InferenceFitParameters
from preimage.exceptions.n_gram import NoYLengthsError
and context including class names, function names, and sometimes code from other files:
# Path: preimage/models/weighted_degree_model.py
# class WeightedDegreeModel(Model):
# def __init__(self, alphabet, n, is_using_length=True):
# self._graph_builder = GraphBuilder(alphabet, n)
# self._is_normalized = True
# Model.__init__(self, alphabet, n, is_using_length)
#
# def fit(self, inference_parameters):
# Model.fit(self, inference_parameters)
# self._feature_space_ = WeightedDegreeFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
# self._is_normalized)
#
# def predict(self, Y_weights, y_lengths):
# if self._is_using_length:
# self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
# Y_predictions = self._predict_with_length(Y_weights, y_lengths)
# else:
# Y_predictions = self._predict_without_length(Y_weights)
# return Y_predictions
#
# def _predict_with_length(self, Y_weights, y_lengths):
# Y_predictions = []
# for y_weights, y_length in zip(Y_weights, y_lengths):
# n_gram_weights = self._feature_space_.compute_weights(y_weights, y_length)
# y_predicted = self._graph_builder.find_max_string(n_gram_weights, y_length)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# def _predict_without_length(self, Y_weights):
# Y_predictions = []
# for y_weights in Y_weights:
# n_gram_weights = self._feature_space_.compute_weights(y_weights, self._max_length_)
# y_predicted = self._graph_builder.find_max_string_in_length_range(n_gram_weights, self._min_length_,
# self._max_length_, self._is_normalized)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# Path: preimage/learners/structured_krr.py
# class InferenceFitParameters:
# """Parameters for the inference model.
#
# That way inference_model.fit(parameters) doesn't have unused parameters but only access the one it needs
# .
# Attributes
# ----------
# weights : array, shape = [n_samples, n_samples]
# Learned weights, where n_samples is the number of training samples.
# gram_matrix : array, shape = [n_samples, n_samples]
# Gram_matrix of the training samples.
# Y_train : array, shape = [n_samples, ]
# Training strings.
# y_lengths : array, shape = [n_samples]
# Length of each training string in Y_train.
# """
# def __init__(self, weights, gram_matrix, Y, y_lengths):
# self.weights = weights
# self.gram_matrix = gram_matrix
# self.Y_train = Y
# self.y_lengths = y_lengths
#
# Path: preimage/exceptions/n_gram.py
# class NoYLengthsError(ValueError):
# def __str__(self):
# error_message = "y_lengths can't be None if is_using_length = True"
# return error_message
. Output only the next line. | self.fit_parameters = InferenceFitParameters(self.weights, self.gram_matrix, Y=['a', 'ab'], |
Given snippet: <|code_start|> self.n_gram_weights = [0, 1, 0]
self.feature_space_mock = Mock()
self.feature_space_mock.compute_weights.return_value = self.n_gram_weights
self.feature_space_patch = patch('preimage.models.weighted_degree_model.WeightedDegreeFeatureSpace')
self.feature_space_patch.start().return_value = self.feature_space_mock
def setup_graph_builder(self):
self.Y_test_with_length = ['aab', 'baba']
self.Y_test_no_length = ['bb', 'aaa']
self.graph_builder_mock = Mock()
self.graph_builder_mock.find_max_string.side_effect = self.Y_test_with_length
self.graph_builder_mock.find_max_string_in_length_range.side_effect = self.Y_test_no_length
self.graph_builder_path = patch('preimage.models.weighted_degree_model.GraphBuilder')
self.graph_builder_path.start().return_value = self.graph_builder_mock
def test_model_with_length_fit_has_correct_min_max_lengths(self):
self.model_with_length.fit(self.fit_parameters)
self.assertEqual(self.model_with_length._min_length_, self.min_train_length)
self.assertEqual(self.model_with_length._max_length_, self.max_train_length)
def test_model_no_y_lengths_fit_has_correct_min_max_lengths(self):
self.model_with_length.fit(self.fit_parameters_no_length)
self.assertEqual(self.model_with_length._min_length_, self.min_train_length)
self.assertEqual(self.model_with_length._max_length_, self.max_train_length)
def test_no_y_lengths_model_with_length_raises_error(self):
self.model_with_length.fit(self.fit_parameters)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest2
import numpy
import numpy.testing
from mock import patch, Mock
from preimage.models.weighted_degree_model import WeightedDegreeModel
from preimage.learners.structured_krr import InferenceFitParameters
from preimage.exceptions.n_gram import NoYLengthsError
and context:
# Path: preimage/models/weighted_degree_model.py
# class WeightedDegreeModel(Model):
# def __init__(self, alphabet, n, is_using_length=True):
# self._graph_builder = GraphBuilder(alphabet, n)
# self._is_normalized = True
# Model.__init__(self, alphabet, n, is_using_length)
#
# def fit(self, inference_parameters):
# Model.fit(self, inference_parameters)
# self._feature_space_ = WeightedDegreeFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
# self._is_normalized)
#
# def predict(self, Y_weights, y_lengths):
# if self._is_using_length:
# self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
# Y_predictions = self._predict_with_length(Y_weights, y_lengths)
# else:
# Y_predictions = self._predict_without_length(Y_weights)
# return Y_predictions
#
# def _predict_with_length(self, Y_weights, y_lengths):
# Y_predictions = []
# for y_weights, y_length in zip(Y_weights, y_lengths):
# n_gram_weights = self._feature_space_.compute_weights(y_weights, y_length)
# y_predicted = self._graph_builder.find_max_string(n_gram_weights, y_length)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# def _predict_without_length(self, Y_weights):
# Y_predictions = []
# for y_weights in Y_weights:
# n_gram_weights = self._feature_space_.compute_weights(y_weights, self._max_length_)
# y_predicted = self._graph_builder.find_max_string_in_length_range(n_gram_weights, self._min_length_,
# self._max_length_, self._is_normalized)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# Path: preimage/learners/structured_krr.py
# class InferenceFitParameters:
# """Parameters for the inference model.
#
# That way inference_model.fit(parameters) doesn't have unused parameters but only access the one it needs
# .
# Attributes
# ----------
# weights : array, shape = [n_samples, n_samples]
# Learned weights, where n_samples is the number of training samples.
# gram_matrix : array, shape = [n_samples, n_samples]
# Gram_matrix of the training samples.
# Y_train : array, shape = [n_samples, ]
# Training strings.
# y_lengths : array, shape = [n_samples]
# Length of each training string in Y_train.
# """
# def __init__(self, weights, gram_matrix, Y, y_lengths):
# self.weights = weights
# self.gram_matrix = gram_matrix
# self.Y_train = Y
# self.y_lengths = y_lengths
#
# Path: preimage/exceptions/n_gram.py
# class NoYLengthsError(ValueError):
# def __str__(self):
# error_message = "y_lengths can't be None if is_using_length = True"
# return error_message
which might include code, classes, or functions. Output only the next line. | with self.assertRaises(NoYLengthsError): |
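with self.assertRaises(NoYLengthsError):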
Predict the next line for this snippet: <|code_start|>
class Model(BaseEstimator):
__metaclass__ = abc.ABCMeta
def __init__(self, alphabet, n, is_using_length=True):
self._n = n
self._alphabet = alphabet
self._is_using_length = is_using_length
self._feature_space_ = None
self._min_length_ = None
self._max_length_ = None
def fit(self, inference_parameters):
self._find_min_max_length(inference_parameters.y_lengths, inference_parameters.Y_train)
def _find_min_max_length(self, y_lengths, Y):
if y_lengths is None:
y_lengths = numpy.array([len(y) for y in Y])
self._min_length_ = numpy.min(y_lengths)
self._max_length_ = numpy.max(y_lengths)
@abc.abstractmethod
def predict(self, Y_weights, y_lengths):
return
def _verify_y_lengths_is_not_none_when_use_length(self, y_lengths):
if self._is_using_length and y_lengths is None:
<|code_end|>
with the help of current file imports:
import abc
import numpy
from sklearn.base import BaseEstimator
from preimage.exceptions.n_gram import NoYLengthsError
and context from other files:
# Path: preimage/exceptions/n_gram.py
# class NoYLengthsError(ValueError):
# def __str__(self):
# error_message = "y_lengths can't be None if is_using_length = True"
# return error_message
, which may contain function names, class names, or code. Output only the next line. | raise NoYLengthsError() |
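raise NoYLengthsError()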
Next line prediction: <|code_start|>__author__ = 'amelie'
class TestPosition(unittest2.TestCase):
def setUp(self):
self.position_weights_one_sigma_p_index_zero = [1., 0.6065307, 0.1353353]
self.position_weights_one_sigma_p_index_one = [0.6065307, 1., 0.6065307]
self.position_weights_half_sigma_p_index_zero = [1., 0.1353353, 0.0003355]
self.position_matrix_one_sigma_p_three_positions = [[1., 0.6065307, 0.1353353], [0.6065307, 1., 0.6065307],
[0.1353353, 0.6065307, 1.]]
self.position_weights_half_sigma_p_two_positions = [[1., 0.1353353], [0.1353353, 1.]]
def test_one_sigma_p_index_zero_to_one_compute_position_weights_returns_expected_weights(self):
<|code_end|>
. Use current file imports:
(import unittest2
import numpy.testing
from preimage.utils.position import compute_position_weights, compute_position_weights_matrix)
and context including class names, function names, or small code snippets from other files:
# Path: preimage/utils/position.py
# def compute_position_weights(position_index, max_position, sigma_position):
# position_penalties = numpy.array([(position_index - j) ** 2 for j in range(max_position)], dtype=numpy.float)
# position_penalties /= -2. * (sigma_position ** 2)
# return numpy.exp(position_penalties)
#
# def compute_position_weights_matrix(max_position, sigma_position):
# position_penalties = numpy.array([(i - j) for i in range(max_position) for j in range(max_position)],
# dtype=numpy.float)
# position_penalties = position_penalties.reshape(max_position, max_position)
# position_penalties = numpy.square(position_penalties) / (-2 * (sigma_position ** 2))
# return numpy.exp(position_penalties)
. Output only the next line. | position_weights = compute_position_weights(position_index=0, max_position=1, sigma_position=1) |
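position_weights = compute_position_weights(position_index=0, max_position=1, sigma_position=1)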
Here is a snippet: <|code_start|>class TestPosition(unittest2.TestCase):
def setUp(self):
self.position_weights_one_sigma_p_index_zero = [1., 0.6065307, 0.1353353]
self.position_weights_one_sigma_p_index_one = [0.6065307, 1., 0.6065307]
self.position_weights_half_sigma_p_index_zero = [1., 0.1353353, 0.0003355]
self.position_matrix_one_sigma_p_three_positions = [[1., 0.6065307, 0.1353353], [0.6065307, 1., 0.6065307],
[0.1353353, 0.6065307, 1.]]
self.position_weights_half_sigma_p_two_positions = [[1., 0.1353353], [0.1353353, 1.]]
def test_one_sigma_p_index_zero_to_one_compute_position_weights_returns_expected_weights(self):
position_weights = compute_position_weights(position_index=0, max_position=1, sigma_position=1)
numpy.testing.assert_array_almost_equal(position_weights, [1.])
def test_one_sigma_p_index_zero_compute_position_weights_returns_expected_weights(self):
position_weights = compute_position_weights(position_index=0, max_position=3, sigma_position=1)
numpy.testing.assert_array_almost_equal(position_weights, self.position_weights_one_sigma_p_index_zero)
    def test_one_sigma_p_index_one_compute_position_weights_returns_expected_weights(self):
position_weights = compute_position_weights(position_index=1, max_position=3, sigma_position=1)
numpy.testing.assert_array_almost_equal(position_weights, self.position_weights_one_sigma_p_index_one)
def test_half_sigma_p_index_zero_compute_position_weights_returns_expected_weights(self):
position_weights = compute_position_weights(position_index=0, max_position=3, sigma_position=0.5)
numpy.testing.assert_array_almost_equal(position_weights, self.position_weights_half_sigma_p_index_zero)
def test_one_sigma_p_one_position_compute_position_weights_matrix_returns_one_weight(self):
<|code_end|>
. Write the next line using the current file imports:
import unittest2
import numpy.testing
from preimage.utils.position import compute_position_weights, compute_position_weights_matrix
and context from other files:
# Path: preimage/utils/position.py
# def compute_position_weights(position_index, max_position, sigma_position):
# position_penalties = numpy.array([(position_index - j) ** 2 for j in range(max_position)], dtype=numpy.float)
# position_penalties /= -2. * (sigma_position ** 2)
# return numpy.exp(position_penalties)
#
# def compute_position_weights_matrix(max_position, sigma_position):
# position_penalties = numpy.array([(i - j) for i in range(max_position) for j in range(max_position)],
# dtype=numpy.float)
# position_penalties = position_penalties.reshape(max_position, max_position)
# position_penalties = numpy.square(position_penalties) / (-2 * (sigma_position ** 2))
# return numpy.exp(position_penalties)
, which may include functions, classes, or code. Output only the next line. | position_weights = compute_position_weights_matrix(max_position=1, sigma_position=1) |
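position_weights = compute_position_weights_matrix(max_position=1, sigma_position=1)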
Predict the next line after this snippet: <|code_start|>__author__ = 'amelie'
class TestEulerianPathModel(unittest2.TestCase):
def setUp(self):
self.setup_fit_parameters()
self.setup_predict_parameters()
self.setup_thresholds()
self.setup_eulerian_path_algorithm()
self.alphabet = ['a', 'b']
<|code_end|>
using the current file's imports:
import unittest2
import numpy
import numpy.testing
from mock import patch, Mock
from preimage.models.eulerian_path_model import EulerianPathModel
from preimage.learners.structured_krr import InferenceFitParameters
and any relevant context from other files:
# Path: preimage/models/eulerian_path_model.py
# class EulerianPathModel(Model):
#
# def __init__(self, alphabet, n, is_using_length=True, seed=42):
# Model.__init__(self, alphabet, n, is_using_length)
# self._graph_builder = GraphBuilder(alphabet, n)
# self._is_normalized = False
# self._is_merging_path = True
# self._seed = seed
# self._n_grams = list(get_n_grams(alphabet, n))
# self._n_gram_to_index = get_n_gram_to_index(alphabet, n)
# self._thresholds_ = None
#
# def fit(self, inference_parameters):
# Model.fit(self, inference_parameters)
# self._feature_space_ = NGramFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
# self._is_normalized)
# if not self._is_using_length:
# Y_weights = numpy.dot(inference_parameters.weights, inference_parameters.gram_matrix).T
# self._find_thresholds(Y_weights)
#
# def _find_thresholds(self, Y_weights):
# n_examples = Y_weights.shape[0]
# Y_n_gram_weights = self._get_n_gram_weights(Y_weights, n_examples)
# n_gram_counts = self._feature_space_.compute_weights(numpy.ones(n_examples))
# n_gram_counts = numpy.array(n_gram_counts, dtype=numpy.int)
# self._thresholds_ = self._find_weights_where_sum_weights_above_is_n_gram_count(n_gram_counts, Y_n_gram_weights)
#
# def _get_n_gram_weights(self, Y_weights, n_training_examples):
# Y_n_gram_weights = numpy.empty((n_training_examples, len(self._alphabet) ** self._n))
# for y_index, y_weight in enumerate(Y_weights):
# Y_n_gram_weights[y_index] = self._feature_space_.compute_weights(y_weight)
# return Y_n_gram_weights
#
# def _find_weights_where_sum_weights_above_is_n_gram_count(self, n_gram_counts, Y_n_gram_weights):
# thresholds = numpy.zeros(len(self._alphabet) ** self._n)
# for n_gram_index, n_gram_count in enumerate(n_gram_counts):
# if n_gram_count > 0:
# n_gram_weights = Y_n_gram_weights[:, n_gram_index]
# threshold_index = numpy.argpartition(-n_gram_weights, n_gram_count)[n_gram_count]
# thresholds[n_gram_index] = n_gram_weights[threshold_index]
# return thresholds
#
# def predict(self, Y_weights, y_lengths):
# if self._is_using_length:
# self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
# Y_predictions = self._predict_with_length(Y_weights, y_lengths)
# else:
# Y_predictions = self._predict_without_length(Y_weights)
# return Y_predictions
#
# def _predict_with_length(self, Y_weights, y_lengths):
# Y_predictions = []
# eulerian_path = EulerianPath(self._alphabet, self._n, self._min_length_, self._is_merging_path)
# for y_weights, y_length in zip(Y_weights, y_lengths):
# n_gram_weights = self._feature_space_.compute_weights(y_weights)
# y_predicted = eulerian_path.find_eulerian_path(n_gram_weights, y_length=y_length)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# def _predict_without_length(self, Y_weights):
# Y_predictions = []
# eulerian_path = EulerianPath(self._alphabet, self._n, self._min_length_, self._is_merging_path)
# for y_weights in Y_weights:
# n_gram_weights = self._feature_space_.compute_weights(y_weights)
# y_predicted = eulerian_path.find_eulerian_path(n_gram_weights, thresholds=self._thresholds_)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# Path: preimage/learners/structured_krr.py
# class InferenceFitParameters:
# """Parameters for the inference model.
#
# That way inference_model.fit(parameters) doesn't have unused parameters but only access the one it needs
# .
# Attributes
# ----------
# weights : array, shape = [n_samples, n_samples]
# Learned weights, where n_samples is the number of training samples.
# gram_matrix : array, shape = [n_samples, n_samples]
# Gram_matrix of the training samples.
# Y_train : array, shape = [n_samples, ]
# Training strings.
# y_lengths : array, shape = [n_samples]
# Length of each training string in Y_train.
# """
# def __init__(self, weights, gram_matrix, Y, y_lengths):
# self.weights = weights
# self.gram_matrix = gram_matrix
# self.Y_train = Y
# self.y_lengths = y_lengths
. Output only the next line. | self.model_with_length = EulerianPathModel(self.alphabet, n=2, is_using_length=True) |
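self.model_with_length = EulerianPathModel(self.alphabet, n=2, is_using_length=True)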
Based on the snippet: <|code_start|>__author__ = 'amelie'
class TestAlphabet(unittest2.TestCase):
def setUp(self):
self.a_b_alphabet = ['a', 'b']
self.abc_alphabet = ['a', 'b', 'c']
self.two_grams = ['aa', 'ab', 'ba', 'bb']
self.one_gram_to_index = {'a': 0, 'b': 1}
self.two_gram_to_index = {'aa': 0, 'ab': 1, 'ba': 2, 'bb': 3}
self.index_to_one_gram = {0: 'a', 1: 'b'}
self.index_to_two_gram = {0: 'aa', 1: 'ab', 2: 'ba', 3: 'bb'}
self.cab = ['cab']
self.cab_aa = ['cab', 'aa']
self.cab_int = [[2, 0, 1]]
self.cab_aa_int = [[2, 0, 1], [0, 0, -1]]
def test_integer_n_is_zero_get_n_grams_raises_value_error(self):
with self.assertRaises(ValueError):
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest2
import numpy.testing
from preimage.utils import alphabet
from preimage.exceptions.n_gram import InvalidNGramLengthError
and context (classes, functions, sometimes code) from other files:
# Path: preimage/utils/alphabet.py
# class Alphabet:
# def get_n_gram_to_index(alphabet, n):
# def get_index_to_n_gram(alphabet, n):
# def get_n_grams(alphabet, n):
# def transform_strings_to_integer_lists(Y, alphabet):
#
# Path: preimage/exceptions/n_gram.py
# class InvalidNGramLengthError(ValueError):
# def __init__(self, n, min_n=0):
# self.n = n
# self.min_n = min_n
#
# def __str__(self):
# error_message = 'n must be greater than {:d}. Got: n={:d}'.format(self.min_n, self.n)
# return error_message
. Output only the next line. | alphabet.get_n_grams(self.a_b_alphabet, n=0.5) |
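alphabet.get_n_grams(self.a_b_alphabet, n=0.5)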
Predict the next line for this snippet: <|code_start|> self.cab = ['cab']
self.cab_aa = ['cab', 'aa']
self.cab_int = [[2, 0, 1]]
self.cab_aa_int = [[2, 0, 1], [0, 0, -1]]
def test_integer_n_is_zero_get_n_grams_raises_value_error(self):
with self.assertRaises(ValueError):
alphabet.get_n_grams(self.a_b_alphabet, n=0.5)
def test_get_one_grams_returns_alphabet(self):
n_grams = alphabet.get_n_grams(self.a_b_alphabet, n=1)
numpy.testing.assert_array_equal(n_grams, self.a_b_alphabet)
def test_get_two_grams_returns_expected_two_grams(self):
n_grams = alphabet.get_n_grams(self.a_b_alphabet, n=2)
numpy.testing.assert_array_equal(n_grams, self.two_grams)
def test_get_one_gram_to_index_returns_expected_dict(self):
n_gram_to_index = alphabet.get_n_gram_to_index(self.a_b_alphabet, n=1)
self.assertDictEqual(n_gram_to_index, self.one_gram_to_index)
def test_get_two_gram_to_index_returns_expected_dict(self):
n_gram_to_index = alphabet.get_n_gram_to_index(self.a_b_alphabet, n=2)
self.assertDictEqual(n_gram_to_index, self.two_gram_to_index)
def test_n_zero_get_n_gram_to_index_raises_value_error(self):
<|code_end|>
with the help of current file imports:
import unittest2
import numpy.testing
from preimage.utils import alphabet
from preimage.exceptions.n_gram import InvalidNGramLengthError
and context from other files:
# Path: preimage/utils/alphabet.py
# class Alphabet:
# def get_n_gram_to_index(alphabet, n):
# def get_index_to_n_gram(alphabet, n):
# def get_n_grams(alphabet, n):
# def transform_strings_to_integer_lists(Y, alphabet):
#
# Path: preimage/exceptions/n_gram.py
# class InvalidNGramLengthError(ValueError):
# def __init__(self, n, min_n=0):
# self.n = n
# self.min_n = min_n
#
# def __str__(self):
# error_message = 'n must be greater than {:d}. Got: n={:d}'.format(self.min_n, self.n)
# return error_message
, which may contain function names, class names, or code. Output only the next line. | with self.assertRaises(InvalidNGramLengthError): |
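with self.assertRaises(InvalidNGramLengthError):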
Given the following code snippet before the placeholder: <|code_start|>__author__ = 'amelie'
# Shouldn't label this as "feature-space" since we don't use a sparse matrix representation here.
class GenericStringSimilarityFeatureSpace:
"""Output space for the Generic String kernel with position and n-gram similarity.
Doesn't use a sparse matrix representation because it takes in account the similarity between the n-grams.
This is used to compute the weights of the graph during the inference phase.
Attributes
----------
n : int
N-gram length.
is_normalized : bool
True if the feature space should be normalized, False otherwise.
max_train_length : int
Length of the longest string in the training dataset.
gs_kernel : GenericStringKernel
Generic string kernel.
"""
def __init__(self, alphabet, n, Y, is_normalized, gs_kernel):
self.n = int(n)
self.is_normalized = is_normalized
self._y_lengths = numpy.array([len(y) for y in Y])
self.max_train_length = numpy.max(self._y_lengths)
self.gs_kernel = gs_kernel
<|code_end|>
, predict the next line using imports from the current file:
import numpy
from preimage.features.gs_similarity_weights import compute_gs_similarity_weights
from preimage.utils.alphabet import transform_strings_to_integer_lists, get_n_grams
and context including class names, function names, and sometimes code from other files:
# Path: preimage/utils/alphabet.py
# def transform_strings_to_integer_lists(Y, alphabet):
# letter_to_int = get_n_gram_to_index(alphabet, 1)
# n_examples = numpy.array(Y).shape[0]
# max_length = numpy.max([len(y) for y in Y])
# Y_int = numpy.zeros((n_examples, max_length), dtype=numpy.int8) - 1
# for y_index, y in enumerate(Y):
# for letter_index, letter in enumerate(y):
# Y_int[y_index, letter_index] = letter_to_int[letter]
# return Y_int
#
# def get_n_grams(alphabet, n):
# n = int(n)
# if n <= 0:
# raise InvalidNGramLengthError(n)
# n_grams = [''.join(n_gram) for n_gram in product(alphabet, repeat=n)]
# return n_grams
. Output only the next line. | self._Y_int = transform_strings_to_integer_lists(Y, alphabet) |
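self._Y_int = transform_strings_to_integer_lists(Y, alphabet)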
Given the following code snippet before the placeholder: <|code_start|>__author__ = 'amelie'
# Shouldn't label this as "feature-space" since we don't use a sparse matrix representation here.
class GenericStringSimilarityFeatureSpace:
"""Output space for the Generic String kernel with position and n-gram similarity.
Doesn't use a sparse matrix representation because it takes in account the similarity between the n-grams.
This is used to compute the weights of the graph during the inference phase.
Attributes
----------
n : int
N-gram length.
is_normalized : bool
True if the feature space should be normalized, False otherwise.
max_train_length : int
Length of the longest string in the training dataset.
gs_kernel : GenericStringKernel
Generic string kernel.
"""
def __init__(self, alphabet, n, Y, is_normalized, gs_kernel):
self.n = int(n)
self.is_normalized = is_normalized
self._y_lengths = numpy.array([len(y) for y in Y])
self.max_train_length = numpy.max(self._y_lengths)
self.gs_kernel = gs_kernel
self._Y_int = transform_strings_to_integer_lists(Y, alphabet)
<|code_end|>
, predict the next line using imports from the current file:
import numpy
from preimage.features.gs_similarity_weights import compute_gs_similarity_weights
from preimage.utils.alphabet import transform_strings_to_integer_lists, get_n_grams
and context including class names, function names, and sometimes code from other files:
# Path: preimage/utils/alphabet.py
# def transform_strings_to_integer_lists(Y, alphabet):
# letter_to_int = get_n_gram_to_index(alphabet, 1)
# n_examples = numpy.array(Y).shape[0]
# max_length = numpy.max([len(y) for y in Y])
# Y_int = numpy.zeros((n_examples, max_length), dtype=numpy.int8) - 1
# for y_index, y in enumerate(Y):
# for letter_index, letter in enumerate(y):
# Y_int[y_index, letter_index] = letter_to_int[letter]
# return Y_int
#
# def get_n_grams(alphabet, n):
# n = int(n)
# if n <= 0:
# raise InvalidNGramLengthError(n)
# n_grams = [''.join(n_gram) for n_gram in product(alphabet, repeat=n)]
# return n_grams
. Output only the next line. | self._n_grams_int = transform_strings_to_integer_lists(get_n_grams(alphabet, n), alphabet) |
Here is a snippet: <|code_start|>__author__ = 'amelie'
def branch_and_bound_side_effect(node_creator, y_length, alphabet, max_time):
solution_dict = {1: ('a', 1), 2: ('ba', 3)}
return solution_dict[y_length]
class TestNGramModel(unittest2.TestCase):
def setUp(self):
self.setup_feature_space()
self.setup_fit_parameters()
self.setup_graph_builder()
self.setup_branch_and_bound()
self.alphabet = ['a', 'b', 'c']
<|code_end|>
. Write the next line using the current file imports:
import unittest2
import numpy
import numpy.testing
from mock import patch, Mock
from preimage.models.n_gram_model import NGramModel
from preimage.learners.structured_krr import InferenceFitParameters
and context from other files:
# Path: preimage/models/n_gram_model.py
# class NGramModel(Model):
# def __init__(self, alphabet, n, is_using_length=True, seed=42, max_time=30):
# Model.__init__(self, alphabet, n, is_using_length)
# self._graph_builder = GraphBuilder(list(alphabet), n)
# self._is_normalized = True
# self._seed = seed
# self._max_time = max_time
# self._n_grams = list(get_n_grams(alphabet, n))
# self._n_gram_to_index = get_n_gram_to_index(alphabet, n)
#
# def fit(self, inference_parameters):
# Model.fit(self, inference_parameters)
# self._feature_space_ = NGramFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
# self._is_normalized)
#
# def predict(self, Y_weights, y_lengths):
# if self._is_using_length:
# self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
# Y_predictions = self._predict_with_length(Y_weights, y_lengths)
# else:
# Y_predictions = self._predict_without_length(Y_weights)
# return Y_predictions
#
# def _predict_with_length(self, Y_weights, y_lengths):
# Y_predictions = []
# for y_weights, y_length in zip(Y_weights, y_lengths):
# n_gram_weights = self._feature_space_.compute_weights(y_weights)
# graph = self._graph_builder.build_graph(n_gram_weights, y_length)
# node_creator = get_n_gram_node_creator(self._n, graph, n_gram_weights, y_length, self._n_gram_to_index,
# self._n_grams)
# y_predicted, y_bound = branch_and_bound(node_creator, y_length, self._alphabet, self._max_time)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# def _predict_without_length(self, Y_weights):
# Y_predictions = []
# for y_weights in Y_weights:
# n_gram_weights = self._feature_space_.compute_weights(y_weights)
# graph = self._graph_builder.build_graph(n_gram_weights, self._max_length_)
# node_creator = get_n_gram_node_creator(self._n, graph, n_gram_weights, self._max_length_,
# self._n_gram_to_index, self._n_grams)
# y_predicted, y_bound = branch_and_bound_no_length(node_creator, self._min_length_, self._max_length_,
# self._alphabet, self._max_time)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# Path: preimage/learners/structured_krr.py
# class InferenceFitParameters:
# """Parameters for the inference model.
#
# That way inference_model.fit(parameters) doesn't have unused parameters but only access the one it needs
# .
# Attributes
# ----------
# weights : array, shape = [n_samples, n_samples]
# Learned weights, where n_samples is the number of training samples.
# gram_matrix : array, shape = [n_samples, n_samples]
# Gram_matrix of the training samples.
# Y_train : array, shape = [n_samples, ]
# Training strings.
# y_lengths : array, shape = [n_samples]
# Length of each training string in Y_train.
# """
# def __init__(self, weights, gram_matrix, Y, y_lengths):
# self.weights = weights
# self.gram_matrix = gram_matrix
# self.Y_train = Y
# self.y_lengths = y_lengths
, which may include functions, classes, or code. Output only the next line. | self.model_with_length = NGramModel(self.alphabet, n=1, is_using_length=True) |
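self.model_with_length = NGramModel(self.alphabet, n=1, is_using_length=True)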
Predict the next line after this snippet: <|code_start|>__author__ = 'amelie'
def branch_and_bound_side_effect(node_creator, y_length, alphabet, max_time):
solution_dict = {1: ('a', 1), 2: ('ba', 3)}
return solution_dict[y_length]
class TestNGramModel(unittest2.TestCase):
def setUp(self):
self.setup_feature_space()
self.setup_fit_parameters()
self.setup_graph_builder()
self.setup_branch_and_bound()
self.alphabet = ['a', 'b', 'c']
self.model_with_length = NGramModel(self.alphabet, n=1, is_using_length=True)
self.model_no_length = NGramModel(self.alphabet, n=1, is_using_length=False)
self.Y_weights = numpy.array([[1], [0]])
self.y_lengths = [1, 2]
def setup_fit_parameters(self):
self.max_train_length = 2
self.min_train_length = 1
self.weights = numpy.array([[1, 2]])
self.gram_matrix = numpy.array([[1, 0], [0, 1]])
<|code_end|>
using the current file's imports:
import unittest2
import numpy
import numpy.testing
from mock import patch, Mock
from preimage.models.n_gram_model import NGramModel
from preimage.learners.structured_krr import InferenceFitParameters
and any relevant context from other files:
# Path: preimage/models/n_gram_model.py
# class NGramModel(Model):
# def __init__(self, alphabet, n, is_using_length=True, seed=42, max_time=30):
# Model.__init__(self, alphabet, n, is_using_length)
# self._graph_builder = GraphBuilder(list(alphabet), n)
# self._is_normalized = True
# self._seed = seed
# self._max_time = max_time
# self._n_grams = list(get_n_grams(alphabet, n))
# self._n_gram_to_index = get_n_gram_to_index(alphabet, n)
#
# def fit(self, inference_parameters):
# Model.fit(self, inference_parameters)
# self._feature_space_ = NGramFeatureSpace(self._alphabet, self._n, inference_parameters.Y_train,
# self._is_normalized)
#
# def predict(self, Y_weights, y_lengths):
# if self._is_using_length:
# self._verify_y_lengths_is_not_none_when_use_length(y_lengths)
# Y_predictions = self._predict_with_length(Y_weights, y_lengths)
# else:
# Y_predictions = self._predict_without_length(Y_weights)
# return Y_predictions
#
# def _predict_with_length(self, Y_weights, y_lengths):
# Y_predictions = []
# for y_weights, y_length in zip(Y_weights, y_lengths):
# n_gram_weights = self._feature_space_.compute_weights(y_weights)
# graph = self._graph_builder.build_graph(n_gram_weights, y_length)
# node_creator = get_n_gram_node_creator(self._n, graph, n_gram_weights, y_length, self._n_gram_to_index,
# self._n_grams)
# y_predicted, y_bound = branch_and_bound(node_creator, y_length, self._alphabet, self._max_time)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# def _predict_without_length(self, Y_weights):
# Y_predictions = []
# for y_weights in Y_weights:
# n_gram_weights = self._feature_space_.compute_weights(y_weights)
# graph = self._graph_builder.build_graph(n_gram_weights, self._max_length_)
# node_creator = get_n_gram_node_creator(self._n, graph, n_gram_weights, self._max_length_,
# self._n_gram_to_index, self._n_grams)
# y_predicted, y_bound = branch_and_bound_no_length(node_creator, self._min_length_, self._max_length_,
# self._alphabet, self._max_time)
# Y_predictions.append(y_predicted)
# return Y_predictions
#
# Path: preimage/learners/structured_krr.py
# class InferenceFitParameters:
# """Parameters for the inference model.
#
# That way inference_model.fit(parameters) doesn't have unused parameters but only access the one it needs
# .
# Attributes
# ----------
# weights : array, shape = [n_samples, n_samples]
# Learned weights, where n_samples is the number of training samples.
# gram_matrix : array, shape = [n_samples, n_samples]
# Gram_matrix of the training samples.
# Y_train : array, shape = [n_samples, ]
# Training strings.
# y_lengths : array, shape = [n_samples]
# Length of each training string in Y_train.
# """
# def __init__(self, weights, gram_matrix, Y, y_lengths):
# self.weights = weights
# self.gram_matrix = gram_matrix
# self.Y_train = Y
# self.y_lengths = y_lengths
. Output only the next line. | self.fit_parameters = InferenceFitParameters(self.weights, self.gram_matrix, Y=['a', 'ab'], |