| Instruction | output_code |
|---|---|
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
try:
except ImportError:
test_path = os.path.dirname(os.path.abspath(__file__))
base_path = os.path.dirname(test_path)
class TestUpdater(unittest.TestCase):
def setUp(self):
<|code_end|>
with the help of current file imports:
import os
import os.path
import shutil
import unittest
from unittest.mock import patch
from mock import patch
from simiki import utils, updater
from simiki.config import get_default_config
and context from other files:
# Path: simiki/utils.py
# COLOR_CODES = {
# "reset": "\033[0m",
# "black": "\033[1;30m",
# "red": "\033[1;31m",
# "green": "\033[1;32m",
# "yellow": "\033[1;33m",
# "blue": "\033[1;34m",
# "magenta": "\033[1;35m",
# "cyan": "\033[1;36m",
# "white": "\033[1;37m",
# "bgred": "\033[1;41m",
# "bggrey": "\033[1;100m",
# }
# def color_msg(color, msg):
# def check_extension(filename):
# def copytree(src, dst, symlinks=False, ignore=None):
# def emptytree(directory, exclude_list=None):
# def mkdir_p(path):
# def listdir_nohidden(path):
# def write_file(filename, content):
# def get_md5(filename):
# def get_dir_md5(dirname):
# def import_string(import_name, silent=False):
#
# Path: simiki/updater.py
# def get_input(text):
# def _update_file(filename, local_path, original_path):
# def _update_dir(dirname, local_dir, original_dir, tag='directory'):
# def update_builtin(**kwargs):
#
# Path: simiki/config.py
# def get_default_config():
# return _post_process(_set_default_config())
, which may contain function names, class names, or code. Output only the next line. | self.default_config = get_default_config() |
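Editor's note: the truncated `try:`/`except ImportError:` at the top of this row pairs with the two `patch` imports listed in the prompt; a minimal sketch of the compatibility import the row almost certainly elides (an assumption, since the bodies are stripped from the prompt):

```python
try:
    from unittest.mock import patch  # Python 3: mock lives in the stdlib
except ImportError:
    from mock import patch  # Python 2: fall back to the mock backport
```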
Here is a snippet: <|code_start|>
def check_extension(filename):
"""Check if the file extension is in the allowed extensions
The `fnmatch` module can also get the suffix:
patterns = ["*.md", "*.mkd", "*.markdown"]
fnmatch.filter(files, pattern)
"""
exts = ['.{0}'.format(e) for e in simiki.allowed_extensions]
return os.path.splitext(filename)[1] in exts
def copytree(src, dst, symlinks=False, ignore=None):
"""Copy from source directory to destination"""
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def emptytree(directory, exclude_list=None):
"""Delete all the files and dirs under specified directory"""
<|code_end|>
. Write the next line using the current file imports:
import os
import sys
import os.path
import shutil
import errno
import logging
import io
import hashlib
import simiki
from simiki.compat import unicode
and context from other files:
# Path: simiki/compat.py
, which may include functions, classes, or code. Output only the next line. | if not isinstance(directory, unicode): |
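Editor's note: `unicode` here comes from `simiki.compat`, whose body never appears in these rows. A plausible shim consistent with the `is_py2`, `unicode`, and `raw_input` names imported elsewhere in this dump (all three definitions are assumptions, not ground truth):

```python
import sys

is_py2 = (sys.version_info[0] == 2)

if is_py2:
    unicode = unicode      # noqa: F821 -- the Python 2 builtin
    raw_input = raw_input  # noqa: F821
else:
    unicode = str          # Python 3 folds unicode into str
    raw_input = input
```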
Based on the snippet: <|code_start|>@task
def commit():
"""git commit source changes from all tracked files
include:
- add all tracked files in the work tree, include modified(M), deleted(D)
- commit all files in the index, include added(A), modified(M),
renamed(R), deleted(D)
- untracked files should be manually added to the index before
run this task
before do commit, it requires to confirm the files to be committed; and
the requirement before do add is a future feature, it is currently
disabled.
"""
message = 'Update Documentation'
yes_ans = ('y', 'yes')
with settings(warn_only=True):
# Changes in the work tree to add
add_file = '--update .' # include tracked files
# hack of res.return_code without warning info
res = local('git diff --quiet --exit-code; echo $?', capture=True)
if int(res.strip()):
if False: # future feature?
# TODO: there use diff to uniform with below, and the
# output can be formatted like `git add --dry-run --update .`
test_res = local('git diff --name-status', capture=True)
try:
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import sys
import ftplib
import getpass
import fabric.contrib.project as project
from fabric.api import env, local, task, settings
from fabric.colors import blue, red
from simiki import config
from simiki.compat import raw_input
and context (classes, functions, sometimes code) from other files:
# Path: simiki/config.py
# class ConfigFileNotFound(Exception):
# def _set_default_config():
# def _post_process(config):
# def get_default_config():
# def parse_config(config_file):
#
# Path: simiki/compat.py
. Output only the next line. | _ans = raw_input('\n{0}\nAdd these files to index? (y/N) ' |
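Editor's note: the `; echo $?` tail in this row is the "hack of res.return_code" the snippet's comment mentions: `git diff --quiet --exit-code` reports a dirty work tree through its exit status, and echoing `$?` turns that status into capturable stdout for Fabric's `local(..., capture=True)`. A self-contained stdlib illustration of the same trick (variable names are mine):

```python
import subprocess

# Exit status 1 means unstaged changes; `echo $?` makes that status
# readable from stdout instead of the process return code.
out = subprocess.check_output(
    'git diff --quiet --exit-code; echo $?', shell=True)
work_tree_dirty = bool(int(out.decode().strip()))
```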
Predict the next line after this snippet: <|code_start|>
class Reuse_TCPServer(socket_server.TCPServer):
allow_reuse_address = True
class YARequestHandler(http_server.SimpleHTTPRequestHandler):
def translate_path(self, path):
"""map url path to local file system.
path and return path are str type
in py3, builtin translate_path input is str(but it's unicode) and
return str. so there is no need to do with codecs, system can locate
file with unicode path.
in py2, buildin translate_path input is str and return str. we need
to decode to unicode and then encode path with filesystemencoding(),
as mentioned above, unicode path can be located, but will have problem
with py2's translate_path, for uniformity, we also return the
corresponding type of translate_path in manual part.
TODO:
- fspath with os.sep from url always slash
- URL_ROOT codecs simplify?
- in the end of if body use super translate_path directly?
"""
path = urllib_request.unquote(path)
if not isinstance(path, unicode):
path = path.decode('utf-8')
fsenc = sys.getfilesystemencoding()
<|code_end|>
using the current file's imports:
import os
import os.path
import sys
import logging
import traceback
import SimpleHTTPServer as http_server
import http.server as http_server
import SocketServer as socket_server
import socketserver as socket_server
import urllib2 as urllib_request
import urllib.request as urllib_request
from simiki.compat import is_py2, unicode
from os import getcwdu
from os import getcwd as getcwdu
and any relevant context from other files:
# Path: simiki/compat.py
. Output only the next line. | if is_py2: |
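Editor's note: the doubled `SimpleHTTPServer`/`http.server` (and friends) imports in this row are presumably the flattened remains of `try`/`except ImportError` guards. The answer's `if is_py2:` branch then finishes the dance the docstring describes; a hedged sketch of that logic as a standalone helper (the function name `_fs_path` is mine, only the `if is_py2:` line is ground truth):

```python
import sys

def _fs_path(path, is_py2=(sys.version_info[0] == 2)):
    # Mirror the docstring: on Python 2 re-encode the unicode path with
    # the filesystem encoding so the builtin translate_path (str in,
    # str out) keeps working; on Python 3 pass the str through.
    fsenc = sys.getfilesystemencoding()
    if is_py2:
        return path.encode(fsenc)
    return path
```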
Given the following code snippet before the placeholder: <|code_start|>
URL_ROOT = None
PUBLIC_DIRECTORY = None
class Reuse_TCPServer(socket_server.TCPServer):
allow_reuse_address = True
class YARequestHandler(http_server.SimpleHTTPRequestHandler):
def translate_path(self, path):
"""map url path to local file system.
path and return path are str type
in py3, builtin translate_path input is str(but it's unicode) and
return str. so there is no need to do with codecs, system can locate
file with unicode path.
in py2, buildin translate_path input is str and return str. we need
to decode to unicode and then encode path with filesystemencoding(),
as mentioned above, unicode path can be located, but will have problem
with py2's translate_path, for uniformity, we also return the
corresponding type of translate_path in manual part.
TODO:
- fspath with os.sep from url always slash
- URL_ROOT codecs simplify?
- in the end of if body use super translate_path directly?
"""
path = urllib_request.unquote(path)
<|code_end|>
, predict the next line using imports from the current file:
import os
import os.path
import sys
import logging
import traceback
import SimpleHTTPServer as http_server
import http.server as http_server
import SocketServer as socket_server
import socketserver as socket_server
import urllib2 as urllib_request
import urllib.request as urllib_request
from simiki.compat import is_py2, unicode
from os import getcwdu
from os import getcwd as getcwdu
and context including class names, function names, and sometimes code from other files:
# Path: simiki/compat.py
. Output only the next line. | if not isinstance(path, unicode): |
Based on the snippet: <|code_start|> def setUp(self):
self.default_config = get_default_config()
self.args = deepcopy(INIT_ARGS)
self.target_path = "_build"
if os.path.exists(self.target_path):
shutil.rmtree(self.target_path)
self.files = [
"_config.yml",
"fabfile.py",
os.path.join(self.default_config['source'], "intro",
"gettingstarted.md"),
os.path.join(self.default_config['themes_dir'],
self.default_config['theme'],
"page.html"),
os.path.join(self.default_config['themes_dir'],
self.default_config['theme'],
"static", "css", "style.css")
]
self.dirs = [
self.default_config['source'],
self.default_config['destination'],
self.default_config['themes_dir'],
os.path.join(self.default_config['themes_dir'],
self.default_config['theme']),
]
def test_init(self):
os.chdir(test_path)
self.args.update({u'init': True, u'-p': self.target_path})
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import os.path
import shutil
import unittest
import io
from copy import deepcopy
from simiki import cli
from simiki.utils import copytree, emptytree
from simiki.config import get_default_config
and context (classes, functions, sometimes code) from other files:
# Path: simiki/cli.py
# def init_site(target_path):
# def create_new_wiki(category, title, filename):
# def preview_site(host, port, dest, root, do_watch):
# def method_proxy(cls_instance, method_name, *args, **kwargs):
# def __init__(self, target_path):
# def generate(self, include_draft=False):
# def generate_tags(self):
# def generate_feed(self, pages, feed_fn):
# def generate_catalog(self, pages):
# def generate_pages(self):
# def generate_multiple_pages(self, md_files):
# def generate_single_page(self, generator, md_file):
# def _generate_callback(self, result):
# def install_theme(self):
# def copy_attach(self):
# def unicode_docopt(args):
# def main(args=None):
# class Generator(object):
#
# Path: simiki/utils.py
# def copytree(src, dst, symlinks=False, ignore=None):
# """Copy from source directory to destination"""
#
# if not os.path.exists(dst):
# os.makedirs(dst)
# for item in os.listdir(src):
# s = os.path.join(src, item)
# d = os.path.join(dst, item)
# if os.path.isdir(s):
# copytree(s, d, symlinks, ignore)
# else:
# shutil.copy2(s, d)
#
# def emptytree(directory, exclude_list=None):
# """Delete all the files and dirs under specified directory"""
#
# if not isinstance(directory, unicode):
# directory = unicode(directory, 'utf-8')
# if not exclude_list:
# exclude_list = []
# for p in os.listdir(directory):
# if p in exclude_list:
# continue
# fp = os.path.join(directory, p)
# if os.path.isdir(fp):
# try:
# shutil.rmtree(fp)
# logger.debug("Delete directory %s", fp)
# except OSError as e:
# logger.error("Unable to delete directory %s: %s",
# fp, unicode(e))
# elif os.path.isfile(fp):
# try:
# logging.debug("Delete file %s", fp)
# os.remove(fp)
# except OSError as e:
# logger.error("Unable to delete file %s: %s", fp, unicode(e))
# else:
# logger.error("Unable to delete %s, unknown filetype", fp)
#
# Path: simiki/config.py
# def get_default_config():
# return _post_process(_set_default_config())
. Output only the next line. | cli.main(self.args) |
Continue the code snippet: <|code_start|> cli.main(self.args)
for f in self.files:
self.assertTrue(os.path.isfile(os.path.join(self.target_path, f)))
for d in self.dirs:
self.assertTrue(os.path.isdir(os.path.join(self.target_path, d)))
def tearDown(self):
if os.path.exists(self.target_path):
shutil.rmtree(self.target_path)
class TestCliGenerate(unittest.TestCase):
def setUp(self):
self.args = deepcopy(INIT_ARGS)
self.wiki_path = os.path.join(test_path, "mywiki_for_cli")
self.output_path = os.path.join(self.wiki_path, "output")
if os.path.exists(self.output_path):
emptytree(self.output_path)
config_file_tpl = os.path.join(base_path, 'simiki',
'conf_templates', '_config.yml.in')
self.config_file_dst = os.path.join(self.wiki_path, '_config.yml')
shutil.copyfile(config_file_tpl, self.config_file_dst)
s_themes_path = os.path.join(base_path, 'simiki', 'themes')
self.d_themes_path = os.path.join(self.wiki_path, 'themes')
if os.path.exists(self.d_themes_path):
shutil.rmtree(self.d_themes_path)
<|code_end|>
. Use current file imports:
import os
import os.path
import shutil
import unittest
import io
from copy import deepcopy
from simiki import cli
from simiki.utils import copytree, emptytree
from simiki.config import get_default_config
and context (classes, functions, or code) from other files:
# Path: simiki/cli.py
# def init_site(target_path):
# def create_new_wiki(category, title, filename):
# def preview_site(host, port, dest, root, do_watch):
# def method_proxy(cls_instance, method_name, *args, **kwargs):
# def __init__(self, target_path):
# def generate(self, include_draft=False):
# def generate_tags(self):
# def generate_feed(self, pages, feed_fn):
# def generate_catalog(self, pages):
# def generate_pages(self):
# def generate_multiple_pages(self, md_files):
# def generate_single_page(self, generator, md_file):
# def _generate_callback(self, result):
# def install_theme(self):
# def copy_attach(self):
# def unicode_docopt(args):
# def main(args=None):
# class Generator(object):
#
# Path: simiki/utils.py
# def copytree(src, dst, symlinks=False, ignore=None):
# """Copy from source directory to destination"""
#
# if not os.path.exists(dst):
# os.makedirs(dst)
# for item in os.listdir(src):
# s = os.path.join(src, item)
# d = os.path.join(dst, item)
# if os.path.isdir(s):
# copytree(s, d, symlinks, ignore)
# else:
# shutil.copy2(s, d)
#
# def emptytree(directory, exclude_list=None):
# """Delete all the files and dirs under specified directory"""
#
# if not isinstance(directory, unicode):
# directory = unicode(directory, 'utf-8')
# if not exclude_list:
# exclude_list = []
# for p in os.listdir(directory):
# if p in exclude_list:
# continue
# fp = os.path.join(directory, p)
# if os.path.isdir(fp):
# try:
# shutil.rmtree(fp)
# logger.debug("Delete directory %s", fp)
# except OSError as e:
# logger.error("Unable to delete directory %s: %s",
# fp, unicode(e))
# elif os.path.isfile(fp):
# try:
# logging.debug("Delete file %s", fp)
# os.remove(fp)
# except OSError as e:
# logger.error("Unable to delete file %s: %s", fp, unicode(e))
# else:
# logger.error("Unable to delete %s, unknown filetype", fp)
#
# Path: simiki/config.py
# def get_default_config():
# return _post_process(_set_default_config())
. Output only the next line. | copytree(s_themes_path, self.d_themes_path) |
Next line prediction: <|code_start|> self.dirs = [
self.default_config['source'],
self.default_config['destination'],
self.default_config['themes_dir'],
os.path.join(self.default_config['themes_dir'],
self.default_config['theme']),
]
def test_init(self):
os.chdir(test_path)
self.args.update({u'init': True, u'-p': self.target_path})
cli.main(self.args)
for f in self.files:
self.assertTrue(os.path.isfile(os.path.join(self.target_path, f)))
for d in self.dirs:
self.assertTrue(os.path.isdir(os.path.join(self.target_path, d)))
def tearDown(self):
if os.path.exists(self.target_path):
shutil.rmtree(self.target_path)
class TestCliGenerate(unittest.TestCase):
def setUp(self):
self.args = deepcopy(INIT_ARGS)
self.wiki_path = os.path.join(test_path, "mywiki_for_cli")
self.output_path = os.path.join(self.wiki_path, "output")
if os.path.exists(self.output_path):
<|code_end|>
. Use current file imports:
(import os
import os.path
import shutil
import unittest
import io
from copy import deepcopy
from simiki import cli
from simiki.utils import copytree, emptytree
from simiki.config import get_default_config)
and context including class names, function names, or small code snippets from other files:
# Path: simiki/cli.py
# def init_site(target_path):
# def create_new_wiki(category, title, filename):
# def preview_site(host, port, dest, root, do_watch):
# def method_proxy(cls_instance, method_name, *args, **kwargs):
# def __init__(self, target_path):
# def generate(self, include_draft=False):
# def generate_tags(self):
# def generate_feed(self, pages, feed_fn):
# def generate_catalog(self, pages):
# def generate_pages(self):
# def generate_multiple_pages(self, md_files):
# def generate_single_page(self, generator, md_file):
# def _generate_callback(self, result):
# def install_theme(self):
# def copy_attach(self):
# def unicode_docopt(args):
# def main(args=None):
# class Generator(object):
#
# Path: simiki/utils.py
# def copytree(src, dst, symlinks=False, ignore=None):
# """Copy from source directory to destination"""
#
# if not os.path.exists(dst):
# os.makedirs(dst)
# for item in os.listdir(src):
# s = os.path.join(src, item)
# d = os.path.join(dst, item)
# if os.path.isdir(s):
# copytree(s, d, symlinks, ignore)
# else:
# shutil.copy2(s, d)
#
# def emptytree(directory, exclude_list=None):
# """Delete all the files and dirs under specified directory"""
#
# if not isinstance(directory, unicode):
# directory = unicode(directory, 'utf-8')
# if not exclude_list:
# exclude_list = []
# for p in os.listdir(directory):
# if p in exclude_list:
# continue
# fp = os.path.join(directory, p)
# if os.path.isdir(fp):
# try:
# shutil.rmtree(fp)
# logger.debug("Delete directory %s", fp)
# except OSError as e:
# logger.error("Unable to delete directory %s: %s",
# fp, unicode(e))
# elif os.path.isfile(fp):
# try:
# logging.debug("Delete file %s", fp)
# os.remove(fp)
# except OSError as e:
# logger.error("Unable to delete file %s: %s", fp, unicode(e))
# else:
# logger.error("Unable to delete %s, unknown filetype", fp)
#
# Path: simiki/config.py
# def get_default_config():
# return _post_process(_set_default_config())
. Output only the next line. | emptytree(self.output_path) |
Using the snippet: <|code_start|>from __future__ import print_function, with_statement, unicode_literals
test_path = os.path.dirname(os.path.abspath(__file__))
base_path = os.path.dirname(test_path)
INIT_ARGS = {
u'--help': False,
u'--version': False,
u'-c': None,
u'-f': None,
u'-p': None,
u'-t': None,
u'--host': None,
u'--port': None,
u'-w': None,
u'--draft': None,
u'generate': False,
u'g': False,
u'init': False,
u'new': False,
u'n': False,
u'preview': False,
u'p': False
}
class TestCliInit(unittest.TestCase):
def setUp(self):
<|code_end|>
, determine the next line of code. You have imports:
import os
import os.path
import shutil
import unittest
import io
from copy import deepcopy
from simiki import cli
from simiki.utils import copytree, emptytree
from simiki.config import get_default_config
and context (class names, function names, or code) available:
# Path: simiki/cli.py
# def init_site(target_path):
# def create_new_wiki(category, title, filename):
# def preview_site(host, port, dest, root, do_watch):
# def method_proxy(cls_instance, method_name, *args, **kwargs):
# def __init__(self, target_path):
# def generate(self, include_draft=False):
# def generate_tags(self):
# def generate_feed(self, pages, feed_fn):
# def generate_catalog(self, pages):
# def generate_pages(self):
# def generate_multiple_pages(self, md_files):
# def generate_single_page(self, generator, md_file):
# def _generate_callback(self, result):
# def install_theme(self):
# def copy_attach(self):
# def unicode_docopt(args):
# def main(args=None):
# class Generator(object):
#
# Path: simiki/utils.py
# def copytree(src, dst, symlinks=False, ignore=None):
# """Copy from source directory to destination"""
#
# if not os.path.exists(dst):
# os.makedirs(dst)
# for item in os.listdir(src):
# s = os.path.join(src, item)
# d = os.path.join(dst, item)
# if os.path.isdir(s):
# copytree(s, d, symlinks, ignore)
# else:
# shutil.copy2(s, d)
#
# def emptytree(directory, exclude_list=None):
# """Delete all the files and dirs under specified directory"""
#
# if not isinstance(directory, unicode):
# directory = unicode(directory, 'utf-8')
# if not exclude_list:
# exclude_list = []
# for p in os.listdir(directory):
# if p in exclude_list:
# continue
# fp = os.path.join(directory, p)
# if os.path.isdir(fp):
# try:
# shutil.rmtree(fp)
# logger.debug("Delete directory %s", fp)
# except OSError as e:
# logger.error("Unable to delete directory %s: %s",
# fp, unicode(e))
# elif os.path.isfile(fp):
# try:
# logging.debug("Delete file %s", fp)
# os.remove(fp)
# except OSError as e:
# logger.error("Unable to delete file %s: %s", fp, unicode(e))
# else:
# logger.error("Unable to delete %s, unknown filetype", fp)
#
# Path: simiki/config.py
# def get_default_config():
# return _post_process(_set_default_config())
. Output only the next line. | self.default_config = get_default_config() |
Given the code snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
logger = logging.getLogger(__name__)
yes_answer = ('y', 'yes')
def get_input(text):
<|code_end|>
, generate the next line using the imports in this file:
import os
import shutil
import logging
from simiki.compat import raw_input
from simiki.utils import copytree, get_md5
and context (functions, classes, or occasionally code) from other files:
# Path: simiki/compat.py
#
# Path: simiki/utils.py
# def copytree(src, dst, symlinks=False, ignore=None):
# """Copy from source directory to destination"""
#
# if not os.path.exists(dst):
# os.makedirs(dst)
# for item in os.listdir(src):
# s = os.path.join(src, item)
# d = os.path.join(dst, item)
# if os.path.isdir(s):
# copytree(s, d, symlinks, ignore)
# else:
# shutil.copy2(s, d)
#
# def get_md5(filename):
# # py3 require md5 with bytes object, otherwise raise
# # TypeError: Unicode-objects must be encoded before hashing
# with open(filename, 'rb') as fd:
# md5_hash = hashlib.md5(fd.read()).hexdigest()
# return md5_hash
. Output only the next line. | return raw_input(text) |
Predict the next line for this snippet: <|code_start|>
try:
if os.path.exists(local_dir):
_need_update = False
for root, dirs, files in os.walk(original_dir):
files = [f for f in files if not f.startswith(".")]
dirs[:] = [d for d in dirs if not d.startswith(".")]
rel_dir = os.path.relpath(root, original_dir)
for fn in files:
original_fn_md5 = get_md5(os.path.join(root, fn))
local_fn = os.path.join(local_dir, rel_dir, fn)
if not os.path.exists(local_fn):
_need_update = True
break
local_fn_md5 = get_md5(local_fn)
if local_fn_md5 != original_fn_md5:
_need_update = True
break
if _need_update:
break
if _need_update:
up_to_date = False
try:
_ans = get_input('Overwrite {0} {1}? (y/N) '
.format(tag, dirname))
if _ans.lower() in yes_answer:
shutil.rmtree(local_dir)
<|code_end|>
with the help of current file imports:
import os
import shutil
import logging
from simiki.compat import raw_input
from simiki.utils import copytree, get_md5
and context from other files:
# Path: simiki/compat.py
#
# Path: simiki/utils.py
# def copytree(src, dst, symlinks=False, ignore=None):
# """Copy from source directory to destination"""
#
# if not os.path.exists(dst):
# os.makedirs(dst)
# for item in os.listdir(src):
# s = os.path.join(src, item)
# d = os.path.join(dst, item)
# if os.path.isdir(s):
# copytree(s, d, symlinks, ignore)
# else:
# shutil.copy2(s, d)
#
# def get_md5(filename):
# # py3 require md5 with bytes object, otherwise raise
# # TypeError: Unicode-objects must be encoded before hashing
# with open(filename, 'rb') as fd:
# md5_hash = hashlib.md5(fd.read()).hexdigest()
# return md5_hash
, which may contain function names, class names, or code. Output only the next line. | copytree(original_dir, local_dir) |
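Editor's note: `get_dir_md5` is named in several rows' context but its body never appears; the `_update_dir` walk above effectively performs a per-file version of the same comparison. One purely illustrative way such a helper could be written (every detail is an assumption):

```python
import hashlib
import os

def get_dir_md5(dirname):
    # Hash relative paths plus file contents in a stable order, so two
    # trees hash equal exactly when their files and contents match.
    digest = hashlib.md5()
    for root, dirs, files in os.walk(dirname):
        dirs.sort()
        for fn in sorted(files):
            fp = os.path.join(root, fn)
            digest.update(os.path.relpath(fp, dirname).encode('utf-8'))
            with open(fp, 'rb') as fd:
                digest.update(fd.read())
    return digest.hexdigest()
```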
Given snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
logger = logging.getLogger(__name__)
yes_answer = ('y', 'yes')
def get_input(text):
return raw_input(text)
def _update_file(filename, local_path, original_path):
"""
:filename: file name to be updated, without directory
:local_path: directory of local filename
:original_path: directory of original filename
"""
up_to_date = True
original_fn = os.path.join(original_path, filename)
local_fn = os.path.join(local_path, filename)
try:
if os.path.exists(local_fn):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import shutil
import logging
from simiki.compat import raw_input
from simiki.utils import copytree, get_md5
and context:
# Path: simiki/compat.py
#
# Path: simiki/utils.py
# def copytree(src, dst, symlinks=False, ignore=None):
# """Copy from source directory to destination"""
#
# if not os.path.exists(dst):
# os.makedirs(dst)
# for item in os.listdir(src):
# s = os.path.join(src, item)
# d = os.path.join(dst, item)
# if os.path.isdir(s):
# copytree(s, d, symlinks, ignore)
# else:
# shutil.copy2(s, d)
#
# def get_md5(filename):
# # py3 require md5 with bytes object, otherwise raise
# # TypeError: Unicode-objects must be encoded before hashing
# with open(filename, 'rb') as fd:
# md5_hash = hashlib.md5(fd.read()).hexdigest()
# return md5_hash
which might include code, classes, or functions. Output only the next line. | original_fn_md5 = get_md5(original_fn) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
class ANSIFormatter(Formatter):
"""Use ANSI escape sequences to colored log"""
def format(self, record):
try:
msg = super(ANSIFormatter, self).format(record)
except:
# 2017-05-15: not support py26
# for python2.6
# Formatter is old-style class in python2.6 and type is classobj
# another trick: http://stackoverflow.com/a/18392639/1276501
msg = Formatter.format(self, record)
lvl2color = {
"DEBUG": "blue",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "bgred"
}
rln = record.levelname
if rln in lvl2color:
return "[{0}]: {1}".format(
<|code_end|>
, predict the next line using imports from the current file:
import logging
from logging import getLogger, Formatter, StreamHandler
from simiki import utils
from simiki.compat import is_linux, is_osx
and context including class names, function names, and sometimes code from other files:
# Path: simiki/utils.py
# COLOR_CODES = {
# "reset": "\033[0m",
# "black": "\033[1;30m",
# "red": "\033[1;31m",
# "green": "\033[1;32m",
# "yellow": "\033[1;33m",
# "blue": "\033[1;34m",
# "magenta": "\033[1;35m",
# "cyan": "\033[1;36m",
# "white": "\033[1;37m",
# "bgred": "\033[1;41m",
# "bggrey": "\033[1;100m",
# }
# def color_msg(color, msg):
# def check_extension(filename):
# def copytree(src, dst, symlinks=False, ignore=None):
# def emptytree(directory, exclude_list=None):
# def mkdir_p(path):
# def listdir_nohidden(path):
# def write_file(filename, content):
# def get_md5(filename):
# def get_dir_md5(dirname):
# def import_string(import_name, silent=False):
#
# Path: simiki/compat.py
. Output only the next line. | utils.color_msg(lvl2color[rln], rln), |
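Editor's note: `color_msg` only ever appears as a signature in these rows; given the `COLOR_CODES` table repeated in the context, a likely definition (an assumption, with the dict trimmed to the entries used here) is:

```python
COLOR_CODES = {
    "reset": "\033[0m",
    "red": "\033[1;31m",
    "green": "\033[1;32m",
    # ... remaining entries as listed in the row's context
}

def color_msg(color, msg):
    # Wrap the message in the requested ANSI escape sequence, then
    # reset the terminal colour.
    return "{0}{1}{2}".format(COLOR_CODES[color], msg, COLOR_CODES["reset"])
```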
Given snippet: <|code_start|>
rln = record.levelname
if rln in lvl2color:
return "[{0}]: {1}".format(
utils.color_msg(lvl2color[rln], rln),
msg
)
else:
return msg
class NonANSIFormatter(Formatter):
"""Non ANSI color format"""
def format(self, record):
try:
msg = super(NonANSIFormatter, self).format(record)
except:
# 2017-05-15: not support py26
# for python2.6
# Formatter is old-style class in python2.6 and type is classobj
# another trick: http://stackoverflow.com/a/18392639/1276501
msg = Formatter.format(self, record)
rln = record.levelname
return "[{0}]: {1}".format(rln, msg)
def _is_platform_allowed_ansi():
"""ansi be used on linux/macos"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
from logging import getLogger, Formatter, StreamHandler
from simiki import utils
from simiki.compat import is_linux, is_osx
and context:
# Path: simiki/utils.py
# COLOR_CODES = {
# "reset": "\033[0m",
# "black": "\033[1;30m",
# "red": "\033[1;31m",
# "green": "\033[1;32m",
# "yellow": "\033[1;33m",
# "blue": "\033[1;34m",
# "magenta": "\033[1;35m",
# "cyan": "\033[1;36m",
# "white": "\033[1;37m",
# "bgred": "\033[1;41m",
# "bggrey": "\033[1;100m",
# }
# def color_msg(color, msg):
# def check_extension(filename):
# def copytree(src, dst, symlinks=False, ignore=None):
# def emptytree(directory, exclude_list=None):
# def mkdir_p(path):
# def listdir_nohidden(path):
# def write_file(filename, content):
# def get_md5(filename):
# def get_dir_md5(dirname):
# def import_string(import_name, silent=False):
#
# Path: simiki/compat.py
which might include code, classes, or functions. Output only the next line. | if is_linux or is_osx: |
Using the snippet: <|code_start|>
rln = record.levelname
if rln in lvl2color:
return "[{0}]: {1}".format(
utils.color_msg(lvl2color[rln], rln),
msg
)
else:
return msg
class NonANSIFormatter(Formatter):
"""Non ANSI color format"""
def format(self, record):
try:
msg = super(NonANSIFormatter, self).format(record)
except:
# 2017-05-15: not support py26
# for python2.6
# Formatter is old-style class in python2.6 and type is classobj
# another trick: http://stackoverflow.com/a/18392639/1276501
msg = Formatter.format(self, record)
rln = record.levelname
return "[{0}]: {1}".format(rln, msg)
def _is_platform_allowed_ansi():
"""ansi be used on linux/macos"""
<|code_end|>
, determine the next line of code. You have imports:
import logging
from logging import getLogger, Formatter, StreamHandler
from simiki import utils
from simiki.compat import is_linux, is_osx
and context (class names, function names, or code) available:
# Path: simiki/utils.py
# COLOR_CODES = {
# "reset": "\033[0m",
# "black": "\033[1;30m",
# "red": "\033[1;31m",
# "green": "\033[1;32m",
# "yellow": "\033[1;33m",
# "blue": "\033[1;34m",
# "magenta": "\033[1;35m",
# "cyan": "\033[1;36m",
# "white": "\033[1;37m",
# "bgred": "\033[1;41m",
# "bggrey": "\033[1;100m",
# }
# def color_msg(color, msg):
# def check_extension(filename):
# def copytree(src, dst, symlinks=False, ignore=None):
# def emptytree(directory, exclude_list=None):
# def mkdir_p(path):
# def listdir_nohidden(path):
# def write_file(filename, content):
# def get_md5(filename):
# def get_dir_md5(dirname):
# def import_string(import_name, silent=False):
#
# Path: simiki/compat.py
. Output only the next line. | if is_linux or is_osx: |
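Editor's note: `is_linux` and `is_osx` come from the unseen `simiki/compat.py`; plausible definitions (assumptions) that would make `_is_platform_allowed_ansi` behave as its docstring says:

```python
import sys

# ANSI escapes are assumed safe on Linux and macOS terminals.
is_linux = sys.platform.startswith('linux')
is_osx = (sys.platform == 'darwin')
```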
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
test_path = os.path.dirname(os.path.abspath(__file__))
class TestParseConfig(unittest.TestCase):
def setUp(self):
wiki_path = os.path.join(test_path, 'mywiki_for_others')
self.expected_config = get_default_config()
self.expected_config.update({
"author": "Tanky Woo",
"debug": True,
"default_ext": "markdown",
"description": "This is a simiki's config sample,"
" \u6d4b\u8bd5\u6837\u4f8b",
"destination": "destination",
"keywords": "wiki, simiki, python, \u7ef4\u57fa",
"root": "/wiki/",
"source": "source",
"attach": "attach",
"theme": "mytheme",
"themes_dir": "simiki_themes",
"title": "\u6211\u7684Wiki",
"url": "http://wiki.tankywoo.com"
})
self.config_file = os.path.join(wiki_path, "config_sample.yml")
def test_parse_config(self):
<|code_end|>
, predict the next line using imports from the current file:
import os.path
import unittest
import datetime
from simiki.config import parse_config, get_default_config
and context including class names, function names, and sometimes code from other files:
# Path: simiki/config.py
# def parse_config(config_file):
# if not os.path.exists(config_file):
# raise ConfigFileNotFound("{0} not exists".format(config_file))
#
# default_config = _set_default_config()
#
# with io.open(config_file, "rt", encoding="utf-8") as fd:
# config = yaml.load(fd, Loader=yaml.FullLoader)
#
# default_config.update(config)
# config = _post_process(default_config)
#
# return config
#
# def get_default_config():
# return _post_process(_set_default_config())
. Output only the next line. | config = parse_config(self.config_file) |
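Editor's note: the `parse_config` body quoted in this row's context makes the merge order explicit: defaults first, then the user's YAML layered on top. A self-contained re-enactment of that merge (names and values are illustrative, not simiki's):

```python
import io
import yaml

default_config = {'theme': 'simple', 'title': ''}
user_yaml = io.StringIO('title: My Wiki\n')
# User settings override defaults, untouched defaults survive.
default_config.update(yaml.load(user_yaml, Loader=yaml.FullLoader))
assert default_config == {'theme': 'simple', 'title': 'My Wiki'}
```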
Predict the next line after this snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
test_path = os.path.dirname(os.path.abspath(__file__))
class TestParseConfig(unittest.TestCase):
def setUp(self):
wiki_path = os.path.join(test_path, 'mywiki_for_others')
<|code_end|>
using the current file's imports:
import os.path
import unittest
import datetime
from simiki.config import parse_config, get_default_config
and any relevant context from other files:
# Path: simiki/config.py
# def parse_config(config_file):
# if not os.path.exists(config_file):
# raise ConfigFileNotFound("{0} not exists".format(config_file))
#
# default_config = _set_default_config()
#
# with io.open(config_file, "rt", encoding="utf-8") as fd:
# config = yaml.load(fd, Loader=yaml.FullLoader)
#
# default_config.update(config)
# config = _post_process(default_config)
#
# return config
#
# def get_default_config():
# return _post_process(_set_default_config())
. Output only the next line. | self.expected_config = get_default_config() |
Continue the code snippet: <|code_start|> the file doesn't exist, the script will exit with code 1 and tell the
user how to generate it.
:return: the cookie secret as bytes
"""
try:
with open(os.path.join(_pwd, 'cookie_secret'), 'rb') as cookie_file:
cookie_secret = cookie_file.read()
return cookie_secret
except IOError:
print(textwrap.fill(
'{error} no secret key found for creating secure user session'
' cookies. Create it by running the following command:'.format(
error=modify_text('Error:', bold)
)
))
print('head -c 24 /dev/urandom > cookie_secret')
sys.exit(1)
def ensure_that_user_wants_to_drop_schema():
"""Check that user asked to drop the schema intentionally.
Interrogates the user to make sure that the schema specified by
options.schema should be dropped. If the user decides against it,
exits the application.
"""
answer = input(textwrap.fill(
'Do you really want to drop the schema {schema}? Doing so will {erase}'
' all the data {permanently} y/n (default n)'.format(
<|code_end|>
. Use current file imports:
import os
import textwrap
import signal
import subprocess
import sys
import logging
import mimetypes
import tornado.log
import tornado.httpserver
import tornado.web
import dokomoforms.handlers as handlers
from time import sleep
from sqlalchemy import DDL
from sqlalchemy.orm import sessionmaker
from tornado.web import url
from dokomoforms.options import options
from dokomoforms.options import parse_options
from dokomoforms.models import create_engine, Base, UUID_REGEX
from dokomoforms.handlers.api.v0 import (
SurveyResource, SubmissionResource, PhotoResource, NodeResource,
UserResource
)
from dokomoforms.handlers.debug import revisit_debug
and context (classes, functions, or code) from other files:
# Path: dokomoforms/options.py
# def inject_options(**kwargs):
# def parse_options():
. Output only the next line. | schema=options.schema, |
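Editor's note: the shell one-liner this handler prints has a direct Python equivalent, shown here for completeness (the filename matches the row; everything else is illustrative):

```python
import os

# Same effect as `head -c 24 /dev/urandom > cookie_secret`: 24 random
# bytes from the OS CSPRNG, written out as the session-signing secret.
with open('cookie_secret', 'wb') as cookie_file:
    cookie_file.write(os.urandom(24))
```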
Predict the next line for this snippet: <|code_start|> ("utf8","utf8_estonian_ci",False), # 198
("utf8","utf8_spanish_ci",False), # 199
("utf8","utf8_swedish_ci",False), # 200
("utf8","utf8_turkish_ci",False), # 201
("utf8","utf8_czech_ci",False), # 202
("utf8","utf8_danish_ci",False), # 203
("utf8","utf8_lithuanian_ci",False), # 204
("utf8","utf8_slovak_ci",False), # 205
("utf8","utf8_spanish2_ci",False), # 206
("utf8","utf8_roman_ci",False), # 207
("utf8","utf8_persian_ci",False), # 208
("utf8","utf8_esperanto_ci",False), # 209
("utf8","utf8_hungarian_ci",False), # 210
]
@classmethod
def get_info(cls,setid):
"""Retrieves character set information as tuple using an ID
Retrieves character set and collation information based on the
given MySQL ID.
Returns a tuple.
"""
try:
r = cls.desc[setid]
if r is None:
raise
return r[0:2]
except:
<|code_end|>
with the help of current file imports:
from .errors import ProgrammingError
and context from other files:
# Path: resources/lib/mysql-connector-python/python3/mysql/connector/errors.py
# class ProgrammingError(DatabaseError):
# """Exception for errors programming errors"""
# pass
, which may contain function names, class names, or code. Output only the next line. | raise ProgrammingError("Character set '%d' unsupported" % (setid)) |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""
solace.views.themes
~~~~~~~~~~~~~~~~~~~
Implements support for the themes.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
def get_resource(request, theme, file):
"""Returns a file from the theme."""
<|code_end|>
, generate the next line using the imports in this file:
import os
import mimetypes
from werkzeug import Response, wrap_file
from werkzeug.exceptions import NotFound
from solace.templating import get_theme
from solace import settings
and context (functions, classes, or occasionally code) from other files:
# Path: solace/templating.py
# def get_theme(name=None):
# """Returns the specified theme of the one from the config. If the
# theme does not exist, `None` is returned.
# """
# global _theme
# set_theme = False
# with _theme_lock:
# if name is None:
# if _theme is not None:
# return _theme
# name = settings.THEME
# set_theme = True
# for folder in chain(settings.THEME_PATH, DEFAULT_THEME_PATH):
# theme_dir = path.join(folder, name)
# if path.isfile(path.join(theme_dir, 'theme.ini')):
# rv = Theme(theme_dir)
# if set_theme:
# _theme = rv
# return rv
#
# Path: solace/settings.py
# def configure(**values):
# def revert_to_default():
# def autodiscover_settings():
# def configure_from_file(filename):
# def describe_settings():
. Output only the next line. | theme = get_theme(theme) |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
"""
solace.utils.ctxlocal
~~~~~~~~~~~~~~~~~~~~~
The context local that is used in the application and i18n system. The
application makes this request-bound.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
local = Local()
local_mgr = LocalManager([local])
class LocalProperty(object):
"""Class/Instance property that returns something from the local."""
def __init__(self, name):
self.__name__ = name
def __get__(self, obj, type=None):
return getattr(local, self.__name__, None)
# make sure the request local is removed at the end of the request
<|code_end|>
, predict the immediate next line with the help of imports:
from werkzeug import Local, LocalManager
from solace.signals import after_request_shutdown
and context (classes, functions, sometimes code) from other files:
# Path: solace/signals.py
# def _ref(func):
# def __init__(self, im_self, im_func, im_class):
# def resolve(self):
# def SIG(name, args=None):
# def __init__(self, name, args=None, _frm=None):
# def connect(self, func):
# def is_connected(self, func):
# def get_connections(self):
# def disconnect(self, func):
# def emit(self, **args):
# def __reduce__(self):
# def __repr__(self):
# def emit(self, **args):
# def temporary_connection(func, signal):
# def handler(signal):
# def decorator(func):
# class _MethodRef(object):
# class Signal(object):
# class _BroadcastSignal(Signal):
. Output only the next line. | after_request_shutdown.connect(local_mgr.cleanup) |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
"""
solace.utils.mail
~~~~~~~~~~~~~~~~~
This module can be used to send mails.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
(c) 2009 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
except ImportError:
def send_email(subject, text, to_addrs, quiet=True):
"""Send a mail using the `EMail` class. This will log the email instead
if the application configuration wants to log email.
"""
e = EMail(subject, text, to_addrs)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import re
import socket
from email.mime.text import MIMEText
from email.MIMEText import MIMEText
from smtplib import SMTP, SMTPException
from urlparse import urlparse
from solace import settings
and context (classes, functions, sometimes code) from other files:
# Path: solace/settings.py
# def configure(**values):
# def revert_to_default():
# def autodiscover_settings():
# def configure_from_file(filename):
# def describe_settings():
. Output only the next line. | if settings.MAIL_LOG_FILE is not None: |
Given snippet: <|code_start|> Column('upvotes', Integer, nullable=False),
# the number of downvotes casted
Column('downvotes', Integer, nullable=False),
# the number of bronce badges
Column('bronce_badges', Integer, nullable=False),
# the number of silver badges
Column('silver_badges', Integer, nullable=False),
# the number of gold badges
Column('gold_badges', Integer, nullable=False),
# the number of platin badges
Column('platin_badges', Integer, nullable=False),
# true if the user is an administrator
Column('is_admin', Boolean, nullable=False),
# true if the user is banned
Column('is_banned', Boolean, nullable=False),
# the date of the last login
Column('last_login', DateTime),
# the user's activation key. If this is NULL, the user is already
# activated, otherwise this is the key the user has to enter on the
# activation page (it's part of the link actually) to activate the
# account.
Column('activation_key', String(10))
)
user_activities = Table('user_activities', metadata,
# the id of the actitity, exists only for the database
Column('activity_id', Integer, primary_key=True),
# the user the activity is for
Column('user_id', Integer, ForeignKey('users.user_id')),
# the language code for this activity stat
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from sqlalchemy import Table, Column, Integer, String, Text, DateTime, \
ForeignKey, Boolean, Float
from solace.database import LocaleType, BadgeType, metadata
and context:
# Path: solace/database.py
# def get_engine():
# def refresh_engine():
# def atomic_add(obj, column, delta, expire=False):
# def mapper(model, table, **options):
# def cursor_execute(self, execute, cursor, statement, parameters,
# context, executemany):
# def after_delete(self, mapper, connection, instance):
# def after_insert(self, mapper, connection, instance):
# def after_update(self, mapper, connection, instance):
# def _record(self, model, operation):
# def before_commit(self, session):
# def after_commit(self, session):
# def after_rollback(self, session):
# def __init__(self):
# def __init__(self):
# def process_bind_param(self, value, dialect):
# def process_result_value(self, value, dialect):
# def is_mutable(self):
# def __init__(self):
# def process_bind_param(self, value, dialect):
# def process_result_value(self, value, dialect):
# def is_mutable(self):
# def init():
# def drop_tables():
# def add_query_debug_headers(request, response):
# def request_track_query(cursor, statement, parameters, time):
# class ConnectionQueryTrackingProxy(ConnectionProxy):
# class SignalTrackingMapperExtension(MapperExtension):
# class SignalEmittingSessionExtension(SessionExtension):
# class SignalTrackingSession(Session):
# class LocaleType(TypeDecorator):
# class BadgeType(TypeDecorator):
which might include code, classes, or functions. Output only the next line. | Column('locale', LocaleType, index=True), |
Continue the code snippet: <|code_start|> # the date of the last login
Column('last_login', DateTime),
# the user's activation key. If this is NULL, the user is already
# activated, otherwise this is the key the user has to enter on the
# activation page (it's part of the link actually) to activate the
# account.
Column('activation_key', String(10))
)
user_activities = Table('user_activities', metadata,
# the id of the actitity, exists only for the database
Column('activity_id', Integer, primary_key=True),
# the user the activity is for
Column('user_id', Integer, ForeignKey('users.user_id')),
# the language code for this activity stat
Column('locale', LocaleType, index=True),
# the internal activity counter
Column('counter', Integer, nullable=False),
# the date of the first activity in a language
Column('first_activity', DateTime, nullable=False),
# the date of the last activity in the language
Column('last_activity', DateTime, nullable=False)
)
user_badges = Table('user_badges', metadata,
# the internal id
Column('badge_id', Integer, primary_key=True),
# who was the badge awarded to?
Column('user_id', Integer, ForeignKey('users.user_id')),
# which badge?
<|code_end|>
. Use current file imports:
from sqlalchemy import Table, Column, Integer, String, Text, DateTime, \
ForeignKey, Boolean, Float
from solace.database import LocaleType, BadgeType, metadata
and context (classes, functions, or code) from other files:
# Path: solace/database.py
# def get_engine():
# def refresh_engine():
# def atomic_add(obj, column, delta, expire=False):
# def mapper(model, table, **options):
# def cursor_execute(self, execute, cursor, statement, parameters,
# context, executemany):
# def after_delete(self, mapper, connection, instance):
# def after_insert(self, mapper, connection, instance):
# def after_update(self, mapper, connection, instance):
# def _record(self, model, operation):
# def before_commit(self, session):
# def after_commit(self, session):
# def after_rollback(self, session):
# def __init__(self):
# def __init__(self):
# def process_bind_param(self, value, dialect):
# def process_result_value(self, value, dialect):
# def is_mutable(self):
# def __init__(self):
# def process_bind_param(self, value, dialect):
# def process_result_value(self, value, dialect):
# def is_mutable(self):
# def init():
# def drop_tables():
# def add_query_debug_headers(request, response):
# def request_track_query(cursor, statement, parameters, time):
# class ConnectionQueryTrackingProxy(ConnectionProxy):
# class SignalTrackingMapperExtension(MapperExtension):
# class SignalEmittingSessionExtension(SessionExtension):
# class SignalTrackingSession(Session):
# class LocaleType(TypeDecorator):
# class BadgeType(TypeDecorator):
. Output only the next line. | Column('badge', BadgeType(), index=True), |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""
solace.schema
~~~~~~~~~~~~~
This module defines the solace schema. The structure is pretty simple
and should scale up to the number of posts we expect. Not much magic
happening here.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
<|code_end|>
, generate the next line using the imports in this file:
from sqlalchemy import Table, Column, Integer, String, Text, DateTime, \
ForeignKey, Boolean, Float
from solace.database import LocaleType, BadgeType, metadata
and context (functions, classes, or occasionally code) from other files:
# Path: solace/database.py
# def get_engine():
# def refresh_engine():
# def atomic_add(obj, column, delta, expire=False):
# def mapper(model, table, **options):
# def cursor_execute(self, execute, cursor, statement, parameters,
# context, executemany):
# def after_delete(self, mapper, connection, instance):
# def after_insert(self, mapper, connection, instance):
# def after_update(self, mapper, connection, instance):
# def _record(self, model, operation):
# def before_commit(self, session):
# def after_commit(self, session):
# def after_rollback(self, session):
# def __init__(self):
# def __init__(self):
# def process_bind_param(self, value, dialect):
# def process_result_value(self, value, dialect):
# def is_mutable(self):
# def __init__(self):
# def process_bind_param(self, value, dialect):
# def process_result_value(self, value, dialect):
# def is_mutable(self):
# def init():
# def drop_tables():
# def add_query_debug_headers(request, response):
# def request_track_query(cursor, statement, parameters, time):
# class ConnectionQueryTrackingProxy(ConnectionProxy):
# class SignalTrackingMapperExtension(MapperExtension):
# class SignalEmittingSessionExtension(SessionExtension):
# class SignalTrackingSession(Session):
# class LocaleType(TypeDecorator):
# class BadgeType(TypeDecorator):
. Output only the next line. | users = Table('users', metadata, |
Here is a snippet: <|code_start|> mysql_charset=settings.MYSQL_TABLE_CHARSET)
metadata.create_all(bind=engine)
def drop_tables():
"""Drops all tables again."""
metadata.drop_all(bind=get_engine())
def add_query_debug_headers(request, response):
"""Add headers with the SQL info."""
if settings.TRACK_QUERIES:
count = len(request.sql_queries)
sql_time = 0.0
for stmt, param, time in request.sql_queries:
sql_time += time
response.headers['X-SQL-Query-Count'] = str(count)
response.headers['X-SQL-Query-Time'] = str(sql_time)
def request_track_query(cursor, statement, parameters, time):
"""If there is an active request, it logs the query on it."""
if settings.TRACK_QUERIES:
request = Request.current
if request is not None:
request.sql_queries.append((statement, parameters, time))
# make sure the session is removed at the end of the request and that
# query logging for the request works.
<|code_end|>
. Write the next line using the current file imports:
import sys
import time
import solace.schema
import solace.schema
from threading import Lock
from datetime import datetime
from babel import Locale
from sqlalchemy.types import TypeDecorator
from sqlalchemy.engine.url import make_url
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.interfaces import SessionExtension, MapperExtension, \
EXT_CONTINUE
from sqlalchemy.util import to_list
from sqlalchemy import String, orm, sql, create_engine, MetaData
from solace.badges import badges_by_id
from solace.application import Request
from solace.signals import after_request_shutdown, before_response_sent, \
after_cursor_executed, before_cursor_executed, before_models_committed, \
after_models_committed
from solace import settings
and context from other files:
# Path: solace/signals.py
# def _ref(func):
# def __init__(self, im_self, im_func, im_class):
# def resolve(self):
# def SIG(name, args=None):
# def __init__(self, name, args=None, _frm=None):
# def connect(self, func):
# def is_connected(self, func):
# def get_connections(self):
# def disconnect(self, func):
# def emit(self, **args):
# def __reduce__(self):
# def __repr__(self):
# def emit(self, **args):
# def temporary_connection(func, signal):
# def handler(signal):
# def decorator(func):
# class _MethodRef(object):
# class Signal(object):
# class _BroadcastSignal(Signal):
#
# Path: solace/settings.py
# def configure(**values):
# def revert_to_default():
# def autodiscover_settings():
# def configure_from_file(filename):
# def describe_settings():
, which may include functions, classes, or code. Output only the next line. | after_request_shutdown.connect(session.remove) |
Predict the next line for this snippet: <|code_start|> metadata.create_all(bind=engine)
def drop_tables():
"""Drops all tables again."""
metadata.drop_all(bind=get_engine())
def add_query_debug_headers(request, response):
"""Add headers with the SQL info."""
if settings.TRACK_QUERIES:
count = len(request.sql_queries)
sql_time = 0.0
for stmt, param, time in request.sql_queries:
sql_time += time
response.headers['X-SQL-Query-Count'] = str(count)
response.headers['X-SQL-Query-Time'] = str(sql_time)
def request_track_query(cursor, statement, parameters, time):
"""If there is an active request, it logs the query on it."""
if settings.TRACK_QUERIES:
request = Request.current
if request is not None:
request.sql_queries.append((statement, parameters, time))
# make sure the session is removed at the end of the request and that
# query logging for the request works.
after_request_shutdown.connect(session.remove)
<|code_end|>
with the help of current file imports:
import sys
import time
import solace.schema
import solace.schema
from threading import Lock
from datetime import datetime
from babel import Locale
from sqlalchemy.types import TypeDecorator
from sqlalchemy.engine.url import make_url
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.interfaces import SessionExtension, MapperExtension, \
EXT_CONTINUE
from sqlalchemy.util import to_list
from sqlalchemy import String, orm, sql, create_engine, MetaData
from solace.badges import badges_by_id
from solace.application import Request
from solace.signals import after_request_shutdown, before_response_sent, \
after_cursor_executed, before_cursor_executed, before_models_committed, \
after_models_committed
from solace import settings
and context from other files:
# Path: solace/signals.py
# def _ref(func):
# def __init__(self, im_self, im_func, im_class):
# def resolve(self):
# def SIG(name, args=None):
# def __init__(self, name, args=None, _frm=None):
# def connect(self, func):
# def is_connected(self, func):
# def get_connections(self):
# def disconnect(self, func):
# def emit(self, **args):
# def __reduce__(self):
# def __repr__(self):
# def emit(self, **args):
# def temporary_connection(func, signal):
# def handler(signal):
# def decorator(func):
# class _MethodRef(object):
# class Signal(object):
# class _BroadcastSignal(Signal):
#
# Path: solace/settings.py
# def configure(**values):
# def revert_to_default():
# def autodiscover_settings():
# def configure_from_file(filename):
# def describe_settings():
, which may contain function names, class names, or code. Output only the next line. | before_response_sent.connect(add_query_debug_headers) |
Here is a snippet: <|code_start|> orm.attributes.instance_state(obj).expire_attributes(dict_, [column])
else:
orm.attributes.set_committed_value(obj, column, val + delta)
table = mapper.tables[0]
stmt = sql.update(table, mapper.primary_key[0] == pk[0], {
column: table.c[column] + delta
})
sess.execute(stmt)
def mapper(model, table, **options):
"""A mapper that hooks in standard extensions."""
extensions = to_list(options.pop('extension', None), [])
extensions.append(SignalTrackingMapperExtension())
options['extension'] = extensions
return orm.mapper(model, table, **options)
class ConnectionQueryTrackingProxy(ConnectionProxy):
"""A proxy that if enabled counts the queries."""
def cursor_execute(self, execute, cursor, statement, parameters,
context, executemany):
before_cursor_executed.emit(cursor=self, statement=statement,
parameters=parameters)
start = _timer()
try:
return execute(cursor, statement, parameters, context)
finally:
<|code_end|>
. Write the next line using the current file imports:
import sys
import time
import solace.schema
import solace.schema
from threading import Lock
from datetime import datetime
from babel import Locale
from sqlalchemy.types import TypeDecorator
from sqlalchemy.engine.url import make_url
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.interfaces import SessionExtension, MapperExtension, \
EXT_CONTINUE
from sqlalchemy.util import to_list
from sqlalchemy import String, orm, sql, create_engine, MetaData
from solace.badges import badges_by_id
from solace.application import Request
from solace.signals import after_request_shutdown, before_response_sent, \
after_cursor_executed, before_cursor_executed, before_models_committed, \
after_models_committed
from solace import settings
and context from other files:
# Path: solace/signals.py
# def _ref(func):
# def __init__(self, im_self, im_func, im_class):
# def resolve(self):
# def SIG(name, args=None):
# def __init__(self, name, args=None, _frm=None):
# def connect(self, func):
# def is_connected(self, func):
# def get_connections(self):
# def disconnect(self, func):
# def emit(self, **args):
# def __reduce__(self):
# def __repr__(self):
# def emit(self, **args):
# def temporary_connection(func, signal):
# def handler(signal):
# def decorator(func):
# class _MethodRef(object):
# class Signal(object):
# class _BroadcastSignal(Signal):
#
# Path: solace/settings.py
# def configure(**values):
# def revert_to_default():
# def autodiscover_settings():
# def configure_from_file(filename):
# def describe_settings():
, which may include functions, classes, or code. Output only the next line. | after_cursor_executed.emit(cursor=self, statement=statement, |
Based on the snippet: <|code_start|> assert len(pk) == 1, 'atomic_add not supported for classes with ' \
'more than one primary key'
val = orm.attributes.get_attribute(obj, column)
if expire:
dict_ = orm.attributes.instance_dict(obj)
orm.attributes.instance_state(obj).expire_attributes(dict_, [column])
else:
orm.attributes.set_committed_value(obj, column, val + delta)
table = mapper.tables[0]
stmt = sql.update(table, mapper.primary_key[0] == pk[0], {
column: table.c[column] + delta
})
sess.execute(stmt)
def mapper(model, table, **options):
"""A mapper that hooks in standard extensions."""
extensions = to_list(options.pop('extension', None), [])
extensions.append(SignalTrackingMapperExtension())
options['extension'] = extensions
return orm.mapper(model, table, **options)
class ConnectionQueryTrackingProxy(ConnectionProxy):
"""A proxy that if enabled counts the queries."""
def cursor_execute(self, execute, cursor, statement, parameters,
context, executemany):
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import time
import solace.schema
from threading import Lock
from datetime import datetime
from babel import Locale
from sqlalchemy.types import TypeDecorator
from sqlalchemy.engine.url import make_url
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.interfaces import SessionExtension, MapperExtension, \
EXT_CONTINUE
from sqlalchemy.util import to_list
from sqlalchemy import String, orm, sql, create_engine, MetaData
from solace.badges import badges_by_id
from solace.application import Request
from solace.signals import after_request_shutdown, before_response_sent, \
after_cursor_executed, before_cursor_executed, before_models_committed, \
after_models_committed
from solace import settings
and context (classes, functions, sometimes code) from other files:
# Path: solace/signals.py
# def _ref(func):
# def __init__(self, im_self, im_func, im_class):
# def resolve(self):
# def SIG(name, args=None):
# def __init__(self, name, args=None, _frm=None):
# def connect(self, func):
# def is_connected(self, func):
# def get_connections(self):
# def disconnect(self, func):
# def emit(self, **args):
# def __reduce__(self):
# def __repr__(self):
# def emit(self, **args):
# def temporary_connection(func, signal):
# def handler(signal):
# def decorator(func):
# class _MethodRef(object):
# class Signal(object):
# class _BroadcastSignal(Signal):
#
# Path: solace/settings.py
# def configure(**values):
# def revert_to_default():
# def autodiscover_settings():
# def configure_from_file(filename):
# def describe_settings():
. Output only the next line. | before_cursor_executed.emit(cursor=self, statement=statement, |
Next line prediction: <|code_start|> finally:
after_cursor_executed.emit(cursor=self, statement=statement,
parameters=parameters,
time=_timer() - start)
class SignalTrackingMapperExtension(MapperExtension):
"""Remembers model changes for the session commit code."""
def after_delete(self, mapper, connection, instance):
return self._record(instance, 'delete')
def after_insert(self, mapper, connection, instance):
return self._record(instance, 'insert')
def after_update(self, mapper, connection, instance):
return self._record(instance, 'update')
def _record(self, model, operation):
pk = tuple(orm.object_mapper(model).primary_key_from_instance(model))
orm.object_session(model)._model_changes[pk] = (model, operation)
return EXT_CONTINUE
class SignalEmittingSessionExtension(SessionExtension):
"""Emits signals the mapper extension accumulated."""
def before_commit(self, session):
d = session._model_changes
if d:
<|code_end|>
. Use current file imports:
(import sys
import time
import solace.schema
from threading import Lock
from datetime import datetime
from babel import Locale
from sqlalchemy.types import TypeDecorator
from sqlalchemy.engine.url import make_url
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.interfaces import SessionExtension, MapperExtension, \
EXT_CONTINUE
from sqlalchemy.util import to_list
from sqlalchemy import String, orm, sql, create_engine, MetaData
from solace.badges import badges_by_id
from solace.application import Request
from solace.signals import after_request_shutdown, before_response_sent, \
after_cursor_executed, before_cursor_executed, before_models_committed, \
after_models_committed
from solace import settings)
and context including class names, function names, or small code snippets from other files:
# Path: solace/signals.py
# def _ref(func):
# def __init__(self, im_self, im_func, im_class):
# def resolve(self):
# def SIG(name, args=None):
# def __init__(self, name, args=None, _frm=None):
# def connect(self, func):
# def is_connected(self, func):
# def get_connections(self):
# def disconnect(self, func):
# def emit(self, **args):
# def __reduce__(self):
# def __repr__(self):
# def emit(self, **args):
# def temporary_connection(func, signal):
# def handler(signal):
# def decorator(func):
# class _MethodRef(object):
# class Signal(object):
# class _BroadcastSignal(Signal):
#
# Path: solace/settings.py
# def configure(**values):
# def revert_to_default():
# def autodiscover_settings():
# def configure_from_file(filename):
# def describe_settings():
. Output only the next line. | before_models_committed.emit(changes=d.values()) |
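These mapper/session extension rows all revolve around one data structure: a per-session dict mapping a primary-key tuple to the latest `(model, operation)` pair. A self-contained sketch of that bookkeeping (class names here are illustrative, not solace's):

```python
class ChangeTrackingSession:
    """Changes are keyed by primary key, so repeated operations on one row
    collapse to the most recent, and commit() hands the survivors to a
    listener, mirroring before_models_committed.emit(changes=d.values())."""
    def __init__(self):
        self._model_changes = {}

    def record(self, pk, model, operation):
        self._model_changes[pk] = (model, operation)

    def commit(self, emit):
        if self._model_changes:
            emit(changes=list(self._model_changes.values()))
        self._model_changes.clear()

sess = ChangeTrackingSession()
sess.record((1,), "topic-1", "insert")
sess.record((1,), "topic-1", "update")   # overwrites the earlier insert entry
sess.record((2,), "topic-2", "delete")
sess.commit(lambda changes: print(changes))
# -> [('topic-1', 'update'), ('topic-2', 'delete')]
```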
Using the snippet: <|code_start|>class SignalTrackingMapperExtension(MapperExtension):
"""Remembers model changes for the session commit code."""
def after_delete(self, mapper, connection, instance):
return self._record(instance, 'delete')
def after_insert(self, mapper, connection, instance):
return self._record(instance, 'insert')
def after_update(self, mapper, connection, instance):
return self._record(instance, 'update')
def _record(self, model, operation):
pk = tuple(orm.object_mapper(model).primary_key_from_instance(model))
orm.object_session(model)._model_changes[pk] = (model, operation)
return EXT_CONTINUE
class SignalEmittingSessionExtension(SessionExtension):
"""Emits signals the mapper extension accumulated."""
def before_commit(self, session):
d = session._model_changes
if d:
before_models_committed.emit(changes=d.values())
return EXT_CONTINUE
def after_commit(self, session):
d = session._model_changes
if d:
<|code_end|>
, determine the next line of code. You have imports:
import sys
import time
import solace.schema
from threading import Lock
from datetime import datetime
from babel import Locale
from sqlalchemy.types import TypeDecorator
from sqlalchemy.engine.url import make_url
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.interfaces import SessionExtension, MapperExtension, \
EXT_CONTINUE
from sqlalchemy.util import to_list
from sqlalchemy import String, orm, sql, create_engine, MetaData
from solace.badges import badges_by_id
from solace.application import Request
from solace.signals import after_request_shutdown, before_response_sent, \
after_cursor_executed, before_cursor_executed, before_models_committed, \
after_models_committed
from solace import settings
and context (class names, function names, or code) available:
# Path: solace/signals.py
# def _ref(func):
# def __init__(self, im_self, im_func, im_class):
# def resolve(self):
# def SIG(name, args=None):
# def __init__(self, name, args=None, _frm=None):
# def connect(self, func):
# def is_connected(self, func):
# def get_connections(self):
# def disconnect(self, func):
# def emit(self, **args):
# def __reduce__(self):
# def __repr__(self):
# def emit(self, **args):
# def temporary_connection(func, signal):
# def handler(signal):
# def decorator(func):
# class _MethodRef(object):
# class Signal(object):
# class _BroadcastSignal(Signal):
#
# Path: solace/settings.py
# def configure(**values):
# def revert_to_default():
# def autodiscover_settings():
# def configure_from_file(filename):
# def describe_settings():
. Output only the next line. | after_models_committed.emit(changes=d.values()) |
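One detail the `after_commit` row leaves off-screen: after emitting, the change dict is normally reset so the next transaction starts clean. The `clear()` below is an assumption about how the method continues, not quoted solace code:

```python
def after_commit(session, emit):
    d = session._model_changes
    if d:
        emit(changes=list(d.values()))
    d.clear()   # assumed continuation: reset the ledger for the next transaction

class _Session:   # minimal stand-in for the demo call below
    _model_changes = {(1,): ("topic-1", "update")}

after_commit(_Session, lambda changes: print(changes))
```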
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""
solace.database
~~~~~~~~~~~~~~~
This module defines lower-level database support.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
_engine = None
_engine_lock = Lock()
# the best timer for the platform. on windows systems we're using clock
# for timing which has a higher resolution.
if sys.platform == 'win32':
_timer = time.clock
else:
_timer = time.time
def get_engine():
"""Creates or returns the engine."""
global _engine
with _engine_lock:
if _engine is None:
<|code_end|>
, generate the next line using the imports in this file:
import sys
import time
import solace.schema
from threading import Lock
from datetime import datetime
from babel import Locale
from sqlalchemy.types import TypeDecorator
from sqlalchemy.engine.url import make_url
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.interfaces import SessionExtension, MapperExtension, \
EXT_CONTINUE
from sqlalchemy.util import to_list
from sqlalchemy import String, orm, sql, create_engine, MetaData
from solace.badges import badges_by_id
from solace.application import Request
from solace.signals import after_request_shutdown, before_response_sent, \
after_cursor_executed, before_cursor_executed, before_models_committed, \
after_models_committed
from solace import settings
and context (functions, classes, or occasionally code) from other files:
# Path: solace/signals.py
# def _ref(func):
# def __init__(self, im_self, im_func, im_class):
# def resolve(self):
# def SIG(name, args=None):
# def __init__(self, name, args=None, _frm=None):
# def connect(self, func):
# def is_connected(self, func):
# def get_connections(self):
# def disconnect(self, func):
# def emit(self, **args):
# def __reduce__(self):
# def __repr__(self):
# def emit(self, **args):
# def temporary_connection(func, signal):
# def handler(signal):
# def decorator(func):
# class _MethodRef(object):
# class Signal(object):
# class _BroadcastSignal(Signal):
#
# Path: solace/settings.py
# def configure(**values):
# def revert_to_default():
# def autodiscover_settings():
# def configure_from_file(filename):
# def describe_settings():
. Output only the next line. | options = {'echo': settings.DATABASE_ECHO, |
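The `get_engine` row is the classic lazily-created singleton guarded by a module-level lock. A runnable reduction of the idiom, with a `create` callable standing in for `sqlalchemy.create_engine(settings.DATABASE_URI, **options)`:

```python
from threading import Lock

_engine = None
_engine_lock = Lock()

def get_engine(create=object):
    """Create the engine exactly once, no matter how many threads race here."""
    global _engine
    with _engine_lock:              # serializes the None check and the assignment
        if _engine is None:
            _engine = create()
        return _engine

assert get_engine() is get_engine()   # every caller sees the same object
```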
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
"""
solace.utils.remoting
~~~~~~~~~~~~~~~~~~~~~
This module implements a baseclass for remote objects. These
objects can be exposed via JSON on the URL and are also used
by libsolace's direct connection.
It also provides basic helpers for the API.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
def remote_export_primitive(obj):
"""Remote exports a primitive."""
if isinstance(obj, RemoteObject):
return obj.remote_export()
<|code_end|>
, predict the next line using imports from the current file:
from datetime import datetime
from babel import Locale
from solace.utils.lazystring import is_lazy_string
from solace.i18n import is_lazy_string
and context including class names, function names, and sometimes code from other files:
# Path: solace/utils/lazystring.py
# def is_lazy_string(obj):
# """Checks if the given object is a lazy string."""
# return isinstance(obj, _LazyString)
. Output only the next line. | if is_lazy_string(obj): |
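`remote_export_primitive` is a straightforward recursive exporter: objects that can export themselves do, containers recurse, everything else passes through. A sketch of the overall shape (the real solace version also unwraps lazy i18n strings and Babel `Locale` objects, which this sketch skips):

```python
from datetime import datetime

def remote_export_primitive(obj):
    if hasattr(obj, "remote_export"):          # the RemoteObject case in solace
        return obj.remote_export()
    if isinstance(obj, datetime):
        return obj.isoformat()
    if isinstance(obj, dict):
        return {k: remote_export_primitive(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple, set)):
        return [remote_export_primitive(item) for item in obj]
    return obj

print(remote_export_primitive({"when": datetime(2010, 1, 1), "tags": ("a", "b")}))
# -> {'when': '2010-01-01T00:00:00', 'tags': ['a', 'b']}
```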
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""
solace.packs
~~~~~~~~~~~~
The packs for static files.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
<|code_end|>
, generate the next line using the imports in this file:
import os
from solace.utils.packs import PackManager
and context (functions, classes, or occasionally code) from other files:
# Path: solace/utils/packs.py
# class PackManager(object):
#
# compressor_class = None
#
# def __init__(self, directory, link_func=None, css_first=True,
# css_template=CSS_TEMPLATE, js_template=JS_TEMPLATE,
# build_filename='%(name)s.compressed.%(ext)s',
# charset='utf-8'):
# self.directory = directory
# if link_func is None:
# link_func = default_link_func
# self.link_func = link_func
# self.css_first = css_first
# self.css_template = CSS_TEMPLATE
# self.js_template = JS_TEMPLATE
# self.build_filename = build_filename
# self.charset = charset
# self._packs = {}
#
# def compress(self, log=None):
# compressor = self.compressor_class(self, log)
# for pack in self._packs.itervalues():
# pack.compress(compressor)
#
# def remove_compressed(self):
# for pack in self._packs.itervalues():
# pack.remove_compressed()
#
# def add_pack(self, name, files):
# self._packs[name] = Pack(self, name, files)
#
# def remove_pack(self, name):
# rv = self._packs.pop(name, None)
# if rv is None:
# raise ValueError('no pack named %r found' % name)
#
# def __getitem__(self, name):
# return self._packs[name]
. Output only the next line. | pack_mgr = PackManager(os.path.join(os.path.dirname(__file__), 'static')) |
Next line prediction: <|code_start|>
CheckH1MirnovCoords.h1 = True
CheckH1MirnovCoords.mds = True
CheckH1MirnovCoords.net = True
CheckH1MirnovCoords.slow = True
CheckH1MirnovCoords.busted = True
class CheckH1Device(H1DevTestCase):
def test_load_h1(self):
self.assertTrue(issubclass(H1, Device))
def test_getdevice(self):
h1test = pyfusion.getDevice('H1')
self.assertTrue(isinstance(h1test, H1))
def test_kh(self):
h1test = pyfusion.getDevice('H1')
shot_kh = (58073, 0.74)
data = h1test.acq.getdata(shot_kh[0], 'H1_mirnov_array_1_coil_1')
#self.assertAlmostEqual(data.meta['kh'], shot_kh[1])
CheckH1Device.slow = True
CheckH1Device.h1 = True
CheckH1Device.mds = True
CheckH1Device.net = True
<|code_end|>
. Use current file imports:
(import os
import pyfusion
from pyfusion.test.tests import PfTestBase, BasePyfusionTestCase
from pyfusion.data.timeseries import TimeseriesData
from pyfusion.data.base import PfMetaData
from pyfusion.devices.base import Device
from pyfusion.devices.H1.device import H1)
and context including class names, function names, or small code snippets from other files:
# Path: pyfusion/test/tests.py
# class PfTestBase(object):
# """Base class for generated sql and non-sql test cases."""
# pass
#
# class BasePyfusionTestCase(unittest.TestCase):
# """Simple customisation of TestCase."""
# def __init__(self, *args):
# self.listed_device = CONFIG_TEST_DEVICE_NAME
# self.listed_empty_device = CONFIG_EMPTY_TEST_DEVICE_NAME
# self.unlisted_device = NONCONFIG_TEST_DEVICE_NAME
# self.shot_number = TEST_SHOT_NUMBER
# self.unlisted_config_section_type = UNLISTED_CONFIG_SECTION_TYPE
# unittest.TestCase.__init__(self, *args)
#
# Path: pyfusion/data/timeseries.py
# class TimeseriesData(BaseData):
# def __init__(self, timebase = None, signal=None, channels=None, bypass_length_check=False, **kwargs):
# self.timebase = timebase
# self.channels = channels
# self.scales = np.array([[1]]) # retain amplitude scaling info for later
# if bypass_length_check == True:
# self.signal = signal
# else:
# if signal.n_samples() == len(timebase):
# self.signal = signal
# else:
# raise ValueError, "signal has different number of samples to timebase"
# super(TimeseriesData, self).__init__(**kwargs)
#
#
# def generate_frequency_series(self, NFFT, step, window='hamming'):
# w =scipy.hamming(NFFT)
# if len(self.signal.shape)==2:
# signal = np.array([np.fft.rfft(w*self.signal[:,i:i+NFFT]/NFFT) for i in range(0, self.signal.shape[1]-NFFT, step)])
# else:
# signal = np.array([np.fft.rfft(w*self.signal[i:i+NFFT]/NFFT) for i in range(0, self.signal.shape[1]-NFFT, step)])
# timebase = np.array([np.average(self.timebase[i:i+NFFT])
# for i in range(0, self.signal.shape[1]-NFFT, step)])
#
# #This is borrowed from here:
# #http://docs.scipy.org/doc/numpy-dev/reference/generated/numpy.fft.rfftfreq.html
# d = (self.timebase[1] - self.timebase[0])
# val = 1.0/(NFFT*d)
# N = NFFT//2 + 1
# frequency_base = np.round((np.arange(0, N, dtype=int)) * val,4)
# return FrequencyseriesData(frequency_base = frequency_base, timebase=timebase,
# signal=signal,channels=self.channels,NFFT=NFFT, window='hamming',step=step)
. Output only the next line. | class CheckGetH1Device(PfTestBase): |
Given the code snippet: <|code_start|>"""
"""
TEST_DATA_PATH = os.path.abspath(os.path.dirname(__file__))
TEST_CONFIG_FILE = os.path.join(TEST_DATA_PATH, "test.cfg")
class H1DevTestCase(BasePyfusionTestCase):
def setUp(self):
pyfusion.conf.utils.clear_config()
if pyfusion.orm_manager.IS_ACTIVE:
pyfusion.orm_manager.Session.close_all()
pyfusion.orm_manager.clear_mappers()
pyfusion.conf.utils.read_config(TEST_CONFIG_FILE)
class CheckH1MirnovCoords(H1DevTestCase):
def test_single_mirnov_channel_kappah_as_argument(self):
d=pyfusion.getDevice('H1')
data = d.acq.getdata(58073, 'H1_mirnov_array_1_coil_1')
<|code_end|>
, generate the next line using the imports in this file:
import os
import pyfusion
from pyfusion.test.tests import PfTestBase, BasePyfusionTestCase
from pyfusion.data.timeseries import TimeseriesData
from pyfusion.data.base import PfMetaData
from pyfusion.devices.base import Device
from pyfusion.devices.H1.device import H1
and context (functions, classes, or occasionally code) from other files:
# Path: pyfusion/test/tests.py
# class PfTestBase(object):
# """Base class for generated sql and non-sql test cases."""
# pass
#
# class BasePyfusionTestCase(unittest.TestCase):
# """Simple customisation of TestCase."""
# def __init__(self, *args):
# self.listed_device = CONFIG_TEST_DEVICE_NAME
# self.listed_empty_device = CONFIG_EMPTY_TEST_DEVICE_NAME
# self.unlisted_device = NONCONFIG_TEST_DEVICE_NAME
# self.shot_number = TEST_SHOT_NUMBER
# self.unlisted_config_section_type = UNLISTED_CONFIG_SECTION_TYPE
# unittest.TestCase.__init__(self, *args)
#
# Path: pyfusion/data/timeseries.py
# class TimeseriesData(BaseData):
# def __init__(self, timebase = None, signal=None, channels=None, bypass_length_check=False, **kwargs):
# self.timebase = timebase
# self.channels = channels
# self.scales = np.array([[1]]) # retain amplitude scaling info for later
# if bypass_length_check == True:
# self.signal = signal
# else:
# if signal.n_samples() == len(timebase):
# self.signal = signal
# else:
# raise ValueError, "signal has different number of samples to timebase"
# super(TimeseriesData, self).__init__(**kwargs)
#
#
# def generate_frequency_series(self, NFFT, step, window='hamming'):
# w =scipy.hamming(NFFT)
# if len(self.signal.shape)==2:
# signal = np.array([np.fft.rfft(w*self.signal[:,i:i+NFFT]/NFFT) for i in range(0, self.signal.shape[1]-NFFT, step)])
# else:
# signal = np.array([np.fft.rfft(w*self.signal[i:i+NFFT]/NFFT) for i in range(0, self.signal.shape[1]-NFFT, step)])
# timebase = np.array([np.average(self.timebase[i:i+NFFT])
# for i in range(0, self.signal.shape[1]-NFFT, step)])
#
# #This is borrowed from here:
# #http://docs.scipy.org/doc/numpy-dev/reference/generated/numpy.fft.rfftfreq.html
# d = (self.timebase[1] - self.timebase[0])
# val = 1.0/(NFFT*d)
# N = NFFT//2 + 1
# frequency_base = np.round((np.arange(0, N, dtype=int)) * val,4)
# return FrequencyseriesData(frequency_base = frequency_base, timebase=timebase,
# signal=signal,channels=self.channels,NFFT=NFFT, window='hamming',step=step)
. Output only the next line. | self.assertTrue(isinstance(data, TimeseriesData)) |
Predict the next line for this snippet: <|code_start|> "Checking again in:\n{} seconds.\n"
"Total time elapsed:\n{} seconds.".format(self.jobid, self._cur, self.total_time))
return
def start(self):
self.root.after(1000, self.countdown)
return
def verify_cancel(self):
win = tk.Toplevel(master=self.root)
win.resizable(width=False, height=False)
win.grab_set()
label = tk.Label(master=win, text="Do you really wish\nto close?", font=(font_name, 18))
label.grid(row=0, column=0, columnspan=2, sticky=tk.N)
yes = tk.Button(master=win, text="Yes", font=(font_name, 18), command=self.yes_cancel)
yes.grid(row=1, column=0, sticky=tk.N)
no = tk.Button(master=win, text="No", font=(font_name, 18), command=win.destroy)
no.grid(row=1, column=1, sticky=tk.N)
return
def yes_cancel(self):
subprocess.check_output("scancel {}".format(self.jobid), shell=True)
self.root.destroy()
return
def countdown(self):
self._cur -= 1
self.total_time += 1
if self._cur <= 0:
sjobexitmod_output = subprocess.check_output("sjobexitmod -l {}".format(self.jobid), shell=True)
<|code_end|>
with the help of current file imports:
import tkinter as tk
import Tkinter as tk
import subprocess
from CONSTANTS import *
from Utilities import jtools as jt
and context from other files:
# Path: Utilities/jtools.py
# class OutOfOrderException(Exception):
# class AnalysisError(Exception):
# class CycledList(list):
# def __getitem__(self, key):
# def slurm_id_from_output(sbatch_output):
# def check_slurm_for_job(squeue_output):
# def get_slurm_exit_state(sjobexitmod_output):
# def write_finished_file(f):
# def midpoint(x1, x2):
# def distance(x1, x2):
# def ANobj_times_to_time_window(t):
# def break_path(p, line_length):
# def text_location(xrange, yrange):
# def valid_int_from_str(s):
# def valid_float_from_str(s):
# def complex_mag(z):
# def complex_mag_list(zz):
# def type_verify(var, typ):
# def time_window_to_filelike_str(w):
# def scan_config(f):
# def in_tkStringVar_array(s, tksvarr):
# def valid_probe_array(s, f = DEFAULT_CONFIG_FILE):
# def return_methods():
# def valid_method(s):
# def probe_positions(probe_array):
# def valid_window(s):
# def remove_key(d, k):
# def time_window_parser(s):
# def t_in_window(t, win):
# def window_subset(w1, w2):
# def valid_num_or_range(s):
# def valid_shot_str(s):
# def shot_str_parser(s):
# def reverse_shot_str_parser(shot_list):
# def squareish_grid(n, swapxy=False):
# def find_closest(arr, x):
, which may contain function names, class names, or code. Output only the next line. | exit_state = jt.get_slurm_exit_state(sjobexitmod_output) |
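The Tkinter countdown row shells out to SLURM with `subprocess.check_output` and hands the report to `jt.get_slurm_exit_state`. A hedged sketch of that round trip; `sjobexitmod -l` output is site-specific, so the column index below is illustrative only:

```python
import subprocess

def get_slurm_exit_state(sjobexitmod_output):
    # Assumed layout: a header line, then the job record, with the exit
    # state as one whitespace-separated field. Adjust for your cluster.
    last_line = sjobexitmod_output.strip().splitlines()[-1]
    return last_line.split()[3]

def poll_job(jobid):
    out = subprocess.check_output(
        "sjobexitmod -l {}".format(jobid), shell=True).decode()
    return get_slurm_exit_state(out)
```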
Given snippet: <|code_start|> xZero = (b1 - b2)/(m2 - m1)
yZero = m1*xZero + b1
return (xZero, yZero)
@register("FlucStruc")
def fsplot_phase(input_data, closed=True, hold=0):
""" plot the phase of a flucstruc, optionally replicating the last point
at the beginning (if closed=True).
This version does not yet attempt to take into account angles, or check
that adjacent channels are adjacent (i.e. ch2-ch1, ch2-c2 etc).
Channel names are taken from the fs and plotted abbreviated
1/1/2011: TODO This appears to work only for database=None config
1/17/2011: bdb: May be fixed - I had used channel instead of channel.name
"""
# extract by channels
ch1n,ch2n,ch12n,dp = [],[],[],[]
# bdb this line should be replaced by a call to a routine names something
#like <plotted_width> to help in deciding if the label will fit on the
#current graph.
if (2*len(input_data.dphase)*len(input_data.dphase[0].item.channel_1.name))> 50:
sep = '\n-'
else: sep = '-'
#sep = '-'
for dpn in input_data.dphase:
ch1n.append(dpn.item.channel_1.name)
ch2n.append(dpn.item.channel_2.name)
ch12n.append(dpn.item.channel_1.name+sep+dpn.item.channel_2.name)
dp.append(dpn.item.delta)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from matplotlib.widgets import CheckButtons
from pyfusion.data.utils import peak_freq, split_names
import pylab as pl
import numpy as np
import pyfusion
import pylab as pl
import pylab as pl
and context:
# Path: pyfusion/data/utils.py
# def peak_freq(signal,timebase,minfreq=0,maxfreq=1.e18):
# """
# TODO: old code: needs review
# this function only has a basic unittest to make sure it returns
# the correct freq in a simple case.
# """
# timebase = array(timebase)
# sig_fft = fft.fft(signal)
# sample_time = float(mean(timebase[1:]-timebase[:-1]))
#
# #SRH modification, frequencies seemed a little bit off because of the -1 in the denominator
# #Here we are trusting numpy....
# #fft_freqs = (1./sample_time)*arange(len(sig_fft)).astype(float)/(len(sig_fft)-1)
# fft_freqs = fft.fftfreq(len(sig_fft),d=sample_time)
# # only show up to nyquist freq
# new_len = len(sig_fft)/2
# sig_fft = sig_fft[:new_len]
# fft_freqs = fft_freqs[:new_len]
# [minfreq_elmt,maxfreq_elmt] = searchsorted(fft_freqs,[minfreq,maxfreq])
# sig_fft = sig_fft[minfreq_elmt:maxfreq_elmt]
# fft_freqs = fft_freqs[minfreq_elmt:maxfreq_elmt]
#
# peak_elmt = (argsort(abs(sig_fft)))[-1]
# return [fft_freqs[peak_elmt], peak_elmt]
#
# def split_names(names, pad=' '):
# """ Given an array of strings, return an array of the part of the string
# (e.g. channel name) that varies, and optionally the prefix and suffix.
# The array of varying parts is first in the tuple in case others are not
# wanted. This is used to make the x labels of phase plots simpler and smaller.
# e.g.
# >>> split_names(['MP01','MP10'])
# (['01','10'], 'MP', '')
# """
# # make a new array with elements padded to the same length with <pad>
# nms = []
# maxlen = max([len(nm) for nm in names])
# for nm in names:
# nmarr = [c for c in nm]
# while len(nmarr)< maxlen: nmarr.append(pad)
# nms.append(nmarr)
#
# # the following numpy array comparisons look simple, but require the name string
# # to be exploded into chars. Although a single string can be interchangeably
# # referred to as a string or array of chars, these arrays they have to be
# # re-constituted before return.
# #
# # for nm in nms: # for each nm
# #find the first mismatch - first will be the first char of the extracted arr
# nms_arr=array(nms)
# first=0
# while (first < maxlen and
# (nms_arr[:,first] == nms_arr[0,first]).all()):
# first += 1
# # and the last
# last = maxlen-1
# while ((last >= 0) and
# (nms_arr[:,last] == nms_arr[0,last]).all()):
# last -= 1
#
#
# # check for no mismatch
# if first==maxlen: return(['' for nm in names], ''.join(nms[0]),'')
# # otherwise return, (no need for special code for the case of no match at all)
# return(([''.join(s) for s in nms_arr[:,first:last+1]],
# ''.join(nms_arr[0,0:first]),
# ''.join(nms_arr[0,last+1:maxlen+1])))
which might include code, classes, or functions. Output only the next line. | short_names_1,p,s = split_names(ch1n) # need to break up loops to do this |
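`split_names` trims the shared prefix and suffix from a list of channel names so axis labels show only the varying part. The pyfusion version pads names and walks character columns; the sketch below gets the same answer for same-length names via `os.path.commonprefix`:

```python
import os.path

def split_varying(names):
    prefix = os.path.commonprefix(names)
    suffix = os.path.commonprefix([n[::-1] for n in names])[::-1]
    middles = [n[len(prefix):len(n) - len(suffix)] for n in names]
    return middles, prefix, suffix

print(split_varying(["MP01", "MP10"]))   # (['01', '10'], 'MP', '')
```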
Given the code snippet: <|code_start|>"""Test code for data acquisition."""
# channel names in pyfusion test config file
timeseries_test_channel_1 = "test_timeseries_channel_1"
timeseries_test_channel_2 = "test_timeseries_channel_2"
multichannel_name = "test_multichannel_timeseries"
<|code_end|>
, generate the next line using the imports in this file:
from pyfusion.test.tests import PfTestBase
from pyfusion.acquisition.FakeData.acq import FakeDataAcquisition
from pyfusion.acquisition.base import BaseAcquisition
from pyfusion.acquisition.FakeData.acq import FakeDataAcquisition
from pyfusion import conf
from pyfusion.data.timeseries import TimeseriesData
from numpy.testing import assert_array_almost_equal
from pyfusion.devices.base import Device
from pyfusion import conf, config
from pyfusion import getDevice
from pyfusion.data.timeseries import TimeseriesData
from pyfusion.acquisition.base import BaseDataFetcher
from pyfusion.acquisition.FakeData.fetch import SingleChannelSineFetcher
from pyfusion.data.timeseries import TimeseriesData
from numpy import arange, sin, pi
from numpy.testing import assert_array_almost_equal
from pyfusion.acquisition.base import MultiChannelFetcher
from pyfusion.acquisition.FakeData.acq import FakeDataAcquisition
from pyfusion import config
from numpy.testing import assert_array_almost_equal
and context (functions, classes, or occasionally code) from other files:
# Path: pyfusion/test/tests.py
# class PfTestBase(object):
# """Base class for generated sql and non-sql test cases."""
# pass
. Output only the next line. | class CheckFakeDataAcquisition(PfTestBase): |
Given the code snippet: <|code_start|> plt.title("Shot 159243 Toroidal Array")
if dosave is not None and "poloidal" in dosave.lower():
plt.title("Shot 159243 Poloidal Array")
plt.xlabel("Time (ms)")
plt.ylabel("Freq (kHz)")
plt.xlim([750, 850])
plt.ylim([45, 250])
if doplot:
plt.show()
if dosave is not None:
plt.savefig(dosave)
else:
ax.specgram(A.results[0][2][0, :], NFFT=1024, Fs=1. / np.mean(np.diff(A.results[0][3])),
noverlap=128, xextent=[A.results[0][3][0], A.results[0][3][-1]])
if clust_arr is not None:
for cl in clust_arr:
mask = (A.z.cluster_assignments == cl)
ax.plot(A.z.feature_obj.misc_data_dict["time"][mask],
A.z.feature_obj.misc_data_dict["freq"][mask],
color=plot_colors[cl], marker="o", linestyle="None",
markersize=A.markersize)
return
def point_analysis(A, shot, time_window, t0, f0, probe_array, doplot=True, dosave=None, clustarr=None):
fft = A.DM.raw_ffts[str(shot)]
raw_mirnov = fft.signal
raw_times = fft.timebase
raw_freqs = fft.frequency_base
<|code_end|>
, generate the next line using the imports in this file:
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from Utilities import jtools as jt
and context (functions, classes, or occasionally code) from other files:
# Path: Utilities/jtools.py
# class OutOfOrderException(Exception):
# class AnalysisError(Exception):
# class CycledList(list):
# def __getitem__(self, key):
# def slurm_id_from_output(sbatch_output):
# def check_slurm_for_job(squeue_output):
# def get_slurm_exit_state(sjobexitmod_output):
# def write_finished_file(f):
# def midpoint(x1, x2):
# def distance(x1, x2):
# def ANobj_times_to_time_window(t):
# def break_path(p, line_length):
# def text_location(xrange, yrange):
# def valid_int_from_str(s):
# def valid_float_from_str(s):
# def complex_mag(z):
# def complex_mag_list(zz):
# def type_verify(var, typ):
# def time_window_to_filelike_str(w):
# def scan_config(f):
# def in_tkStringVar_array(s, tksvarr):
# def valid_probe_array(s, f = DEFAULT_CONFIG_FILE):
# def return_methods():
# def valid_method(s):
# def probe_positions(probe_array):
# def valid_window(s):
# def remove_key(d, k):
# def time_window_parser(s):
# def t_in_window(t, win):
# def window_subset(w1, w2):
# def valid_num_or_range(s):
# def valid_shot_str(s):
# def shot_str_parser(s):
# def reverse_shot_str_parser(shot_list):
# def squareish_grid(n, swapxy=False):
# def find_closest(arr, x):
. Output only the next line. | nt, t_actual = jt.find_closest(raw_times, t0) |
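The call site `nt, t_actual = jt.find_closest(raw_times, t0)` implies an (index, value) pair; the index/value ordering here is inferred from that unpacking, not from jtools itself. A one-liner equivalent with numpy:

```python
import numpy as np

def find_closest(arr, x):
    idx = int(np.argmin(np.abs(np.asarray(arr) - x)))
    return idx, arr[idx]

times = np.array([0.0, 0.5, 1.0, 1.5])
nt, t_actual = find_closest(times, 0.74)
print(nt, t_actual)   # 1 0.5 -- 0.74 is nearer to 0.5 than to 1.0
```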
Predict the next line after this snippet: <|code_start|>
class Device(object):
"""Represent a laboratory device.
In general, a customised subclass of Device will be used.
Usage: Device(device_name, **kwargs)
Arguments:
device_name -- name of device as listed in configuration file,
i.e.: [Device:device_name]
Keyword arguments:
Any setting in the [Device:device_name] section of the
configuration file can be overridden by supplying a keyword
argument to here, e.g.: Device(device_name)
"""
def __init__(self, config_name, **kwargs):
if pyfusion.config.pf_has_section('Device', config_name):
self.__dict__.update(get_config_as_dict('Device', config_name))
self.__dict__.update(kwargs)
self.name = config_name
#### attach acquisition
if hasattr(self, 'acq_name'):
acq_class_str = pyfusion.config.pf_get('Acquisition',
self.acq_name, 'acq_class')
<|code_end|>
using the current file's imports:
from pyfusion.conf.utils import kwarg_config_handler, import_from_str, get_config_as_dict
from pyfusion.orm.utils import orm_register
from sqlalchemy import Table, Column, Integer, String
from sqlalchemy.orm import mapper
import pyfusion
and any relevant context from other files:
# Path: pyfusion/conf/utils.py
# def kwarg_config_handler(component_type, component_name, **kwargs):
# for config_var in pyfusion.config.pf_options(component_type, component_name):
# if not config_var in kwargs.keys():
# kwargs[config_var] = pyfusion.config.pf_get(component_type,
# component_name, config_var)
# return kwargs
#
# def import_from_str(string_value):
# # TODO: make shortcuts for loading from within pyfusion
# split_val = string_value.split('.')
# val_module = __import__('.'.join(split_val[:-1]),
# globals(), locals(),
# [split_val[-1]])
# return val_module.__dict__[split_val[-1]]
#
# def get_config_as_dict(component_type, component_name):
# config_option_list = pyfusion.config.pf_options(component_type, component_name)
# config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
# return dict(map(config_map, config_option_list))
#
# Path: pyfusion/orm/utils.py
# def orm_register():
# def reg_item(orm_func):
# pyfusion.orm_manager.add_reg_func(orm_func)
# return orm_func
# return reg_item
. Output only the next line. | self.acquisition = import_from_str(acq_class_str)(self.acq_name) |
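The answer line turns a config string into a live class and instantiates it in one go: `import_from_str(acq_class_str)(self.acq_name)`. The context shows the `__import__`-based original; an equivalent written with `importlib` for readability:

```python
import importlib

def import_from_str(string_value):
    module_path, _, attr = string_value.rpartition(".")
    return getattr(importlib.import_module(module_path), attr)

# Same shape as the Device code above: config string -> class -> instance.
OrderedDict = import_from_str("collections.OrderedDict")
print(OrderedDict([("acq_class", "resolved")]))
```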
Predict the next line for this snippet: <|code_start|>"""Basic device class"""
class Device(object):
"""Represent a laboratory device.
In general, a customised subclass of Device will be used.
Usage: Device(device_name, **kwargs)
Arguments:
device_name -- name of device as listed in configuration file,
i.e.: [Device:device_name]
Keyword arguments:
Any setting in the [Device:device_name] section of the
configuration file can be overridden by supplying a keyword
argument to here, e.g.: Device(device_name)
"""
def __init__(self, config_name, **kwargs):
if pyfusion.config.pf_has_section('Device', config_name):
<|code_end|>
with the help of current file imports:
from pyfusion.conf.utils import kwarg_config_handler, import_from_str, get_config_as_dict
from pyfusion.orm.utils import orm_register
from sqlalchemy import Table, Column, Integer, String
from sqlalchemy.orm import mapper
import pyfusion
and context from other files:
# Path: pyfusion/conf/utils.py
# def kwarg_config_handler(component_type, component_name, **kwargs):
# for config_var in pyfusion.config.pf_options(component_type, component_name):
# if not config_var in kwargs.keys():
# kwargs[config_var] = pyfusion.config.pf_get(component_type,
# component_name, config_var)
# return kwargs
#
# def import_from_str(string_value):
# # TODO: make shortcuts for loading from within pyfusion
# split_val = string_value.split('.')
# val_module = __import__('.'.join(split_val[:-1]),
# globals(), locals(),
# [split_val[-1]])
# return val_module.__dict__[split_val[-1]]
#
# def get_config_as_dict(component_type, component_name):
# config_option_list = pyfusion.config.pf_options(component_type, component_name)
# config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
# return dict(map(config_map, config_option_list))
#
# Path: pyfusion/orm/utils.py
# def orm_register():
# def reg_item(orm_func):
# pyfusion.orm_manager.add_reg_func(orm_func)
# return orm_func
# return reg_item
, which may contain function names, class names, or code. Output only the next line. | self.__dict__.update(get_config_as_dict('Device', config_name)) |
Using the snippet: <|code_start|> Arguments:
device_name -- name of device as listed in configuration file,
i.e.: [Device:device_name]
Keyword arguments:
Any setting in the [Device:device_name] section of the
configuration file can be overridden by supplying a keyword
argument to here, e.g.: Device(device_name)
"""
def __init__(self, config_name, **kwargs):
if pyfusion.config.pf_has_section('Device', config_name):
self.__dict__.update(get_config_as_dict('Device', config_name))
self.__dict__.update(kwargs)
self.name = config_name
#### attach acquisition
if hasattr(self, 'acq_name'):
acq_class_str = pyfusion.config.pf_get('Acquisition',
self.acq_name, 'acq_class')
self.acquisition = import_from_str(acq_class_str)(self.acq_name)
# shortcut
self.acq = self.acquisition
else:
pyfusion.logging.warning(
"No acquisition class specified for device")
<|code_end|>
, determine the next line of code. You have imports:
from pyfusion.conf.utils import kwarg_config_handler, import_from_str, get_config_as_dict
from pyfusion.orm.utils import orm_register
from sqlalchemy import Table, Column, Integer, String
from sqlalchemy.orm import mapper
import pyfusion
and context (class names, function names, or code) available:
# Path: pyfusion/conf/utils.py
# def kwarg_config_handler(component_type, component_name, **kwargs):
# for config_var in pyfusion.config.pf_options(component_type, component_name):
# if not config_var in kwargs.keys():
# kwargs[config_var] = pyfusion.config.pf_get(component_type,
# component_name, config_var)
# return kwargs
#
# def import_from_str(string_value):
# # TODO: make shortcuts for loading from within pyfusion
# split_val = string_value.split('.')
# val_module = __import__('.'.join(split_val[:-1]),
# globals(), locals(),
# [split_val[-1]])
# return val_module.__dict__[split_val[-1]]
#
# def get_config_as_dict(component_type, component_name):
# config_option_list = pyfusion.config.pf_options(component_type, component_name)
# config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
# return dict(map(config_map, config_option_list))
#
# Path: pyfusion/orm/utils.py
# def orm_register():
# def reg_item(orm_func):
# pyfusion.orm_manager.add_reg_func(orm_func)
# return orm_func
# return reg_item
. Output only the next line. | @orm_register() |
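`@orm_register()` is a registration decorator: it files the function away for later and returns it unchanged, so ORM setup can be deferred until an engine exists. A runnable reduction with a toy manager in place of `pyfusion.orm_manager`:

```python
class OrmManager:
    def __init__(self):
        self.reg_funcs = []

    def add_reg_func(self, func):
        self.reg_funcs.append(func)

orm_manager = OrmManager()

def orm_register():
    def reg_item(orm_func):
        orm_manager.add_reg_func(orm_func)
        return orm_func          # registration only; the function is not wrapped
    return reg_item

@orm_register()
def setup_coords(man):
    print("would create the coords table using", man)

for func in orm_manager.reg_funcs:   # deferred call, e.g. at ORM start-up
    func("engine")
```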
Using the snippet: <|code_start|>"""MDSPlus acquisition."""
try:
except:
print "MDSplus python package not found"
<|code_end|>
, determine the next line of code. You have imports:
import warnings, os
import MDSplus
from pyfusion.acquisition.base import BaseAcquisition
and context (class names, function names, or code) available:
# Path: pyfusion/acquisition/base.py
# class BaseAcquisition(object):
# """Base class for datasystem specific acquisition classes.
#
# :param config_name: name of acquisition as specified in\
# configuration file.
#
# On instantiation, the pyfusion configuration is searched for a
# ``[Acquisition:config_name]`` section. The contents of the
# configuration section are loaded into the object namespace. For
# example, a configuration section::
#
# [Acquisition:my_custom_acq]
# acq_class = pyfusion.acquisition.base.BaseAcquisition
# server = my.dataserver.com
#
# will result in the following behaviour::
#
# >>> from pyfusion.acquisition.base import BaseAcquisition
# >>> my_acq = BaseAcquisition('my_custom_acq')
# >>> print(my_acq.server)
# my.dataserver.com
#
# The configuration entries can be overridden with keyword arguments::
#
# >>> my_other_acq = BaseAcquisition('my_custom_acq', server='your.data.net')
# >>> print(my_other_acq.server)
# your.data.net
#
# """
# def __init__(self, config_name=None, **kwargs):
# if config_name != None:
# self.__dict__.update(get_config_as_dict('Acquisition', config_name))
# self.__dict__.update(kwargs)
#
# def getdata(self, shot, config_name=None, **kwargs):
# """Get the data and return prescribed subclass of BaseData.
#
# :param shot: shot number
# :param config_name: name of a fetcher class in the configuration file
# :returns: an instance of a subclass of \
# :py:class:`~pyfusion.data.base.BaseData` or \
# :py:class:`~pyfusion.data.base.BaseDataSet`
#
# This method needs to know which data fetcher class to use, if a
# config_name argument is supplied then the
# ``[Diagnostic:config_name]`` section must exist in the
# configuration file and contain a ``data_fetcher`` class
# specification, for example::
#
# [Diagnostic:H1_mirnov_array_1_coil_1]
# data_fetcher = pyfusion.acquisition.H1.fetch.H1DataFetcher
# mds_path = \h1data::top.operations.mirnov:a14_14:input_1
# coords_cylindrical = 1.114, 0.7732, 0.355
# coord_transform = H1_mirnov
#
# If a ``data_fetcher`` keyword argument is supplied, it overrides
# the configuration file specification.
#
# The fetcher class is instantiated, including any supplied
# keyword arguments, and the result of the ``fetch`` method of the
# fetcher class is returned.
# """
# from pyfusion import config
# # if there is a data_fetcher arg, use that, otherwise get from config
# if kwargs.has_key('data_fetcher'):
# fetcher_class_name = kwargs['data_fetcher']
# else:
# fetcher_class_name = config.pf_get('Diagnostic',
# config_name,
# 'data_fetcher')
# fetcher_class = import_from_str(fetcher_class_name)
# return fetcher_class(self, shot,
# config_name=config_name, **kwargs).fetch()
. Output only the next line. | class MDSPlusAcquisition(BaseAcquisition): |
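The MDSplus row opens with a guarded import whose body was elided by the dataset generator. The usual completion of that idiom, written here in Python 3 (the quoted source is Python 2) and with the fallback behaviour assumed:

```python
try:
    import MDSplus            # optional: present only on MDSplus-enabled hosts
except ImportError:
    MDSplus = None
    print("MDSplus python package not found")

def require_mdsplus():
    """Fail at use time with a clear message, not at module import time."""
    if MDSplus is None:
        raise RuntimeError("this acquisition backend needs the MDSplus package")
    return MDSplus
```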
Predict the next line for this snippet: <|code_start|> (1.185, 0.7732, 0.289):c2,
(1.216, 0.7732, 0.227):c3,
(1.198, 0.7732, 0.137):c4,
(1.129, 0.7732, 0.123):c5,
(1.044, 0.7732, 0.128):c6,
(0.963, 0.7732, 0.112):c7,
(0.924, 0.7732, 0.087):c8,
(0.902, 0.7732, 0.052):c9,
(0.900, 0.7732, -0.008):c10,
(0.925, 0.7732, -0.073):c11,
(0.964, 0.7732, -0.169):c12,
(0.897, 0.7732, -0.238):c13,
(0.821, 0.7732, -0.221):c14,
(0.696, 0.7732, -0.106):c15,
(0.652, 0.7732, 0.036):c16,
(0.676, 0.7732, 0.193):c17,
(0.790, 0.7732, 0.326):c18,
(0.806, 0.7732, 0.336):c19,
(0.934, 0.7732, 0.383):c20,
}
def map_kappa_h_mag_angle(coords, kappa_h):
kh_angle_poly = poly1d(coil_coef_mapping[coords])
return polyval(kh_angle_poly, kappa_h)
################################################################
### End of code without unittests ###### #######################
################################################################
<|code_end|>
with the help of current file imports:
from numpy import poly1d,polyval
from pyfusion.data.base import BaseCoordTransform
and context from other files:
# Path: pyfusion/data/base.py
# class BaseCoordTransform(object):
# """Base class does nothing useful at the moment"""
# input_coords = 'base_input'
# output_coords = 'base_output'
#
# def transform(self, coords):
# return coords
, which may contain function names, class names, or code. Output only the next line. | class MirnovKhMagneticCoordTransform(BaseCoordTransform): |
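The coordinate-transform row reduces to one numpy call: look up a coil's polynomial coefficients and evaluate them at kappa_h. Runnable with made-up coefficients (the real ones live in `coil_coef_mapping` above):

```python
from numpy import poly1d, polyval

# Highest power first, as poly1d expects; these values are illustrative only.
coil_coef_mapping = {(1.114, 0.7732, 0.355): [2.0, -1.0, 0.5]}   # 2k^2 - k + 0.5

def map_kappa_h_mag_angle(coords, kappa_h):
    kh_angle_poly = poly1d(coil_coef_mapping[coords])
    return polyval(kh_angle_poly, kappa_h)

print(map_kappa_h_mag_angle((1.114, 0.7732, 0.355), 0.74))
```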
Continue the code snippet: <|code_start|>
for test_class in find_subclasses(pyfusion, PfTestBase):
globals()['TestSQL%s' %test_class.__name__] = type('TestSQL%s' %test_class.__name__, (test_class, SQLTestCase), {})
globals()['TestSQL%s' %test_class.__name__].sql = True
globals()['TestSQL%s' %test_class.__name__].generated = True
<|code_end|>
. Use current file imports:
import pyfusion
from pyfusion.test.tests import find_subclasses, PfTestBase, SQLTestCase, NoSQLTestCase, TEST_FLAGS
and context (classes, functions, or code) from other files:
# Path: pyfusion/test/tests.py
# def find_subclasses(module, input_class):
# mod_list = [i for i in pkgutil.walk_packages(module.__path__, module.__name__+'.')]
# output = []
#
# for tmp_instance, mod_name, is_pack in mod_list:
# __import__(mod_name)
# for name, cls in inspect.getmembers(sys.modules[mod_name]):
# if inspect.isclass(cls) and issubclass(cls, input_class) and cls != input_class:
# output.append(cls)
# return output
#
# class PfTestBase(object):
# """Base class for generated sql and non-sql test cases."""
# pass
#
# class SQLTestCase(BasePyfusionTestCase):
#
# def setUp(self):
# pyfusion.orm_manager.shutdown_orm()
# pyfusion.conf.utils.clear_config()
# pyfusion.conf.utils.read_config(TEST_CONFIG_FILE)
#
# class NoSQLTestCase(BasePyfusionTestCase):
#
# def setUp(self):
# pyfusion.orm_manager.shutdown_orm()
# pyfusion.conf.utils.clear_config()
# pyfusion.conf.utils.read_config(TEST_NOSQL_CONFIG_FILE)
#
# TEST_FLAGS = ['dev']
. Output only the next line. | globals()['TestNoSQL%s' %test_class.__name__] = type('TestNoSQL%s' %test_class.__name__, (test_class, NoSQLTestCase), {}) |
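Both answer lines in this row build test classes at runtime with the three-argument `type(name, bases, attrs)` call, crossing every discovered `PfTestBase` subclass with an SQL and a no-SQL fixture. A compact, runnable version of the trick:

```python
import unittest

class PfTestBase:
    def test_smoke(self):
        self.assertTrue(True)

class SQLTestCase(unittest.TestCase):
    backend = "sql"

class NoSQLTestCase(unittest.TestCase):
    backend = "nosql"

for base in (SQLTestCase, NoSQLTestCase):
    name = "Test%s%s" % (base.backend.capitalize(), PfTestBase.__name__)
    cls = type(name, (PfTestBase, base), {"generated": True})
    globals()[name] = cls           # unittest discovery finds it by name

print(sorted(n for n in globals() if n.startswith("Test")))
# -> ['TestNosqlPfTestBase', 'TestSqlPfTestBase']
```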
Given the code snippet: <|code_start|>
for test_class in find_subclasses(pyfusion, PfTestBase):
globals()['TestSQL%s' %test_class.__name__] = type('TestSQL%s' %test_class.__name__, (test_class, SQLTestCase), {})
globals()['TestSQL%s' %test_class.__name__].sql = True
globals()['TestSQL%s' %test_class.__name__].generated = True
globals()['TestNoSQL%s' %test_class.__name__] = type('TestNoSQL%s' %test_class.__name__, (test_class, NoSQLTestCase), {})
globals()['TestNoSQL%s' %test_class.__name__].sql = False
globals()['TestNoSQL%s' %test_class.__name__].generated = True
<|code_end|>
, generate the next line using the imports in this file:
import pyfusion
from pyfusion.test.tests import find_subclasses, PfTestBase, SQLTestCase, NoSQLTestCase, TEST_FLAGS
and context (functions, classes, or occasionally code) from other files:
# Path: pyfusion/test/tests.py
# def find_subclasses(module, input_class):
# mod_list = [i for i in pkgutil.walk_packages(module.__path__, module.__name__+'.')]
# output = []
#
# for tmp_instance, mod_name, is_pack in mod_list:
# __import__(mod_name)
# for name, cls in inspect.getmembers(sys.modules[mod_name]):
# if inspect.isclass(cls) and issubclass(cls, input_class) and cls != input_class:
# output.append(cls)
# return output
#
# class PfTestBase(object):
# """Base class for generated sql and non-sql test cases."""
# pass
#
# class SQLTestCase(BasePyfusionTestCase):
#
# def setUp(self):
# pyfusion.orm_manager.shutdown_orm()
# pyfusion.conf.utils.clear_config()
# pyfusion.conf.utils.read_config(TEST_CONFIG_FILE)
#
# class NoSQLTestCase(BasePyfusionTestCase):
#
# def setUp(self):
# pyfusion.orm_manager.shutdown_orm()
# pyfusion.conf.utils.clear_config()
# pyfusion.conf.utils.read_config(TEST_NOSQL_CONFIG_FILE)
#
# TEST_FLAGS = ['dev']
. Output only the next line. | for flag in TEST_FLAGS: |
Given snippet: <|code_start|>
class MetaMethods(type):
"""Metaclass which provides filter and plot methods for data classes."""
def __new__(cls, name, bases, attrs):
for reg in [filter_reg, plot_reg]:
reg_methods = reg.get(name, [])
attrs.update((i.__name__,history_reg_method(i))
for i in reg_methods)
return super(MetaMethods, cls).__new__(cls, name, bases, attrs)
class Coords(object):
"""Stores coordinates with an interface for coordinate transforms."""
def __init__(self, default_coords_name, default_coords_tuple, **kwargs):
self.default_name = default_coords_name
self.default_value_1 = default_coords_tuple[0]
self.default_value_2 = default_coords_tuple[1]
self.default_value_3 = default_coords_tuple[2]
kwargs.update(((default_coords_name, default_coords_tuple),))
self.__dict__.update(kwargs)
def add_coords(self, **kwargs):
self.__dict__.update(kwargs)
def load_from_config(self, **kwargs):
for kw in kwargs.iteritems():
if kw[0] == 'coord_transform':
transform_list = pyfusion.config.pf_options('CoordTransform', kw[1])
for transform_name in transform_list:
transform_class_str = pyfusion.config.pf_get('CoordTransform', kw[1], transform_name)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import operator
import uuid
import copy
import pyfusion
from datetime import datetime
from pyfusion.conf.utils import import_from_str, get_config_as_dict
from pyfusion.data.filters import filter_reg
from pyfusion.data.plots import plot_reg
from pyfusion.data.utils import unique_id
from pyfusion.orm.utils import orm_register
from sqlalchemy import Table, Column, String, Integer, Float, ForeignKey, \
DateTime, PickleType
from sqlalchemy.orm import reconstructor, mapper, relation, dynamic_loader
from sqlalchemy.orm.collections import column_mapped_collection
and context:
# Path: pyfusion/conf/utils.py
# def import_from_str(string_value):
# # TODO: make shortcuts for loading from within pyfusion
# split_val = string_value.split('.')
# val_module = __import__('.'.join(split_val[:-1]),
# globals(), locals(),
# [split_val[-1]])
# return val_module.__dict__[split_val[-1]]
#
# def get_config_as_dict(component_type, component_name):
# config_option_list = pyfusion.config.pf_options(component_type, component_name)
# config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
# return dict(map(config_map, config_option_list))
#
# Path: pyfusion/data/filters.py
# def cps(a,b):
# def register(*class_names):
# def reg_item(filter_method):
# def reduce_time(input_data, new_time_range):
# def segment(input_data, n_samples, overlap=1.0, datalist= 0):
# def remove_noncontiguous(input_dataset):
# def normalise(input_data, method='peak', separate=False):
# def svd(input_data):
# def fs_group_geometric(input_data, max_energy = 1.0):
# def fs_group_threshold(input_data, threshold=0.2):
# def flucstruc(input_data, min_dphase = -pi, group=fs_group_geometric, method='rms', separate=True, label=None):
# def subtract_mean(input_data):
# def sp_filter_butterworth_bandpass(input_data, passband, stopband, max_passband_loss, min_stopband_attenuation):
# def correlate(input_data, index_1, index_2, **kwargs):
# def change_time_base(input_data, new_time_base):
#
# Path: pyfusion/data/plots.py
# def register(*class_names):
# def reg_item(plot_method):
# def plot_signals(input_data, filename=None,downsamplefactor=1,n_columns=1):
# def plot_spectrogram(input_data, windowfn=None, channel_number=0, filename=None, coloraxis=None, noverlap=0,NFFT=None, **kwargs):
# def join_ends(inarray,add_2pi = False,add_360deg=False,add_lenarray=False,add_one=False):
# def posNegFill(x,y1,y2):
# def __init__(self,energy_list,initial_list):
# def add(self,elmt):
# def sub(self,elmt):
# def findZero(i,x,y1,y2):
# def fsplot_phase(input_data, closed=True, hold=0):
# def svdplot(input_data, fmax=None, hold=0):
# def button_action(label):
# NFFT=(eval(pyfusion.config.get('Plots','NFFT')))
# NFFT = 2048
# class Energy:
#
# Path: pyfusion/data/utils.py
# def unique_id():
# try:
# return str(uuid.uuid4())
# except:
# return ''.join(_random.choice(string.letters) for i in range(50))
#
# Path: pyfusion/orm/utils.py
# def orm_register():
# def reg_item(orm_func):
# pyfusion.orm_manager.add_reg_func(orm_func)
# return orm_func
# return reg_item
which might include code, classes, or functions. Output only the next line. | transform_class = import_from_str(transform_class_str) |
Given the code snippet: <|code_start|> def load_transform(self, transform_class):
def _new_transform_method(**kwargs):
return transform_class().transform(self.__dict__.get(transform_class.input_coords),**kwargs)
self.__dict__.update({transform_class.output_coords:_new_transform_method})
def save(self):
if pyfusion.orm_manager.IS_ACTIVE:
# this may be inefficient: get it working, then get it fast
session = pyfusion.orm_manager.Session()
session.add(self)
session.commit()
session.close()
@orm_register()
def setup_coords(man):
man.coords_table = Table('coords', man.metadata,
Column('id', Integer, primary_key=True),
Column('default_name', String(30), nullable=False),
Column('default_value_1', Float),
Column('default_value_2', Float),
Column('default_value_3', Float))
man.metadata.create_all()
mapper(Coords, man.coords_table)
def get_coords_for_channel(channel_name=None, **kwargs):
config_dict = kwargs.copy()
if channel_name:
<|code_end|>
, generate the next line using the imports in this file:
import operator
import uuid
import copy
import pyfusion
from datetime import datetime
from pyfusion.conf.utils import import_from_str, get_config_as_dict
from pyfusion.data.filters import filter_reg
from pyfusion.data.plots import plot_reg
from pyfusion.data.utils import unique_id
from pyfusion.orm.utils import orm_register
from sqlalchemy import Table, Column, String, Integer, Float, ForeignKey, \
DateTime, PickleType
from sqlalchemy.orm import reconstructor, mapper, relation, dynamic_loader
from sqlalchemy.orm.collections import column_mapped_collection
and context (functions, classes, or occasionally code) from other files:
# Path: pyfusion/conf/utils.py
# def import_from_str(string_value):
# # TODO: make shortcuts for loading from within pyfusion
# split_val = string_value.split('.')
# val_module = __import__('.'.join(split_val[:-1]),
# globals(), locals(),
# [split_val[-1]])
# return val_module.__dict__[split_val[-1]]
#
# def get_config_as_dict(component_type, component_name):
# config_option_list = pyfusion.config.pf_options(component_type, component_name)
# config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
# return dict(map(config_map, config_option_list))
#
# Path: pyfusion/data/filters.py
# def cps(a,b):
# def register(*class_names):
# def reg_item(filter_method):
# def reduce_time(input_data, new_time_range):
# def segment(input_data, n_samples, overlap=1.0, datalist= 0):
# def remove_noncontiguous(input_dataset):
# def normalise(input_data, method='peak', separate=False):
# def svd(input_data):
# def fs_group_geometric(input_data, max_energy = 1.0):
# def fs_group_threshold(input_data, threshold=0.2):
# def flucstruc(input_data, min_dphase = -pi, group=fs_group_geometric, method='rms', separate=True, label=None):
# def subtract_mean(input_data):
# def sp_filter_butterworth_bandpass(input_data, passband, stopband, max_passband_loss, min_stopband_attenuation):
# def correlate(input_data, index_1, index_2, **kwargs):
# def change_time_base(input_data, new_time_base):
#
# Path: pyfusion/data/plots.py
# def register(*class_names):
# def reg_item(plot_method):
# def plot_signals(input_data, filename=None,downsamplefactor=1,n_columns=1):
# def plot_spectrogram(input_data, windowfn=None, channel_number=0, filename=None, coloraxis=None, noverlap=0,NFFT=None, **kwargs):
# def join_ends(inarray,add_2pi = False,add_360deg=False,add_lenarray=False,add_one=False):
# def posNegFill(x,y1,y2):
# def __init__(self,energy_list,initial_list):
# def add(self,elmt):
# def sub(self,elmt):
# def findZero(i,x,y1,y2):
# def fsplot_phase(input_data, closed=True, hold=0):
# def svdplot(input_data, fmax=None, hold=0):
# def button_action(label):
# NFFT=(eval(pyfusion.config.get('Plots','NFFT')))
# NFFT = 2048
# class Energy:
#
# Path: pyfusion/data/utils.py
# def unique_id():
# try:
# return str(uuid.uuid4())
# except:
# return ''.join(_random.choice(string.letters) for i in range(50))
#
# Path: pyfusion/orm/utils.py
# def orm_register():
# def reg_item(orm_func):
# pyfusion.orm_manager.add_reg_func(orm_func)
# return orm_func
# return reg_item
. Output only the next line. | config_dict.update(get_config_as_dict('Diagnostic', channel_name)) |
Next line prediction: <|code_start|> def updated_method(input_data, *args, **kwargs):
do_copy = kwargs.pop('copy', True)
if do_copy:
original_hist = input_data.history
input_data = copy.copy(input_data)
copy_history_string = "\n%s > (copy)" %(datetime.now())
input_data.history = original_hist + copy_history_string
args_string = ', '.join(map(str,args))
if args_string is not '':
args_string += ', '
kwargs_string = ', '.join("%s='%s'" %(str(i[0]), str(i[1]))
for i in kwargs.items())
history_string = "\n%s > %s(%s%s)" %(datetime.now(), method.__name__,
args_string, kwargs_string)
input_data.history += history_string
output = method(input_data, *args, **kwargs)
# TODO output.meta.update() looks wrong - if a filter modifies a meta value, does this
# overwrite the modified version with the original?
if output != None:
output.meta.update(input_data.meta)
return output
return updated_method
class MetaMethods(type):
"""Metaclass which provides filter and plot methods for data classes."""
def __new__(cls, name, bases, attrs):
<|code_end|>
. Use current file imports:
(import operator
import uuid
import copy
import pyfusion
from datetime import datetime
from pyfusion.conf.utils import import_from_str, get_config_as_dict
from pyfusion.data.filters import filter_reg
from pyfusion.data.plots import plot_reg
from pyfusion.data.utils import unique_id
from pyfusion.orm.utils import orm_register
from sqlalchemy import Table, Column, String, Integer, Float, ForeignKey, \
DateTime, PickleType
from sqlalchemy.orm import reconstructor, mapper, relation, dynamic_loader
from sqlalchemy.orm.collections import column_mapped_collection)
and context including class names, function names, or small code snippets from other files:
# Path: pyfusion/conf/utils.py
# def import_from_str(string_value):
# # TODO: make shortcuts for loading from within pyfusion
# split_val = string_value.split('.')
# val_module = __import__('.'.join(split_val[:-1]),
# globals(), locals(),
# [split_val[-1]])
# return val_module.__dict__[split_val[-1]]
#
# def get_config_as_dict(component_type, component_name):
# config_option_list = pyfusion.config.pf_options(component_type, component_name)
# config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
# return dict(map(config_map, config_option_list))
#
# Path: pyfusion/data/filters.py
# def cps(a,b):
# def register(*class_names):
# def reg_item(filter_method):
# def reduce_time(input_data, new_time_range):
# def segment(input_data, n_samples, overlap=1.0, datalist= 0):
# def remove_noncontiguous(input_dataset):
# def normalise(input_data, method='peak', separate=False):
# def svd(input_data):
# def fs_group_geometric(input_data, max_energy = 1.0):
# def fs_group_threshold(input_data, threshold=0.2):
# def flucstruc(input_data, min_dphase = -pi, group=fs_group_geometric, method='rms', separate=True, label=None):
# def subtract_mean(input_data):
# def sp_filter_butterworth_bandpass(input_data, passband, stopband, max_passband_loss, min_stopband_attenuation):
# def correlate(input_data, index_1, index_2, **kwargs):
# def change_time_base(input_data, new_time_base):
#
# Path: pyfusion/data/plots.py
# def register(*class_names):
# def reg_item(plot_method):
# def plot_signals(input_data, filename=None,downsamplefactor=1,n_columns=1):
# def plot_spectrogram(input_data, windowfn=None, channel_number=0, filename=None, coloraxis=None, noverlap=0,NFFT=None, **kwargs):
# def join_ends(inarray,add_2pi = False,add_360deg=False,add_lenarray=False,add_one=False):
# def posNegFill(x,y1,y2):
# def __init__(self,energy_list,initial_list):
# def add(self,elmt):
# def sub(self,elmt):
# def findZero(i,x,y1,y2):
# def fsplot_phase(input_data, closed=True, hold=0):
# def svdplot(input_data, fmax=None, hold=0):
# def button_action(label):
# NFFT=(eval(pyfusion.config.get('Plots','NFFT')))
# NFFT = 2048
# class Energy:
#
# Path: pyfusion/data/utils.py
# def unique_id():
# try:
# return str(uuid.uuid4())
# except:
# return ''.join(_random.choice(string.letters) for i in range(50))
#
# Path: pyfusion/orm/utils.py
# def orm_register():
# def reg_item(orm_func):
# pyfusion.orm_manager.add_reg_func(orm_func)
# return orm_func
# return reg_item
. Output only the next line. | for reg in [filter_reg, plot_reg]: |
Given the following code snippet before the placeholder: <|code_start|> def updated_method(input_data, *args, **kwargs):
do_copy = kwargs.pop('copy', True)
if do_copy:
original_hist = input_data.history
input_data = copy.copy(input_data)
copy_history_string = "\n%s > (copy)" %(datetime.now())
input_data.history = original_hist + copy_history_string
args_string = ', '.join(map(str,args))
if args_string is not '':
args_string += ', '
kwargs_string = ', '.join("%s='%s'" %(str(i[0]), str(i[1]))
for i in kwargs.items())
history_string = "\n%s > %s(%s%s)" %(datetime.now(), method.__name__,
args_string, kwargs_string)
input_data.history += history_string
output = method(input_data, *args, **kwargs)
# TODO output.meta.update() looks wrong - if a filter modifies a meta value, does this
# overwrite the modified version with the original?
if output != None:
output.meta.update(input_data.meta)
return output
return updated_method
class MetaMethods(type):
"""Metaclass which provides filter and plot methods for data classes."""
def __new__(cls, name, bases, attrs):
<|code_end|>
, predict the next line using imports from the current file:
import operator
import uuid
import copy
import pyfusion
from datetime import datetime
from pyfusion.conf.utils import import_from_str, get_config_as_dict
from pyfusion.data.filters import filter_reg
from pyfusion.data.plots import plot_reg
from pyfusion.data.utils import unique_id
from pyfusion.orm.utils import orm_register
from sqlalchemy import Table, Column, String, Integer, Float, ForeignKey, \
DateTime, PickleType
from sqlalchemy.orm import reconstructor, mapper, relation, dynamic_loader
from sqlalchemy.orm.collections import column_mapped_collection
and context including class names, function names, and sometimes code from other files:
# Path: pyfusion/conf/utils.py
# def import_from_str(string_value):
# # TODO: make shortcuts for loading from within pyfusion
# split_val = string_value.split('.')
# val_module = __import__('.'.join(split_val[:-1]),
# globals(), locals(),
# [split_val[-1]])
# return val_module.__dict__[split_val[-1]]
#
# def get_config_as_dict(component_type, component_name):
# config_option_list = pyfusion.config.pf_options(component_type, component_name)
# config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
# return dict(map(config_map, config_option_list))
#
# Path: pyfusion/data/filters.py
# def cps(a,b):
# def register(*class_names):
# def reg_item(filter_method):
# def reduce_time(input_data, new_time_range):
# def segment(input_data, n_samples, overlap=1.0, datalist= 0):
# def remove_noncontiguous(input_dataset):
# def normalise(input_data, method='peak', separate=False):
# def svd(input_data):
# def fs_group_geometric(input_data, max_energy = 1.0):
# def fs_group_threshold(input_data, threshold=0.2):
# def flucstruc(input_data, min_dphase = -pi, group=fs_group_geometric, method='rms', separate=True, label=None):
# def subtract_mean(input_data):
# def sp_filter_butterworth_bandpass(input_data, passband, stopband, max_passband_loss, min_stopband_attenuation):
# def correlate(input_data, index_1, index_2, **kwargs):
# def change_time_base(input_data, new_time_base):
#
# Path: pyfusion/data/plots.py
# def register(*class_names):
# def reg_item(plot_method):
# def plot_signals(input_data, filename=None,downsamplefactor=1,n_columns=1):
# def plot_spectrogram(input_data, windowfn=None, channel_number=0, filename=None, coloraxis=None, noverlap=0,NFFT=None, **kwargs):
# def join_ends(inarray,add_2pi = False,add_360deg=False,add_lenarray=False,add_one=False):
# def posNegFill(x,y1,y2):
# def __init__(self,energy_list,initial_list):
# def add(self,elmt):
# def sub(self,elmt):
# def findZero(i,x,y1,y2):
# def fsplot_phase(input_data, closed=True, hold=0):
# def svdplot(input_data, fmax=None, hold=0):
# def button_action(label):
# NFFT=(eval(pyfusion.config.get('Plots','NFFT')))
# NFFT = 2048
# class Energy:
#
# Path: pyfusion/data/utils.py
# def unique_id():
# try:
# return str(uuid.uuid4())
# except:
# return ''.join(_random.choice(string.letters) for i in range(50))
#
# Path: pyfusion/orm/utils.py
# def orm_register():
# def reg_item(orm_func):
# pyfusion.orm_manager.add_reg_func(orm_func)
# return orm_func
# return reg_item
. Output only the next line. | for reg in [filter_reg, plot_reg]: |
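Both rows above quote the same spot in pyfusion's `MetaMethods`: a history-recording decorator plus a metaclass that attaches every registered filter/plot function to the class being built. A minimal, self-contained sketch of that pattern, written for Python 3 (the quoted pyfusion code uses the Python 2 `__metaclass__` spelling); the registry and `Data` class here are illustrative stand-ins, not pyfusion's actual code:

```python
import copy
from datetime import datetime

filter_reg = {}  # registry: class name -> functions to attach as methods

def register(*class_names):
    def reg_item(func):
        for name in class_names:
            filter_reg.setdefault(name, []).append(func)
        return func
    return reg_item

def history_reg_method(method):
    """Copy the input and append a timestamped history entry before delegating."""
    def updated_method(input_data, *args, **kwargs):
        input_data = copy.copy(input_data)
        input_data.history += "\n%s > %s" % (datetime.now(), method.__name__)
        return method(input_data, *args, **kwargs)
    return updated_method

class MetaMethods(type):
    """Attach every function registered for this class name as a method."""
    def __new__(cls, name, bases, attrs):
        for func in filter_reg.get(name, []):
            attrs[func.__name__] = history_reg_method(func)
        return super(MetaMethods, cls).__new__(cls, name, bases, attrs)

@register('Data')
def double(data):
    data.value *= 2
    return data

class Data(metaclass=MetaMethods):
    def __init__(self, value):
        self.value = value
        self.history = "%s > New Data" % datetime.now()

d = Data(3).double()   # method injected by the metaclass
print(d.value)         # 6, with a call record appended to d.history
```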
Continue the code snippet: <|code_start|>
def save(self):
if pyfusion.orm_manager.IS_ACTIVE:
# this may be inefficient: get it working, then get it fast
self.channels.save()
session = pyfusion.orm_manager.Session()
session.add(self)
session.commit()
session.close()
@orm_register()
def orm_load_basedata(man):
man.basedata_table = Table('basedata', man.metadata,
Column('basedata_id', Integer, primary_key=True),
Column('type', String(30), nullable=False),
Column('meta', PickleType(comparator=operator.eq))
)
#man.metadata.create_all()
mapper(BaseData, man.basedata_table, polymorphic_on=man.basedata_table.c.type, polymorphic_identity='basedata')
class BaseDataSet(object):
__metaclass__ = MetaMethods
def __init__(self, label=''):
self.meta = PfMetaData()
self.created = datetime.now()
self.history = "%s > New %s" %(self.created, self.__class__.__name__)
if label == '':
<|code_end|>
. Use current file imports:
import operator
import uuid
import copy
import pyfusion
from datetime import datetime
from pyfusion.conf.utils import import_from_str, get_config_as_dict
from pyfusion.data.filters import filter_reg
from pyfusion.data.plots import plot_reg
from pyfusion.data.utils import unique_id
from pyfusion.orm.utils import orm_register
from sqlalchemy import Table, Column, String, Integer, Float, ForeignKey, \
DateTime, PickleType
from sqlalchemy.orm import reconstructor, mapper, relation, dynamic_loader
from sqlalchemy.orm.collections import column_mapped_collection
and context (classes, functions, or code) from other files:
# Path: pyfusion/conf/utils.py
# def import_from_str(string_value):
# # TODO: make shortcuts for loading from within pyfusion
# split_val = string_value.split('.')
# val_module = __import__('.'.join(split_val[:-1]),
# globals(), locals(),
# [split_val[-1]])
# return val_module.__dict__[split_val[-1]]
#
# def get_config_as_dict(component_type, component_name):
# config_option_list = pyfusion.config.pf_options(component_type, component_name)
# config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
# return dict(map(config_map, config_option_list))
#
# Path: pyfusion/data/filters.py
# def cps(a,b):
# def register(*class_names):
# def reg_item(filter_method):
# def reduce_time(input_data, new_time_range):
# def segment(input_data, n_samples, overlap=1.0, datalist= 0):
# def remove_noncontiguous(input_dataset):
# def normalise(input_data, method='peak', separate=False):
# def svd(input_data):
# def fs_group_geometric(input_data, max_energy = 1.0):
# def fs_group_threshold(input_data, threshold=0.2):
# def flucstruc(input_data, min_dphase = -pi, group=fs_group_geometric, method='rms', separate=True, label=None):
# def subtract_mean(input_data):
# def sp_filter_butterworth_bandpass(input_data, passband, stopband, max_passband_loss, min_stopband_attenuation):
# def correlate(input_data, index_1, index_2, **kwargs):
# def change_time_base(input_data, new_time_base):
#
# Path: pyfusion/data/plots.py
# def register(*class_names):
# def reg_item(plot_method):
# def plot_signals(input_data, filename=None,downsamplefactor=1,n_columns=1):
# def plot_spectrogram(input_data, windowfn=None, channel_number=0, filename=None, coloraxis=None, noverlap=0,NFFT=None, **kwargs):
# def join_ends(inarray,add_2pi = False,add_360deg=False,add_lenarray=False,add_one=False):
# def posNegFill(x,y1,y2):
# def __init__(self,energy_list,initial_list):
# def add(self,elmt):
# def sub(self,elmt):
# def findZero(i,x,y1,y2):
# def fsplot_phase(input_data, closed=True, hold=0):
# def svdplot(input_data, fmax=None, hold=0):
# def button_action(label):
# NFFT=(eval(pyfusion.config.get('Plots','NFFT')))
# NFFT = 2048
# class Energy:
#
# Path: pyfusion/data/utils.py
# def unique_id():
# try:
# return str(uuid.uuid4())
# except:
# return ''.join(_random.choice(string.letters) for i in range(50))
#
# Path: pyfusion/orm/utils.py
# def orm_register():
# def reg_item(orm_func):
# pyfusion.orm_manager.add_reg_func(orm_func)
# return orm_func
# return reg_item
. Output only the next line. | label = unique_id() |
Given snippet: <|code_start|>
def add_coords(self, **kwargs):
self.__dict__.update(kwargs)
def load_from_config(self, **kwargs):
for kw in kwargs.iteritems():
if kw[0] == 'coord_transform':
transform_list = pyfusion.config.pf_options('CoordTransform', kw[1])
for transform_name in transform_list:
transform_class_str = pyfusion.config.pf_get('CoordTransform', kw[1], transform_name)
transform_class = import_from_str(transform_class_str)
self.load_transform(transform_class)
elif kw[0].startswith('coords_'):
coord_values = tuple(map(float,kw[1].split(',')))
self.add_coords(**{kw[0][7:]: coord_values})
def load_transform(self, transform_class):
def _new_transform_method(**kwargs):
return transform_class().transform(self.__dict__.get(transform_class.input_coords),**kwargs)
self.__dict__.update({transform_class.output_coords:_new_transform_method})
def save(self):
if pyfusion.orm_manager.IS_ACTIVE:
# this may be inefficient: get it working, then get it fast
session = pyfusion.orm_manager.Session()
session.add(self)
session.commit()
session.close()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import operator
import uuid
import copy
import pyfusion
from datetime import datetime
from pyfusion.conf.utils import import_from_str, get_config_as_dict
from pyfusion.data.filters import filter_reg
from pyfusion.data.plots import plot_reg
from pyfusion.data.utils import unique_id
from pyfusion.orm.utils import orm_register
from sqlalchemy import Table, Column, String, Integer, Float, ForeignKey, \
DateTime, PickleType
from sqlalchemy.orm import reconstructor, mapper, relation, dynamic_loader
from sqlalchemy.orm.collections import column_mapped_collection
and context:
# Path: pyfusion/conf/utils.py
# def import_from_str(string_value):
# # TODO: make shortcuts for loading from within pyfusion
# split_val = string_value.split('.')
# val_module = __import__('.'.join(split_val[:-1]),
# globals(), locals(),
# [split_val[-1]])
# return val_module.__dict__[split_val[-1]]
#
# def get_config_as_dict(component_type, component_name):
# config_option_list = pyfusion.config.pf_options(component_type, component_name)
# config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
# return dict(map(config_map, config_option_list))
#
# Path: pyfusion/data/filters.py
# def cps(a,b):
# def register(*class_names):
# def reg_item(filter_method):
# def reduce_time(input_data, new_time_range):
# def segment(input_data, n_samples, overlap=1.0, datalist= 0):
# def remove_noncontiguous(input_dataset):
# def normalise(input_data, method='peak', separate=False):
# def svd(input_data):
# def fs_group_geometric(input_data, max_energy = 1.0):
# def fs_group_threshold(input_data, threshold=0.2):
# def flucstruc(input_data, min_dphase = -pi, group=fs_group_geometric, method='rms', separate=True, label=None):
# def subtract_mean(input_data):
# def sp_filter_butterworth_bandpass(input_data, passband, stopband, max_passband_loss, min_stopband_attenuation):
# def correlate(input_data, index_1, index_2, **kwargs):
# def change_time_base(input_data, new_time_base):
#
# Path: pyfusion/data/plots.py
# def register(*class_names):
# def reg_item(plot_method):
# def plot_signals(input_data, filename=None,downsamplefactor=1,n_columns=1):
# def plot_spectrogram(input_data, windowfn=None, channel_number=0, filename=None, coloraxis=None, noverlap=0,NFFT=None, **kwargs):
# def join_ends(inarray,add_2pi = False,add_360deg=False,add_lenarray=False,add_one=False):
# def posNegFill(x,y1,y2):
# def __init__(self,energy_list,initial_list):
# def add(self,elmt):
# def sub(self,elmt):
# def findZero(i,x,y1,y2):
# def fsplot_phase(input_data, closed=True, hold=0):
# def svdplot(input_data, fmax=None, hold=0):
# def button_action(label):
# NFFT=(eval(pyfusion.config.get('Plots','NFFT')))
# NFFT = 2048
# class Energy:
#
# Path: pyfusion/data/utils.py
# def unique_id():
# try:
# return str(uuid.uuid4())
# except:
# return ''.join(_random.choice(string.letters) for i in range(50))
#
# Path: pyfusion/orm/utils.py
# def orm_register():
# def reg_item(orm_func):
# pyfusion.orm_manager.add_reg_func(orm_func)
# return orm_func
# return reg_item
which might include code, classes, or functions. Output only the next line. | @orm_register() |
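The `load_transform` idiom in this row works because each transform class declares which coordinate attribute it consumes (`input_coords`) and what derived method name it provides (`output_coords`). A stripped-down sketch of just that mechanism; the `CylToCart` transform is invented for illustration:

```python
from math import cos, sin

class CylToCart(object):
    """Transform declaring the coordinate attribute it consumes and produces."""
    input_coords = 'cylindrical'
    output_coords = 'cartesian'

    def transform(self, coords, **kwargs):
        r, theta, z = coords
        return (r * cos(theta), r * sin(theta), z)

class Coords(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def load_transform(self, transform_class):
        def _new_transform_method(**kwargs):
            return transform_class().transform(
                self.__dict__.get(transform_class.input_coords), **kwargs)
        self.__dict__.update({transform_class.output_coords: _new_transform_method})

c = Coords(cylindrical=(1.0, 0.0, 0.0))
c.load_transform(CylToCart)
print(c.cartesian())   # (1.0, 0.0, 0.0)
```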
Here is a snippet: <|code_start|>"""
generate flucstrucs for benchmarking
"""
THIS_DIR = os.path.dirname(__file__)
stats_file = os.path.join(THIS_DIR, "stats", "fs")
n_ch = 30
n_samples = 1024*100
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import cProfile, pstats, os
from pyfusion.data.tests import get_multimode_test_data, get_n_channels
from pyfusion.data.timeseries import Timebase
and context from other files:
# Path: pyfusion/data/tests.py
# def get_multimode_test_data(channels = get_n_channels(DEFAULT_N_CHANNELS),
# timebase = DEFAULT_TIMEBASE,
# modes = [mode_1, mode_2], noise = DEFAULT_NOISE):
# """Generate synthetic multi-channel data for testing."""
# n_channels = len(channels)
# data_size = (n_channels, timebase.size)
# data_array = noise*2*(np.random.random(data_size)-0.5)
# timebase_matrix = np.resize(timebase, data_size)
# angle_matrix = np.resize(np.array([i.coords.cylindrical[1] for i in channels]),
# data_size[::-1]).T
# for m in modes:
# data_array += m['amp']*np.cos(2*np.pi*m['freq']*timebase_matrix +
# m['mode_number']*angle_matrix + m['phase'])
# output = TimeseriesData(timebase=timebase, signal=Signal(data_array),
# channels=channels)
# return output
#
# def get_n_channels(n_ch):
# """Return a list of n_ch channels."""
# poloidal_coords = 2*np.pi*np.arange(n_ch)/n_ch
# channel_gen = (Channel('ch_%02d' %i, Coords('cylindrical', (1.0,i,0.0)))
# for i in poloidal_coords)
# return ChannelList(*channel_gen)
#
# Path: pyfusion/data/timeseries.py
# class Timebase(np.ndarray):
# """Timebase vector with parameterised internal representation.
#
# see doc/subclassing.py in numpy code for details on subclassing ndarray
# """
# def __new__(cls, input_array):
# # should this follow the example in doc/subclassing.py?... (it doesn't)
# obj = np.asarray(input_array).view(cls).copy()
# obj.sample_freq = 1.0/(obj[1]-obj[0])
# obj.meta = PfMetaData()
# return obj
#
# def is_contiguous(self):
# return max(((self[1:]-self[:-1])-1.0/self.sample_freq)**2) < (0.1/self.sample_freq)**2
#
# def normalise_freq(self, input_freq):
# """Normalise input frequencies to [0,1] where 1 is pi*sample_freq"""
# try:
# return input_freq/(0.5*self.sample_freq)
# except:
# # sample_freq should maybe be self.sample_freq? i don't remember
# return [i/(0.5*self.sample_freq) for i in sample_freq]
#
# def __array_finalize__(self, obj):
# # ``self`` is a new object resulting from
# # ndarray.__new__(InfoArray, ...), therefore it only has
# # attributes that the ndarray.__new__ constructor gave it -
# # i.e. those of a standard ndarray.
# #
# # We could have got to the ndarray.__new__ call in 3 ways:
# # From an explicit constructor - e.g. InfoArray():
# # obj is None
# # (we're in the middle of the InfoArray.__new__
# # constructor, and self.info will be set when we return to
# # InfoArray.__new__)
# if obj is None: return
# # From view casting - e.g arr.view(InfoArray):
# # obj is arr
# # (type(obj) can be InfoArray)
# # From new-from-template - e.g infoarr[:3]
# # type(obj) is InfoArray
# #
# # Note that it is here, rather than in the __new__ method,
# # that we set the default value for 'info', because this
# # method sees all creation of default objects - with the
# # InfoArray.__new__ constructor, but also with
# # arr.view(InfoArray).
# self.sample_freq = getattr(obj, 'sample_freq', None)
# self.meta = getattr(obj, 'meta', None)
# # We do not need to return anything
, which may include functions, classes, or code. Output only the next line. | data = get_multimode_test_data(channels=get_n_channels(n_ch), |
Next line prediction: <|code_start|>"""
generate flucstrucs for benchmarking
"""
THIS_DIR = os.path.dirname(__file__)
stats_file = os.path.join(THIS_DIR, "stats", "fs")
n_ch = 30
n_samples = 1024*100
<|code_end|>
. Use current file imports:
(import numpy as np
import cProfile, pstats, os
from pyfusion.data.tests import get_multimode_test_data, get_n_channels
from pyfusion.data.timeseries import Timebase)
and context including class names, function names, or small code snippets from other files:
# Path: pyfusion/data/tests.py
# def get_multimode_test_data(channels = get_n_channels(DEFAULT_N_CHANNELS),
# timebase = DEFAULT_TIMEBASE,
# modes = [mode_1, mode_2], noise = DEFAULT_NOISE):
# """Generate synthetic multi-channel data for testing."""
# n_channels = len(channels)
# data_size = (n_channels, timebase.size)
# data_array = noise*2*(np.random.random(data_size)-0.5)
# timebase_matrix = np.resize(timebase, data_size)
# angle_matrix = np.resize(np.array([i.coords.cylindrical[1] for i in channels]),
# data_size[::-1]).T
# for m in modes:
# data_array += m['amp']*np.cos(2*np.pi*m['freq']*timebase_matrix +
# m['mode_number']*angle_matrix + m['phase'])
# output = TimeseriesData(timebase=timebase, signal=Signal(data_array),
# channels=channels)
# return output
#
# def get_n_channels(n_ch):
# """Return a list of n_ch channels."""
# poloidal_coords = 2*np.pi*np.arange(n_ch)/n_ch
# channel_gen = (Channel('ch_%02d' %i, Coords('cylindrical', (1.0,i,0.0)))
# for i in poloidal_coords)
# return ChannelList(*channel_gen)
#
# Path: pyfusion/data/timeseries.py
# class Timebase(np.ndarray):
# """Timebase vector with parameterised internal representation.
#
# see doc/subclassing.py in numpy code for details on subclassing ndarray
# """
# def __new__(cls, input_array):
# # should this follow the example in doc/subclassing.py?... (it doesn't)
# obj = np.asarray(input_array).view(cls).copy()
# obj.sample_freq = 1.0/(obj[1]-obj[0])
# obj.meta = PfMetaData()
# return obj
#
# def is_contiguous(self):
# return max(((self[1:]-self[:-1])-1.0/self.sample_freq)**2) < (0.1/self.sample_freq)**2
#
# def normalise_freq(self, input_freq):
# """Normalise input frequencies to [0,1] where 1 is pi*sample_freq"""
# try:
# return input_freq/(0.5*self.sample_freq)
# except:
# # sample_freq should maybe be self.sample_freq? i don't remember
# return [i/(0.5*self.sample_freq) for i in sample_freq]
#
# def __array_finalize__(self, obj):
# # ``self`` is a new object resulting from
# # ndarray.__new__(InfoArray, ...), therefore it only has
# # attributes that the ndarray.__new__ constructor gave it -
# # i.e. those of a standard ndarray.
# #
# # We could have got to the ndarray.__new__ call in 3 ways:
# # From an explicit constructor - e.g. InfoArray():
# # obj is None
# # (we're in the middle of the InfoArray.__new__
# # constructor, and self.info will be set when we return to
# # InfoArray.__new__)
# if obj is None: return
# # From view casting - e.g arr.view(InfoArray):
# # obj is arr
# # (type(obj) can be InfoArray)
# # From new-from-template - e.g infoarr[:3]
# # type(obj) is InfoArray
# #
# # Note that it is here, rather than in the __new__ method,
# # that we set the default value for 'info', because this
# # method sees all creation of default objects - with the
# # InfoArray.__new__ constructor, but also with
# # arr.view(InfoArray).
# self.sample_freq = getattr(obj, 'sample_freq', None)
# self.meta = getattr(obj, 'meta', None)
# # We do not need to return anything
. Output only the next line. | data = get_multimode_test_data(channels=get_n_channels(n_ch), |
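The `Timebase` context quoted alongside these benchmark rows follows numpy's standard recipe for subclassing `ndarray`: attach extra attributes in `__new__`, and restore them in `__array_finalize__` so they survive view casting and slicing. A minimal working version of just that recipe:

```python
import numpy as np

class Timebase(np.ndarray):
    """1-D time vector that remembers its sampling frequency."""
    def __new__(cls, input_array):
        obj = np.asarray(input_array).view(cls).copy()
        obj.sample_freq = 1.0 / (obj[1] - obj[0])
        return obj

    def __array_finalize__(self, obj):
        # Runs on explicit construction, view casting and slicing;
        # copy the attribute from the template object when present.
        if obj is None:
            return
        self.sample_freq = getattr(obj, 'sample_freq', None)

tb = Timebase(np.arange(10) * 1e-6)
print(tb.sample_freq)       # 1000000.0 (1 MHz sampling)
print(tb[2:5].sample_freq)  # attribute survives slicing
```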
Given the following code snippet before the placeholder: <|code_start|>"""
generate flucstrucs for benchmarking
"""
THIS_DIR = os.path.dirname(__file__)
stats_file = os.path.join(THIS_DIR, "stats", "fs")
n_ch = 30
n_samples = 1024*100
data = get_multimode_test_data(channels=get_n_channels(n_ch),
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import cProfile, pstats, os
from pyfusion.data.tests import get_multimode_test_data, get_n_channels
from pyfusion.data.timeseries import Timebase
and context including class names, function names, and sometimes code from other files:
# Path: pyfusion/data/tests.py
# def get_multimode_test_data(channels = get_n_channels(DEFAULT_N_CHANNELS),
# timebase = DEFAULT_TIMEBASE,
# modes = [mode_1, mode_2], noise = DEFAULT_NOISE):
# """Generate synthetic multi-channel data for testing."""
# n_channels = len(channels)
# data_size = (n_channels, timebase.size)
# data_array = noise*2*(np.random.random(data_size)-0.5)
# timebase_matrix = np.resize(timebase, data_size)
# angle_matrix = np.resize(np.array([i.coords.cylindrical[1] for i in channels]),
# data_size[::-1]).T
# for m in modes:
# data_array += m['amp']*np.cos(2*np.pi*m['freq']*timebase_matrix +
# m['mode_number']*angle_matrix + m['phase'])
# output = TimeseriesData(timebase=timebase, signal=Signal(data_array),
# channels=channels)
# return output
#
# def get_n_channels(n_ch):
# """Return a list of n_ch channels."""
# poloidal_coords = 2*np.pi*np.arange(n_ch)/n_ch
# channel_gen = (Channel('ch_%02d' %i, Coords('cylindrical', (1.0,i,0.0)))
# for i in poloidal_coords)
# return ChannelList(*channel_gen)
#
# Path: pyfusion/data/timeseries.py
# class Timebase(np.ndarray):
# """Timebase vector with parameterised internal representation.
#
# see doc/subclassing.py in numpy code for details on subclassing ndarray
# """
# def __new__(cls, input_array):
# # should this follow the example in doc/subclassing.py?... (it doesn't)
# obj = np.asarray(input_array).view(cls).copy()
# obj.sample_freq = 1.0/(obj[1]-obj[0])
# obj.meta = PfMetaData()
# return obj
#
# def is_contiguous(self):
# return max(((self[1:]-self[:-1])-1.0/self.sample_freq)**2) < (0.1/self.sample_freq)**2
#
# def normalise_freq(self, input_freq):
# """Normalise input frequencies to [0,1] where 1 is pi*sample_freq"""
# try:
# return input_freq/(0.5*self.sample_freq)
# except:
# # sample_freq should maybe be self.sample_freq? i don't remember
# return [i/(0.5*self.sample_freq) for i in sample_freq]
#
# def __array_finalize__(self, obj):
# # ``self`` is a new object resulting from
# # ndarray.__new__(InfoArray, ...), therefore it only has
# # attributes that the ndarray.__new__ constructor gave it -
# # i.e. those of a standard ndarray.
# #
# # We could have got to the ndarray.__new__ call in 3 ways:
# # From an explicit constructor - e.g. InfoArray():
# # obj is None
# # (we're in the middle of the InfoArray.__new__
# # constructor, and self.info will be set when we return to
# # InfoArray.__new__)
# if obj is None: return
# # From view casting - e.g arr.view(InfoArray):
# # obj is arr
# # (type(obj) can be InfoArray)
# # From new-from-template - e.g infoarr[:3]
# # type(obj) is InfoArray
# #
# # Note that it is here, rather than in the __new__ method,
# # that we set the default value for 'info', because this
# # method sees all creation of default objects - with the
# # InfoArray.__new__ constructor, but also with
# # arr.view(InfoArray).
# self.sample_freq = getattr(obj, 'sample_freq', None)
# self.meta = getattr(obj, 'meta', None)
# # We do not need to return anything
. Output only the next line. | timebase=Timebase(np.arange(n_samples)*1.e-6), |
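The three benchmark rows above import `cProfile`/`pstats` and build a `stats_file` path, which implies the standard stdlib profiling flow. A generic sketch of that flow; the `workload` function is a stand-in, not the benchmark's actual body:

```python
import cProfile
import pstats

def workload():
    # stand-in for the real work, e.g. computing flucstrucs on test data
    return sum(i * i for i in range(10 ** 6))

cProfile.run('workload()', 'fs_stats')          # profile and dump raw stats to a file
stats = pstats.Stats('fs_stats')
stats.sort_stats('cumulative').print_stats(10)  # show the 10 costliest calls
```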
Using the snippet: <|code_start|>except:
# importing MDSPlusBaseDataFetcher will fail if MDSPlus is not
# installed. in general, we avoid mds in nosttests by calling
# nosetests -a '!mds' pyfusion but in this case, the import occurs
# outside of a class definition, and can't be avoided with -a '!mds'
# As a kludge, we'll put another class into the namespace so we
# don't get a syntax error when we subclass it. (We don't care that
# the subclass isn't what we want because we're not going to use it
# anyway if we don't have MDSPlus)
TEST_DATA_PATH = os.path.abspath(os.path.dirname(__file__))
TEST_CONFIG_FILE = os.path.join(TEST_DATA_PATH, "test.cfg")
TEST_MDSPLUS_SERVER = 'localhost:8001'
class DummyMDSData(BaseData):
pass
class DummyMDSDataFetcher(MDSPlusBaseDataFetcher):
"""Check that we have a mds data object passed though"""
def do_fetch(self):
# this breaks unit tests:
#data = DummyMDSData()
# this doesn't. Why??
data = BaseData()
data.meta['mds_Data'] = self.acq._Data
return data
<|code_end|>
, determine the next line of code. You have imports:
import os
import pyfusion
import pyfusion
import pyfusion
from pyfusion.test.tests import PfTestBase, BasePyfusionTestCase
from pyfusion.data.base import BaseData
from pyfusion.acquisition.MDSPlus.fetch import MDSPlusBaseDataFetcher
from pyfusion.data.base import BaseData as MDSPlusBaseDataFetcher
from pyfusion.acquisition.MDSPlus.acq import MDSPlusAcquisition
from pyfusion.acquisition.base import BaseAcquisition
from pyfusion.acquisition.MDSPlus.acq import MDSPlusAcquisition
from pyfusion.acquisition.base import BaseDataFetcher
from pyfusion.acquisition.MDSPlus.fetch import MDSPlusDataFetcher
from pyfusion.acquisition.MDSPlus.acq import MDSPlusAcquisition
from pyfusion.data.timeseries import TimeseriesData
from pyfusion.acquisition.utils import get_acq_from_config
and context (class names, function names, or code) available:
# Path: pyfusion/test/tests.py
# class PfTestBase(object):
# """Base class for generated sql and non-sql test cases."""
# pass
#
# class BasePyfusionTestCase(unittest.TestCase):
# """Simple customisation of TestCase."""
# def __init__(self, *args):
# self.listed_device = CONFIG_TEST_DEVICE_NAME
# self.listed_empty_device = CONFIG_EMPTY_TEST_DEVICE_NAME
# self.unlisted_device = NONCONFIG_TEST_DEVICE_NAME
# self.shot_number = TEST_SHOT_NUMBER
# self.unlisted_config_section_type = UNLISTED_CONFIG_SECTION_TYPE
# unittest.TestCase.__init__(self, *args)
#
# Path: pyfusion/data/base.py
# class BaseData(object):
# """Base class for handling processed data.
#
# In general, specialised subclasses of BaseData will be used
# to handle processed data rather than BaseData itself.
#
# Usage: ..........
# """
# __metaclass__ = MetaMethods
#
# def __init__(self):
# self.meta = PfMetaData()
# self.history = "%s > New %s" %(datetime.now(), self.__class__.__name__)
# if not hasattr(self, 'channels'):
# self.channels = ChannelList()
#
# def save(self):
# if pyfusion.orm_manager.IS_ACTIVE:
# # this may be inefficient: get it working, then get it fast
# self.channels.save()
# session = pyfusion.orm_manager.Session()
# session.add(self)
# session.commit()
# session.close()
. Output only the next line. | class CheckMDSPlusDataAcquisition(PfTestBase): |
Continue the code snippet: <|code_start|> # df_str = "pyfusion.acquisition.MDSPlus.tests.DummyMDSDataFetcher"
# test_data = test_acq.getdata(dummy_shot, data_fetcher=df_str, mds_tree="H1DATA")
# from MDSplus import Data
# self.assertEqual(Data.__dict__, test_data.meta['mds_Data'].__dict__)
CheckMDSPlusDataFetchers.h1 = True
CheckMDSPlusDataFetchers.mds = True
CheckMDSPlusDataFetchers.net = True
CheckMDSPlusDataFetchers.slow = True
class CheckMDSPlusH1Connection(PfTestBase):
"""tests which require access to h1data.anu.edu.au"""
def testH1TimeseriesData(self):
h1mds = MDSPlusAcquisition(server=TEST_MDSPLUS_SERVER)
df_str = "pyfusion.acquisition.MDSPlus.fetch.MDSPlusDataFetcher"
test_data = h1mds.getdata(58133,
data_fetcher = df_str,
mds_path=r"\h1data::top.operations.mirnov:a14_14:input_1")
self.assertTrue(isinstance(test_data, TimeseriesData))
self.assertEqual(test_data.signal[0], -0.01953125)
CheckMDSPlusH1Connection.h1 = True
CheckMDSPlusH1Connection.mds = True
CheckMDSPlusH1Connection.net = True
CheckMDSPlusH1Connection.slow = True
<|code_end|>
. Use current file imports:
import os
import pyfusion
import pyfusion
import pyfusion
from pyfusion.test.tests import PfTestBase, BasePyfusionTestCase
from pyfusion.data.base import BaseData
from pyfusion.acquisition.MDSPlus.fetch import MDSPlusBaseDataFetcher
from pyfusion.data.base import BaseData as MDSPlusBaseDataFetcher
from pyfusion.acquisition.MDSPlus.acq import MDSPlusAcquisition
from pyfusion.acquisition.base import BaseAcquisition
from pyfusion.acquisition.MDSPlus.acq import MDSPlusAcquisition
from pyfusion.acquisition.base import BaseDataFetcher
from pyfusion.acquisition.MDSPlus.fetch import MDSPlusDataFetcher
from pyfusion.acquisition.MDSPlus.acq import MDSPlusAcquisition
from pyfusion.data.timeseries import TimeseriesData
from pyfusion.acquisition.utils import get_acq_from_config
and context (classes, functions, or code) from other files:
# Path: pyfusion/test/tests.py
# class PfTestBase(object):
# """Base class for generated sql and non-sql test cases."""
# pass
#
# class BasePyfusionTestCase(unittest.TestCase):
# """Simple customisation of TestCase."""
# def __init__(self, *args):
# self.listed_device = CONFIG_TEST_DEVICE_NAME
# self.listed_empty_device = CONFIG_EMPTY_TEST_DEVICE_NAME
# self.unlisted_device = NONCONFIG_TEST_DEVICE_NAME
# self.shot_number = TEST_SHOT_NUMBER
# self.unlisted_config_section_type = UNLISTED_CONFIG_SECTION_TYPE
# unittest.TestCase.__init__(self, *args)
#
# Path: pyfusion/data/base.py
# class BaseData(object):
# """Base class for handling processed data.
#
# In general, specialised subclasses of BaseData will be used
# to handle processed data rather than BaseData itself.
#
# Usage: ..........
# """
# __metaclass__ = MetaMethods
#
# def __init__(self):
# self.meta = PfMetaData()
# self.history = "%s > New %s" %(datetime.now(), self.__class__.__name__)
# if not hasattr(self, 'channels'):
# self.channels = ChannelList()
#
# def save(self):
# if pyfusion.orm_manager.IS_ACTIVE:
# # this may be inefficient: get it working, then get it fast
# self.channels.save()
# session = pyfusion.orm_manager.Session()
# session.add(self)
# session.commit()
# session.close()
. Output only the next line. | class MDSAcqTestCase(BasePyfusionTestCase): |
Here is a snippet: <|code_start|>"""Test code for MDSPlus data acquisition."""
try:
except:
# importing MDSPlusBaseDataFetcher will fail if MDSPlus is not
# installed. in general, we avoid mds in nosttests by calling
# nosetests -a '!mds' pyfusion but in this case, the import occurs
# outside of a class definition, and can't be avoided with -a '!mds'
# As a kludge, we'll put another class into the namespace so we
# don't get a syntax error when we subclass it. (We don't care that
# the subclass isn't what we want because we're not going to use it
# anyway if we don't have MDSPlus)
TEST_DATA_PATH = os.path.abspath(os.path.dirname(__file__))
TEST_CONFIG_FILE = os.path.join(TEST_DATA_PATH, "test.cfg")
TEST_MDSPLUS_SERVER = 'localhost:8001'
<|code_end|>
. Write the next line using the current file imports:
import os
import pyfusion
import pyfusion
import pyfusion
from pyfusion.test.tests import PfTestBase, BasePyfusionTestCase
from pyfusion.data.base import BaseData
from pyfusion.acquisition.MDSPlus.fetch import MDSPlusBaseDataFetcher
from pyfusion.data.base import BaseData as MDSPlusBaseDataFetcher
from pyfusion.acquisition.MDSPlus.acq import MDSPlusAcquisition
from pyfusion.acquisition.base import BaseAcquisition
from pyfusion.acquisition.MDSPlus.acq import MDSPlusAcquisition
from pyfusion.acquisition.base import BaseDataFetcher
from pyfusion.acquisition.MDSPlus.fetch import MDSPlusDataFetcher
from pyfusion.acquisition.MDSPlus.acq import MDSPlusAcquisition
from pyfusion.data.timeseries import TimeseriesData
from pyfusion.acquisition.utils import get_acq_from_config
and context from other files:
# Path: pyfusion/test/tests.py
# class PfTestBase(object):
# """Base class for generated sql and non-sql test cases."""
# pass
#
# class BasePyfusionTestCase(unittest.TestCase):
# """Simple customisation of TestCase."""
# def __init__(self, *args):
# self.listed_device = CONFIG_TEST_DEVICE_NAME
# self.listed_empty_device = CONFIG_EMPTY_TEST_DEVICE_NAME
# self.unlisted_device = NONCONFIG_TEST_DEVICE_NAME
# self.shot_number = TEST_SHOT_NUMBER
# self.unlisted_config_section_type = UNLISTED_CONFIG_SECTION_TYPE
# unittest.TestCase.__init__(self, *args)
#
# Path: pyfusion/data/base.py
# class BaseData(object):
# """Base class for handling processed data.
#
# In general, specialised subclasses of BaseData will be used
# to handle processed data rather than BaseData itself.
#
# Usage: ..........
# """
# __metaclass__ = MetaMethods
#
# def __init__(self):
# self.meta = PfMetaData()
# self.history = "%s > New %s" %(datetime.now(), self.__class__.__name__)
# if not hasattr(self, 'channels'):
# self.channels = ChannelList()
#
# def save(self):
# if pyfusion.orm_manager.IS_ACTIVE:
# # this may be inefficient: get it working, then get it fast
# self.channels.save()
# session = pyfusion.orm_manager.Session()
# session.add(self)
# session.commit()
# session.close()
, which may include functions, classes, or code. Output only the next line. | class DummyMDSData(BaseData): |
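The try/except arrangement these MDSPlus rows describe is the usual way to keep a test module importable when an optional backend is missing: on `ImportError`, bind a harmless stand-in to the same name so later subclass definitions still parse. A generic sketch, assuming only that the optional package (here MDSplus, which exposes `Tree`) may be absent:

```python
try:
    from MDSplus import Tree            # optional dependency; may be absent
    HAVE_MDSPLUS = True
except ImportError:
    HAVE_MDSPLUS = False
    class Tree(object):                 # stand-in so the subclass below parses
        pass

class MyTreeWrapper(Tree):
    """Only meaningful when HAVE_MDSPLUS is True; importing never fails."""
    pass
```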
Next line prediction: <|code_start|>tf.compat.v1.enable_eager_execution()
class _VariablesTest():
def testAttributes(self):
# Test that after converting an initializer into a variable all the
# attributes stays the same.
tens = initializers.random_tensor([2, 3, 2], tt_rank=2, dtype=self.dtype)
<|code_end|>
. Use current file imports:
(import numpy as np
import tensorflow as tf
from t3f import variables
from t3f import ops
from t3f import initializers)
and context including class names, function names, or small code snippets from other files:
# Path: t3f/variables.py
# def get_variable(name,
# dtype=None,
# initializer=None,
# regularizer=None,
# trainable=True,
# collections=None,
# caching_device=None,
# validate_shape=True):
# def assign(ref, value, validate_shape=None, use_locking=None, name=None):
#
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
#
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
. Output only the next line. | tens_v = variables.get_variable('tt_tens', initializer=tens) |
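Going only by the signatures quoted in this row's context (`variables.get_variable(name, initializer=...)` turning TT cores into `tf.Variable`s, and `ops.full` densifying the result), the workflow under test looks roughly like this sketch:

```python
import tensorflow as tf
from t3f import initializers, ops, variables

tf.compat.v1.enable_eager_execution()

init = initializers.random_tensor([2, 3, 2], tt_rank=2)
var = variables.get_variable('tt_tens', initializer=init)  # cores become variables
print(ops.full(var).shape)                                 # (2, 3, 2)
```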
Here is a snippet: <|code_start|>tf.compat.v1.enable_eager_execution()
class _VariablesTest():
def testAttributes(self):
# Test that after converting an initializer into a variable all the
# attributes stays the same.
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import tensorflow as tf
from t3f import variables
from t3f import ops
from t3f import initializers
and context from other files:
# Path: t3f/variables.py
# def get_variable(name,
# dtype=None,
# initializer=None,
# regularizer=None,
# trainable=True,
# collections=None,
# caching_device=None,
# validate_shape=True):
# def assign(ref, value, validate_shape=None, use_locking=None, name=None):
#
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
#
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
, which may include functions, classes, or code. Output only the next line. | tens = initializers.random_tensor([2, 3, 2], tt_rank=2, dtype=self.dtype) |
Given snippet: <|code_start|>tf.compat.v1.enable_eager_execution()
class UtilsTest(tf.test.TestCase):
def testUnravelIndex(self):
# 2D.
shape = (7, 6)
linear_idx = [22, 41, 37]
desired = [[3, 4], [6, 5], [6, 1]]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
import tensorflow as tf
from t3f import utils
and context:
# Path: t3f/utils.py
# def unravel_index(indices, shape):
# def replace_tf_svd_with_np_svd():
# def my_svd(tensor, full_matrices=False, compute_uv=True):
# def in_eager_mode():
which might include code, classes, or functions. Output only the next line. | actual = utils.unravel_index(linear_idx, shape) |
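The `desired` values in this row are easy to verify by hand, since `t3f.utils.unravel_index` mirrors numpy's (modulo returning index pairs rather than a tuple of arrays): linear index 22 in a (7, 6) array sits at row 22 // 6 = 3, column 22 % 6 = 4, and likewise for the others:

```python
import numpy as np

rows, cols = np.unravel_index([22, 41, 37], (7, 6))
print(np.stack([rows, cols], axis=1))
# [[3 4]
#  [6 5]
#  [6 1]]
```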
Using the snippet: <|code_start|>tf.compat.v1.enable_eager_execution()
class _ApproximateTest():
def testAddN(self):
# Sum a bunch of TT-matrices.
tt_a = initializers.random_matrix(((2, 1, 4), (2, 2, 2)), tt_rank=2,
dtype=self.dtype)
tt_b = initializers.random_matrix(((2, 1, 4), (2, 2, 2)),
tt_rank=[1, 2, 4, 1], dtype=self.dtype)
def desired(tt_objects):
res = tt_objects[0]
for tt in tt_objects[1:]:
res += tt
return res
<|code_end|>
, determine the next line of code. You have imports:
import numpy as np
import tensorflow as tf
from t3f import ops
from t3f import approximate
from t3f import initializers
and context (class names, function names, or code) available:
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
#
# Path: t3f/approximate.py
# def add_n(tt_objects, max_tt_rank, name='t3f_approximate_add_n'):
# def reduce_sum_batch(tt_batch, max_tt_rank, coef=None,
# name='t3f_approximate_reduce_sum_batch'):
#
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
. Output only the next line. | res_actual = ops.full(approximate.add_n([tt_a, tt_b], 6)) |
Here is a snippet: <|code_start|>tf.compat.v1.enable_eager_execution()
class _ApproximateTest():
def testAddN(self):
# Sum a bunch of TT-matrices.
tt_a = initializers.random_matrix(((2, 1, 4), (2, 2, 2)), tt_rank=2,
dtype=self.dtype)
tt_b = initializers.random_matrix(((2, 1, 4), (2, 2, 2)),
tt_rank=[1, 2, 4, 1], dtype=self.dtype)
def desired(tt_objects):
res = tt_objects[0]
for tt in tt_objects[1:]:
res += tt
return res
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import tensorflow as tf
from t3f import ops
from t3f import approximate
from t3f import initializers
and context from other files:
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
#
# Path: t3f/approximate.py
# def add_n(tt_objects, max_tt_rank, name='t3f_approximate_add_n'):
# def reduce_sum_batch(tt_batch, max_tt_rank, coef=None,
# name='t3f_approximate_reduce_sum_batch'):
#
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
, which may include functions, classes, or code. Output only the next line. | res_actual = ops.full(approximate.add_n([tt_a, tt_b], 6)) |
Given snippet: <|code_start|>tf.compat.v1.enable_eager_execution()
class _ApproximateTest():
def testAddN(self):
# Sum a bunch of TT-matrices.
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
import tensorflow as tf
from t3f import ops
from t3f import approximate
from t3f import initializers
and context:
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
#
# Path: t3f/approximate.py
# def add_n(tt_objects, max_tt_rank, name='t3f_approximate_add_n'):
# def reduce_sum_batch(tt_batch, max_tt_rank, coef=None,
# name='t3f_approximate_reduce_sum_batch'):
#
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
which might include code, classes, or functions. Output only the next line. | tt_a = initializers.random_matrix(((2, 1, 4), (2, 2, 2)), tt_rank=2, |
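All three `testAddN` rows exercise the same contract: `approximate.add_n(tt_objects, max_tt_rank)` must agree with exact addition whenever the rank cap is not binding, and here the exact sum has internal TT-ranks at most (4, 6), so a cap of 6 loses nothing. A sketch of that check using only functions named in the rows' context:

```python
import numpy as np
import tensorflow as tf
from t3f import approximate, initializers, ops

tf.compat.v1.enable_eager_execution()

tt_a = initializers.random_matrix(((2, 1, 4), (2, 2, 2)), tt_rank=2)
tt_b = initializers.random_matrix(((2, 1, 4), (2, 2, 2)), tt_rank=[1, 2, 4, 1])

approx = ops.full(approximate.add_n([tt_a, tt_b], 6))   # rank capped at 6
exact = ops.full(tt_a) + ops.full(tt_b)
print(np.max(np.abs(approx.numpy() - exact.numpy())))   # ~1e-6 or below
```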
Given the code snippet: <|code_start|>tf.compat.v1.enable_eager_execution()
class _ShapesTest():
def testLazyShapeOverflow(self):
large_shape = [10] * 20
<|code_end|>
, generate the next line using the imports in this file:
import tensorflow as tf
from t3f import initializers
from t3f import shapes
and context (functions, classes, or occasionally code) from other files:
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
#
# Path: t3f/shapes.py
# def tt_ranks(tt, name='t3f_tt_ranks'):
# def shape(tt, name='t3f_shape'):
# def raw_shape(tt, name='t3f_raw_shape'):
# def batch_size(tt, name='t3f_batch_size'):
# def lazy_tt_ranks(tt, name='t3f_lazy_tt_ranks'):
# def lazy_shape(tt, name='t3f_lazy_shape'):
# def lazy_raw_shape(tt, name='t3f_lazy_raw_shape'):
# def lazy_batch_size(tt, name='t3f_lazy_batch_size'):
# def clean_raw_shape(shape, name='t3f_clean_raw_shape'):
# def is_batch_broadcasting_possible(tt_a, tt_b):
# def squeeze_batch_dim(tt, name='t3f_squeeze_batch_dim'):
# def expand_batch_dim(tt, name='t3f_expand_batch_dim'):
. Output only the next line. | tensor = initializers.random_matrix_batch([large_shape, large_shape], |
Next line prediction: <|code_start|>tf.compat.v1.enable_eager_execution()
class _ShapesTest():
def testLazyShapeOverflow(self):
large_shape = [10] * 20
tensor = initializers.random_matrix_batch([large_shape, large_shape],
batch_size=5, dtype=self.dtype)
<|code_end|>
. Use current file imports:
(import tensorflow as tf
from t3f import initializers
from t3f import shapes)
and context including class names, function names, or small code snippets from other files:
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
#
# Path: t3f/shapes.py
# def tt_ranks(tt, name='t3f_tt_ranks'):
# def shape(tt, name='t3f_shape'):
# def raw_shape(tt, name='t3f_raw_shape'):
# def batch_size(tt, name='t3f_batch_size'):
# def lazy_tt_ranks(tt, name='t3f_lazy_tt_ranks'):
# def lazy_shape(tt, name='t3f_lazy_shape'):
# def lazy_raw_shape(tt, name='t3f_lazy_raw_shape'):
# def lazy_batch_size(tt, name='t3f_lazy_batch_size'):
# def clean_raw_shape(shape, name='t3f_clean_raw_shape'):
# def is_batch_broadcasting_possible(tt_a, tt_b):
# def squeeze_batch_dim(tt, name='t3f_squeeze_batch_dim'):
# def expand_batch_dim(tt, name='t3f_expand_batch_dim'):
. Output only the next line. | self.assertAllEqual([5, 10 ** 20, 10 ** 20], shapes.lazy_shape(tensor)) |
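A quick aside on why this pair of rows uses `large_shape = [10] * 20`: the full matrix then has 10**20 rows and columns, a value that no longer fits in a signed 64-bit integer, which is exactly what `shapes.lazy_shape` has to survive. A minimal standalone sketch (plain Python, no t3f needed):

```python
# Why lazy shape bookkeeping must use Python ints rather than tf.int64:
rows = 1
for mode in [10] * 20:   # the test's large_shape
    rows *= mode         # Python ints have arbitrary precision
print(rows)              # 100000000000000000000 == 10**20
print(rows > 2**63 - 1)  # True: this would overflow a signed 64-bit integer
```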
Here is a snippet: <|code_start|>tf.compat.v1.enable_v2_behavior()
class _NeuralTest():
def testKerasDense(self):
    # Try to create the layer twice to check that it won't crash saying the
    # variable already exists.
x = tf.random.normal((20, 28*28))
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import tensorflow as tf
from t3f import nn
and context from other files:
# Path: t3f/nn.py
# class KerasDense(Layer):
# def __init__(self, input_dims, output_dims, tt_rank=2,
# activation=None, use_bias=True, kernel_initializer='glorot',
# bias_initializer=0.1, **kwargs):
# def call(self, x):
# def compute_output_shape(self, input_shape):
, which may include functions, classes, or code. Output only the next line. | layer = nn.KerasDense(input_dims=[7, 4, 7, 4], output_dims=[5, 5, 5, 5]) |
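A hedged usage sketch for the row above: argument names follow the `KerasDense` signature listed in the context, and `7 * 4 * 7 * 4 == 28 * 28`, so the layer accepts the flattened 28x28 input from the snippet. Not verified against a specific t3f release.

```python
import tensorflow as tf
from t3f import nn

x = tf.random.normal((20, 28 * 28))
layer = nn.KerasDense(input_dims=[7, 4, 7, 4], output_dims=[5, 5, 5, 5])
y = layer(x)  # expected output shape: (20, 5 * 5 * 5 * 5) == (20, 625)
```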
Based on the snippet: <|code_start|>
def value_and_grad(f, x):
"""Gradient of the given function w.r.t. x. Works in eager and graph mode."""
if utils.in_eager_mode():
with tf.GradientTape() as tape:
tape.watch(x)
v = f(x)
return v, tape.gradient(v, x)
else:
v = f(x)
return v, tf.gradients(v, x)
def _enforce_gauge_conditions(deltas, left):
"""Project deltas that define tangent space vec onto the gauge conditions."""
proj_deltas = []
<|code_end|>
, predict the immediate next line with the help of imports:
import tensorflow as tf
from t3f import shapes
from t3f import decompositions
from t3f import riemannian
from t3f import utils
and context (classes, functions, sometimes code) from other files:
# Path: t3f/shapes.py
# def tt_ranks(tt, name='t3f_tt_ranks'):
# def shape(tt, name='t3f_shape'):
# def raw_shape(tt, name='t3f_raw_shape'):
# def batch_size(tt, name='t3f_batch_size'):
# def lazy_tt_ranks(tt, name='t3f_lazy_tt_ranks'):
# def lazy_shape(tt, name='t3f_lazy_shape'):
# def lazy_raw_shape(tt, name='t3f_lazy_raw_shape'):
# def lazy_batch_size(tt, name='t3f_lazy_batch_size'):
# def clean_raw_shape(shape, name='t3f_clean_raw_shape'):
# def is_batch_broadcasting_possible(tt_a, tt_b):
# def squeeze_batch_dim(tt, name='t3f_squeeze_batch_dim'):
# def expand_batch_dim(tt, name='t3f_expand_batch_dim'):
#
# Path: t3f/decompositions.py
# def to_tt_matrix(mat, shape, max_tt_rank=10, epsilon=None,
# name='t3f_to_tt_matrix'):
# def to_tt_tensor(tens, max_tt_rank=10, epsilon=None,
# name='t3f_to_tt_tensor'):
# def round(tt, max_tt_rank=None, epsilon=None, name='t3f_round'):
# def _round_tt(tt, max_tt_rank, epsilon):
# def _round_batch_tt(tt, max_tt_rank, epsilon):
# def orthogonalize_tt_cores(tt, left_to_right=True,
# name='t3f_orthogonalize_tt_cores'):
# def _orthogonalize_tt_cores_left_to_right(tt):
# def _orthogonalize_batch_tt_cores_left_to_right(tt):
# def _orthogonalize_tt_cores_right_to_left(tt):
#
# Path: t3f/riemannian.py
# def project_sum(what, where, weights=None):
# def project(what, where):
# def project_matmul(what, where, matrix):
# def pairwise_flat_inner_projected(projected_tt_vectors_1,
# projected_tt_vectors_2):
# def add_n_projected(tt_objects, coef=None):
# def slice_tt_core(tt_core, left_idx, right_idx):
# def tangent_space_to_deltas(tt, name='t3f_tangent_space_to_deltas'):
# def deltas_to_tangent_space(deltas, tt, left=None, right=None,
# name='t3f_deltas_to_tangent_space'):
#
# Path: t3f/utils.py
# def unravel_index(indices, shape):
# def replace_tf_svd_with_np_svd():
# def my_svd(tensor, full_matrices=False, compute_uv=True):
# def in_eager_mode():
. Output only the next line. | tt_ranks = shapes.lazy_tt_ranks(left) |
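The `value_and_grad` helper at the top of this row is easy to exercise in isolation; here is its eager branch on a scalar function, using only standard TF2 APIs:

```python
import tensorflow as tf

def value_and_grad(f, x):
    # Eager-mode branch of the helper above: record f(x) on a tape,
    # then differentiate the recorded value with respect to x.
    with tf.GradientTape() as tape:
        tape.watch(x)
        v = f(x)
    return v, tape.gradient(v, x)

x = tf.constant(3.0)
v, g = value_and_grad(lambda t: t ** 2, x)
print(float(v), float(g))  # 9.0 6.0, since d/dt t**2 = 2t
```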
Given the code snippet: <|code_start|> e.g. ones that include QR or SVD decomposition (t3f.project, t3f.round) or
for functions that work with TT-cores directly (in contrast to working with
TT-object only via t3f functions). In these cases this function can silently
return wrong results!
Example:
# Scalar product with some predefined tensor squared 0.5 * <x, t>**2.
  # Its gradient is <x, t> t and its Riemannian gradient is
# t3f.project(<x, t> * t, x)
f = lambda x: 0.5 * t3f.flat_inner(x, t)**2
projected_grad = t3f.gradients(f, x) # t3f.project(t3f.flat_inner(x, t) * t, x)
Args:
func: function that takes TensorTrain object as input and outputs a number.
x: point at which to compute the gradient and on which tangent space to
project the gradient.
name: string, name of the Op.
runtime_check: [True] whether to do a sanity check that the passed
function is invariant to different TT representations (otherwise
      the Riemannian gradient doesn't even exist). It makes things slower,
      but helps catch bugs, so turn it off during production deployment.
Returns:
`TensorTrain`, projection of the gradient df/dx onto the tangent space at
point x.
See also:
t3f.hessian_vector_product
"""
with tf.name_scope(name):
<|code_end|>
, generate the next line using the imports in this file:
import tensorflow as tf
from t3f import shapes
from t3f import decompositions
from t3f import riemannian
from t3f import utils
and context (functions, classes, or occasionally code) from other files:
# Path: t3f/shapes.py
# def tt_ranks(tt, name='t3f_tt_ranks'):
# def shape(tt, name='t3f_shape'):
# def raw_shape(tt, name='t3f_raw_shape'):
# def batch_size(tt, name='t3f_batch_size'):
# def lazy_tt_ranks(tt, name='t3f_lazy_tt_ranks'):
# def lazy_shape(tt, name='t3f_lazy_shape'):
# def lazy_raw_shape(tt, name='t3f_lazy_raw_shape'):
# def lazy_batch_size(tt, name='t3f_lazy_batch_size'):
# def clean_raw_shape(shape, name='t3f_clean_raw_shape'):
# def is_batch_broadcasting_possible(tt_a, tt_b):
# def squeeze_batch_dim(tt, name='t3f_squeeze_batch_dim'):
# def expand_batch_dim(tt, name='t3f_expand_batch_dim'):
#
# Path: t3f/decompositions.py
# def to_tt_matrix(mat, shape, max_tt_rank=10, epsilon=None,
# name='t3f_to_tt_matrix'):
# def to_tt_tensor(tens, max_tt_rank=10, epsilon=None,
# name='t3f_to_tt_tensor'):
# def round(tt, max_tt_rank=None, epsilon=None, name='t3f_round'):
# def _round_tt(tt, max_tt_rank, epsilon):
# def _round_batch_tt(tt, max_tt_rank, epsilon):
# def orthogonalize_tt_cores(tt, left_to_right=True,
# name='t3f_orthogonalize_tt_cores'):
# def _orthogonalize_tt_cores_left_to_right(tt):
# def _orthogonalize_batch_tt_cores_left_to_right(tt):
# def _orthogonalize_tt_cores_right_to_left(tt):
#
# Path: t3f/riemannian.py
# def project_sum(what, where, weights=None):
# def project(what, where):
# def project_matmul(what, where, matrix):
# def pairwise_flat_inner_projected(projected_tt_vectors_1,
# projected_tt_vectors_2):
# def add_n_projected(tt_objects, coef=None):
# def slice_tt_core(tt_core, left_idx, right_idx):
# def tangent_space_to_deltas(tt, name='t3f_tangent_space_to_deltas'):
# def deltas_to_tangent_space(deltas, tt, left=None, right=None,
# name='t3f_deltas_to_tangent_space'):
#
# Path: t3f/utils.py
# def unravel_index(indices, shape):
# def replace_tf_svd_with_np_svd():
# def my_svd(tensor, full_matrices=False, compute_uv=True):
# def in_eager_mode():
. Output only the next line. | left = decompositions.orthogonalize_tt_cores(x) |
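The docstring's running example is f(x) = 0.5 * <x, t>**2, whose Euclidean gradient is <x, t> * t. A plain NumPy check of that identity (ignoring the TT structure and the tangent-space projection entirely):

```python
import numpy as np

t = np.array([1.0, 2.0, 3.0])
x = np.array([0.5, -1.0, 2.0])

f = lambda v: 0.5 * np.dot(v, t) ** 2
grad = np.dot(x, t) * t  # analytic gradient of f at x

# Finite-difference sanity check of the analytic formula
eps = 1e-6
num = np.array([(f(x + eps * e) - f(x - eps * e)) / (2 * eps)
                for e in np.eye(3)])
assert np.allclose(grad, num, atol=1e-4)
```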
Next line prediction: <|code_start|> # Scalar product with some predefined tensor squared 0.5 * <x, t>**2.
  # Its gradient is <x, t> t and its Riemannian gradient is
# t3f.project(<x, t> * t, x)
f = lambda x: 0.5 * t3f.flat_inner(x, t)**2
projected_grad = t3f.gradients(f, x) # t3f.project(t3f.flat_inner(x, t) * t, x)
Args:
func: function that takes TensorTrain object as input and outputs a number.
x: point at which to compute the gradient and on which tangent space to
project the gradient.
name: string, name of the Op.
runtime_check: [True] whether to do a sanity check that the passed
function is invariant to different TT representations (otherwise
      the Riemannian gradient doesn't even exist). It makes things slower,
      but helps catch bugs, so turn it off during production deployment.
Returns:
`TensorTrain`, projection of the gradient df/dx onto the tangent space at
point x.
See also:
t3f.hessian_vector_product
"""
with tf.name_scope(name):
left = decompositions.orthogonalize_tt_cores(x)
right = decompositions.orthogonalize_tt_cores(left, left_to_right=False)
deltas = [right.tt_cores[0]]
deltas += [tf.zeros_like(cc) for cc in right.tt_cores[1:]]
def augmented_func(d):
<|code_end|>
. Use current file imports:
(import tensorflow as tf
from t3f import shapes
from t3f import decompositions
from t3f import riemannian
from t3f import utils)
and context including class names, function names, or small code snippets from other files:
# Path: t3f/shapes.py
# def tt_ranks(tt, name='t3f_tt_ranks'):
# def shape(tt, name='t3f_shape'):
# def raw_shape(tt, name='t3f_raw_shape'):
# def batch_size(tt, name='t3f_batch_size'):
# def lazy_tt_ranks(tt, name='t3f_lazy_tt_ranks'):
# def lazy_shape(tt, name='t3f_lazy_shape'):
# def lazy_raw_shape(tt, name='t3f_lazy_raw_shape'):
# def lazy_batch_size(tt, name='t3f_lazy_batch_size'):
# def clean_raw_shape(shape, name='t3f_clean_raw_shape'):
# def is_batch_broadcasting_possible(tt_a, tt_b):
# def squeeze_batch_dim(tt, name='t3f_squeeze_batch_dim'):
# def expand_batch_dim(tt, name='t3f_expand_batch_dim'):
#
# Path: t3f/decompositions.py
# def to_tt_matrix(mat, shape, max_tt_rank=10, epsilon=None,
# name='t3f_to_tt_matrix'):
# def to_tt_tensor(tens, max_tt_rank=10, epsilon=None,
# name='t3f_to_tt_tensor'):
# def round(tt, max_tt_rank=None, epsilon=None, name='t3f_round'):
# def _round_tt(tt, max_tt_rank, epsilon):
# def _round_batch_tt(tt, max_tt_rank, epsilon):
# def orthogonalize_tt_cores(tt, left_to_right=True,
# name='t3f_orthogonalize_tt_cores'):
# def _orthogonalize_tt_cores_left_to_right(tt):
# def _orthogonalize_batch_tt_cores_left_to_right(tt):
# def _orthogonalize_tt_cores_right_to_left(tt):
#
# Path: t3f/riemannian.py
# def project_sum(what, where, weights=None):
# def project(what, where):
# def project_matmul(what, where, matrix):
# def pairwise_flat_inner_projected(projected_tt_vectors_1,
# projected_tt_vectors_2):
# def add_n_projected(tt_objects, coef=None):
# def slice_tt_core(tt_core, left_idx, right_idx):
# def tangent_space_to_deltas(tt, name='t3f_tangent_space_to_deltas'):
# def deltas_to_tangent_space(deltas, tt, left=None, right=None,
# name='t3f_deltas_to_tangent_space'):
#
# Path: t3f/utils.py
# def unravel_index(indices, shape):
# def replace_tf_svd_with_np_svd():
# def my_svd(tensor, full_matrices=False, compute_uv=True):
# def in_eager_mode():
. Output only the next line. | x_projection = riemannian.deltas_to_tangent_space(d, x, left, right) |
Given the following code snippet before the placeholder: <|code_start|>tf.compat.v1.enable_eager_execution()
class _TensorTrainBatchTest():
def testTensorIndexing(self):
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import tensorflow as tf
from t3f import initializers
from t3f import ops
and context including class names, function names, and sometimes code from other files:
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
#
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
. Output only the next line. | tens = initializers.random_tensor_batch((3, 3, 4), batch_size=3, |
Predict the next line after this snippet: <|code_start|>tf.compat.v1.enable_eager_execution()
class _TensorTrainBatchTest():
def testTensorIndexing(self):
tens = initializers.random_tensor_batch((3, 3, 4), batch_size=3,
dtype=self.dtype)
<|code_end|>
using the current file's imports:
import numpy as np
import tensorflow as tf
from t3f import initializers
from t3f import ops
and any relevant context from other files:
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
#
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
. Output only the next line. | desired = ops.full(tens)[:, :, :, :] |
Next line prediction: <|code_start|>tf.compat.v1.enable_eager_execution()
class _BatchOpsTest():
def testConcatMatrix(self):
# Test concating TTMatrix batches along batch dimension.
<|code_end|>
. Use current file imports:
(import numpy as np
import tensorflow as tf
from t3f import ops
from t3f import batch_ops
from t3f import initializers)
and context including class names, function names, or small code snippets from other files:
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
#
# Path: t3f/batch_ops.py
# def concat_along_batch_dim(tt_list, name='t3f_concat_along_batch_dim'):
# def multiply_along_batch_dim(batch_tt, weights,
# name='t3f_multiply_along_batch_dim'):
# def gram_matrix(tt_vectors, matrix=None, name='t3f_gram_matrix'):
# def pairwise_flat_inner(tt_1, tt_2, matrix=None,
# name='t3f_pairwise_flat_inner'):
#
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
. Output only the next line. | first = initializers.random_matrix_batch(((2, 3), (3, 3)), batch_size=1, |
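For the row above, the dense analogue of `concat_along_batch_dim` is plain concatenation along axis 0: with raw shapes ((2, 3), (3, 3)) each TT-matrix materializes to 6 x 9, so batches of size 1 and 4 concatenate to 5. A sketch on full (dense) tensors:

```python
import numpy as np

first = np.random.randn(1, 6, 9)   # batch_size 1; each element is a (2*3) x (3*3) matrix
second = np.random.randn(4, 6, 9)  # batch_size 4
both = np.concatenate([first, second], axis=0)
assert both.shape == (5, 6, 9)     # batch dimension grows, matrix shape is unchanged
```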
Continue the code snippet: <|code_start|>tf.compat.v1.enable_eager_execution()
class _InitializersTest():
def testTensorOnesAndZeros(self):
<|code_end|>
. Use current file imports:
import numpy as np
import tensorflow as tf
from t3f import initializers
from t3f import ops
and context (classes, functions, or code) from other files:
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
#
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
. Output only the next line. | tt_ones = initializers.tensor_ones([2, 3, 4], dtype=self.dtype) |
Given the code snippet: <|code_start|>tf.compat.v1.enable_eager_execution()
class _InitializersTest():
def testTensorOnesAndZeros(self):
tt_ones = initializers.tensor_ones([2, 3, 4], dtype=self.dtype)
tt_zeros = initializers.tensor_zeros([2, 3, 4], dtype=self.dtype)
ones_desired = np.ones((2, 3, 4), dtype=self.dtype.as_numpy_dtype)
zeros_desired = np.zeros((2, 3, 4), dtype=self.dtype.as_numpy_dtype)
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
import tensorflow as tf
from t3f import initializers
from t3f import ops
and context (functions, classes, or occasionally code) from other files:
# Path: t3f/initializers.py
# def _validate_input_parameters(is_tensor, shape, **params):
# def tensor_ones(shape, dtype=tf.float32, name='t3f_tensor_ones'):
# def tensor_zeros(shape, dtype=tf.float32, name='t3f_tensor_zeros'):
# def eye(shape, dtype=tf.float32, name='t3f_eye'):
# def matrix_ones(shape, dtype=tf.float32, name='t3f_matrix_ones'):
# def matrix_zeros(shape, dtype=tf.float32, name='t3f_matrix_zeros'):
# def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_tensor_with_random_cores'):
# def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_tensor_batch_with_random_cores'):
# def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32,
# name='t3f_matrix_with_random_cores'):
# def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1,
# mean=0., stddev=1., dtype=tf.float32,
# name='t3f_matrix_batch_with_random_cores'):
# def ones_like(tt, name='t3f_ones_like'):
# def zeros_like(tt, name='t3f_zeros_like'):
# def random_tensor(shape, tt_rank=2, mean=0., stddev=1., dtype=tf.float32,
# name='t3f_random_tensor'):
# def random_tensor_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_tensor_batch'):
# def random_matrix(shape, tt_rank=2, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix'):
# def random_matrix_batch(shape, tt_rank=2, batch_size=1, mean=0., stddev=1.,
# dtype=tf.float32, name='t3f_random_matrix_batch'):
# def glorot_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_glorot_initializer'):
# def he_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_he_initializer'):
# def lecun_initializer(shape, tt_rank=2, dtype=tf.float32,
# name='t3f_lecun_initializer'):
#
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
. Output only the next line. | tt_ones_full = self.evaluate(ops.full(tt_ones)) |
Predict the next line for this snippet: <|code_start|>
def l2_regularizer(scale, scope=None):
"""Returns a function that applies L2 regularization to TensorTrain weights.
Args:
scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional scope name.
Returns:
A function with signature `l2(tt)` that applies L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % (scale,))
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g.' %
scale)
if scale == 0.:
tf.logging.info('Scale of 0 disables regularizer.')
return lambda _: None
def l2(tt):
"""Applies l2 regularization to TensorTrain object."""
with tf.name_scope(scope, 'l2_regularizer') as name:
my_scale = tf.convert_to_tensor(scale, dtype=tt.dtype, name='scale')
<|code_end|>
with the help of current file imports:
import numbers
import tensorflow as tf
from t3f import ops
and context from other files:
# Path: t3f/ops.py
# def full(tt, name='t3f_full'):
# def _full_tt(tt):
# def _full_tt_batch(tt):
# def tt_tt_matmul(tt_matrix_a, tt_matrix_b):
# def tt_dense_matmul(tt_matrix_a, matrix_b):
# def dense_tt_matmul(matrix_a, tt_matrix_b):
# def sparse_tt_matmul(sparse_matrix_a, tt_matrix_b):
# def tt_sparse_matmul(tt_matrix_a, sparse_matrix_b):
# def matmul(a, b, name='t3f_matmul'):
# def tt_tt_flat_inner(tt_a, tt_b):
# def tt_dense_flat_inner(tt_a, dense_b):
# def tt_sparse_flat_inner(tt_a, sparse_b):
# def dense_tt_flat_inner(dense_a, tt_b):
# def sparse_tt_flat_inner(sparse_a, tt_b):
# def flat_inner(a, b, name='t3f_flat_inner'):
# def _add_tensor_cores(tt_a, tt_b):
# def _add_batch_tensor_cores(tt_a, tt_b):
# def _add_matrix_cores(tt_a, tt_b):
# def _add_batch_matrix_cores(tt_a, tt_b):
# def add(tt_a, tt_b, name='t3f_add'):
# def multiply(tt_left, right, name='t3f_multiply'):
# def frobenius_norm_squared(tt, differentiable=False,
# name='t3f_frobenius_norm_squared'):
# def frobenius_norm(tt, epsilon=1e-5, differentiable=False,
# name='t3f_frobenius_norm'):
# def transpose(tt_matrix, name='t3f_transpose'):
# def quadratic_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form(A, b, c, name='t3f_bilinear_form'):
# def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'):
# def cast(tt, dtype, name='t3f_cast'):
# def gather_nd(tt, indices, name='t3f_gather_nd'):
# def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'):
, which may contain function names, class names, or code. Output only the next line. | return tf.multiply(my_scale, ops.frobenius_norm_squared(tt), name=name) |
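A hedged usage sketch for the factory above; `W` is a hypothetical TT weight built with `initializers.random_matrix` (signature from the context), and the penalty follows directly from the snippet's `l2` closure:

```python
from t3f import initializers

reg = l2_regularizer(0.01)                        # factory defined in the snippet
W = initializers.random_matrix(((2, 3), (3, 4)))  # hypothetical TT-matrix weight
penalty = reg(W)                   # tf scalar: 0.01 * frobenius_norm_squared(W)
assert l2_regularizer(0.0)(W) is None  # scale == 0 disables the regularizer
```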
Given snippet: <|code_start|>The advantage over using `temci completion` directly is that it's normally
significantly faster.
Usage:
```
temci_completion [zsh|bash]
```
This returns the location of the completion file.
"""
SUPPORTED_SHELLS = ["zsh", "bash"]
def print_help():
print("""
temci (version {}) Copyright (C) 2016 Johannes Bechberger
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions.
For details, see the LICENSE file in the source folder of temci.
Usage of temci_completion:
temci_completion [{}]
This will return the completion file name.
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import click
import subprocess
import subprocess
from temci.scripts.version import version
from sys import argv
from os.path import exists
and context:
# Path: temci/scripts/version.py
which might include code, classes, or functions. Output only the next line. | """.format(version, "|".join(SUPPORTED_SHELLS))) |
Given snippet: <|code_start|>"""
Tests related to the processing of settings
"""
def test_config_not_ignored():
"""
Issue "Config now seems to be ignored completely after type checking #62"
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from tests.utils import run_temci
and context:
# Path: tests/utils.py
# def run_temci(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None, raise_exc: bool = False) \
# -> Result:
# """
# Run temci with the passed arguments
#
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
# :param files: {file name: content as string or dictionary that is converted into YAML first}
# :param expect_success: expect a zero return code
# :param misc_env: additional environment variables
# :return: result of the call
# """
# if os.getenv("TEMCI_TEST_CMD", "0") == "1":
# return run_temci_proc(args, settings, files, expect_success, misc_env=misc_env)
# return run_temci_click(args, settings, files, expect_success, misc_env=misc_env, raise_exc=raise_exc)
which might include code, classes, or functions. Output only the next line. | assert "3 single benchmarks" in run_temci("short exec ls", settings={"run": {"runs": 3}}).out |
Predict the next line for this snippet: <|code_start|> {
"attributes": {
"description": "b"
},
"run_config": {
"cmd": "true"
},
"build_config": {
"cmd": "false"
}
}
]
}, expect_success=False)
assert r.ret_code != 0
def test_per_block_runs_issue_113():
assert len(run_temci("exec bla.yaml", files={
"bla.yaml": [
{
"run_config": {"cmd": "echo nooo", "runs": 1}
}
]
}).yaml_contents["run_output.yaml"][0]["data"]["stime"]) == 1
def test_envinfo_in_result():
assert any("env_info" in v for v in run_temci("short exec ls").yaml_contents["run_output.yaml"])
<|code_end|>
with the help of current file imports:
from temci.run.run_driver import is_perf_available
from temci.scripts.cli import ErrorCode
from tests.utils import run_temci, run_temci_proc
and context from other files:
# Path: temci/run/run_driver.py
# def is_perf_available() -> bool:
# """
# Is the ``perf`` tool available?
# """
# try:
# subprocess.check_call("perf stat -x';' -e cycles,cpu-clock,task-clock,instructions,branch-misses,"
# "cache-references -- echo 1", shell=True,
# stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# except BaseException:
# return False
# return True
#
# Path: temci/scripts/cli.py
# class ErrorCode(Enum):
# NO_ERROR = 0
# PROGRAM_ERROR = 1
# TEMCI_ERROR = 255
#
# Path: tests/utils.py
# def run_temci(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None, raise_exc: bool = False) \
# -> Result:
# """
# Run temci with the passed arguments
#
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
# :param files: {file name: content as string or dictionary that is converted into YAML first}
# :param expect_success: expect a zero return code
# :param misc_env: additional environment variables
# :return: result of the call
# """
# if os.getenv("TEMCI_TEST_CMD", "0") == "1":
# return run_temci_proc(args, settings, files, expect_success, misc_env=misc_env)
# return run_temci_click(args, settings, files, expect_success, misc_env=misc_env, raise_exc=raise_exc)
#
# def run_temci_proc(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None) \
# -> Result:
# """
# Run temci with the passed arguments
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
#     :param files: {file name: content as string or dictionary that is converted into YAML first}
# :param expect_success: expect a zero return code
#
# :return: result of the call
# """
# with tempfile.TemporaryDirectory() as d:
# _store_files(files, str(d))
# cmd = "python3 {}/temci/scripts/cli.py {}".format(os.path.dirname(os.path.dirname(__file__)), args)
# if settings is not None:
# with open(d + "/settings.yaml", "w") as f:
# yaml.dump(settings, f)
# cmd += " --config settings.yaml"
# env = os.environ.copy()
# env["LC_ALL"] = "en_US.utf-8"
# env.update(misc_env or {})
# proc = subprocess.Popen(["/bin/sh", "-c", cmd],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
# cwd=str(d),
# env=env,
# universal_newlines=True)
# out, err = proc.communicate()
# file_contents, yaml_contents = _load_files(files, d=d)
# ret = Result(str(out).strip(), str(err).strip(), proc.returncode, file_contents, yaml_contents)
# if expect_success:
# assert proc.returncode == 0, repr(ret)
# return ret
, which may contain function names, class names, or code. Output only the next line. | if is_perf_available(): |
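The `is_perf_available` helper quoted in the context uses a generic probe pattern: run the tool once, discard all output, and map any failure to False. A standalone version of that pattern:

```python
import subprocess

def tool_available(cmd: str) -> bool:
    """Return True iff `cmd` runs and exits with status 0."""
    try:
        subprocess.check_call(cmd, shell=True,
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)
    except BaseException:
        return False
    return True

# e.g. tool_available("perf stat -e cycles -- echo 1"), mirroring is_perf_available
```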
Given snippet: <|code_start|>def test_successful_run_errors():
d = run_temci("short exec true").yaml_contents["run_output.yaml"][0]
assert "internal_error" not in d
assert "error" not in d
def test_errorneous_run():
d = run_temci("short exec 'exit 1'", expect_success=False).yaml_contents["run_output.yaml"][0]
assert "error" in d
e = d["error"]
assert e["return_code"] == 1
def test_check_tag_attribute():
assert run_temci("exec bla.yaml --runs 1", files={
"bla.yaml": [
{
"run_config": {"cmd": "echo 1"},
"attributes": {"tags": "slow"}
}
]
}, expect_success=False).ret_code != 0
def test_included_blocks():
out = run_temci("short exec echo ls --included_blocks ls --runs 1").out
assert "ls" in out and "echo" not in out
def test_discard_blocks_on_error():
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from temci.run.run_driver import is_perf_available
from temci.scripts.cli import ErrorCode
from tests.utils import run_temci, run_temci_proc
and context:
# Path: temci/run/run_driver.py
# def is_perf_available() -> bool:
# """
# Is the ``perf`` tool available?
# """
# try:
# subprocess.check_call("perf stat -x';' -e cycles,cpu-clock,task-clock,instructions,branch-misses,"
# "cache-references -- echo 1", shell=True,
# stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# except BaseException:
# return False
# return True
#
# Path: temci/scripts/cli.py
# class ErrorCode(Enum):
# NO_ERROR = 0
# PROGRAM_ERROR = 1
# TEMCI_ERROR = 255
#
# Path: tests/utils.py
# def run_temci(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None, raise_exc: bool = False) \
# -> Result:
# """
# Run temci with the passed arguments
#
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
# :param files: {file name: content as string or dictionary that is converted into YAML first}
# :param expect_success: expect a zero return code
# :param misc_env: additional environment variables
# :return: result of the call
# """
# if os.getenv("TEMCI_TEST_CMD", "0") == "1":
# return run_temci_proc(args, settings, files, expect_success, misc_env=misc_env)
# return run_temci_click(args, settings, files, expect_success, misc_env=misc_env, raise_exc=raise_exc)
#
# def run_temci_proc(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None) \
# -> Result:
# """
# Run temci with the passed arguments
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
#     :param files: {file name: content as string or dictionary that is converted into YAML first}
# :param expect_success: expect a zero return code
#
# :return: result of the call
# """
# with tempfile.TemporaryDirectory() as d:
# _store_files(files, str(d))
# cmd = "python3 {}/temci/scripts/cli.py {}".format(os.path.dirname(os.path.dirname(__file__)), args)
# if settings is not None:
# with open(d + "/settings.yaml", "w") as f:
# yaml.dump(settings, f)
# cmd += " --config settings.yaml"
# env = os.environ.copy()
# env["LC_ALL"] = "en_US.utf-8"
# env.update(misc_env or {})
# proc = subprocess.Popen(["/bin/sh", "-c", cmd],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
# cwd=str(d),
# env=env,
# universal_newlines=True)
# out, err = proc.communicate()
# file_contents, yaml_contents = _load_files(files, d=d)
# ret = Result(str(out).strip(), str(err).strip(), proc.returncode, file_contents, yaml_contents)
# if expect_success:
# assert proc.returncode == 0, repr(ret)
# return ret
which might include code, classes, or functions. Output only the next line. | assert run_temci("short exec 'exit 1' --discard_all_data_for_block_on_error", expect_success=False).ret_code == ErrorCode.PROGRAM_ERROR.value |
Predict the next line for this snippet: <|code_start|>
def test_errorneous_run():
d = run_temci("short exec 'exit 1'", expect_success=False).yaml_contents["run_output.yaml"][0]
assert "error" in d
e = d["error"]
assert e["return_code"] == 1
def test_check_tag_attribute():
assert run_temci("exec bla.yaml --runs 1", files={
"bla.yaml": [
{
"run_config": {"cmd": "echo 1"},
"attributes": {"tags": "slow"}
}
]
}, expect_success=False).ret_code != 0
def test_included_blocks():
out = run_temci("short exec echo ls --included_blocks ls --runs 1").out
assert "ls" in out and "echo" not in out
def test_discard_blocks_on_error():
assert run_temci("short exec 'exit 1' --discard_all_data_for_block_on_error", expect_success=False).ret_code == ErrorCode.PROGRAM_ERROR.value
def test_temci_short_shell():
<|code_end|>
with the help of current file imports:
from temci.run.run_driver import is_perf_available
from temci.scripts.cli import ErrorCode
from tests.utils import run_temci, run_temci_proc
and context from other files:
# Path: temci/run/run_driver.py
# def is_perf_available() -> bool:
# """
# Is the ``perf`` tool available?
# """
# try:
# subprocess.check_call("perf stat -x';' -e cycles,cpu-clock,task-clock,instructions,branch-misses,"
# "cache-references -- echo 1", shell=True,
# stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# except BaseException:
# return False
# return True
#
# Path: temci/scripts/cli.py
# class ErrorCode(Enum):
# NO_ERROR = 0
# PROGRAM_ERROR = 1
# TEMCI_ERROR = 255
#
# Path: tests/utils.py
# def run_temci(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None, raise_exc: bool = False) \
# -> Result:
# """
# Run temci with the passed arguments
#
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
# :param files: {file name: content as string or dictionary that is converted into YAML first}
# :param expect_success: expect a zero return code
# :param misc_env: additional environment variables
# :return: result of the call
# """
# if os.getenv("TEMCI_TEST_CMD", "0") == "1":
# return run_temci_proc(args, settings, files, expect_success, misc_env=misc_env)
# return run_temci_click(args, settings, files, expect_success, misc_env=misc_env, raise_exc=raise_exc)
#
# def run_temci_proc(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None) \
# -> Result:
# """
# Run temci with the passed arguments
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
# :param files: {file name: content as string or dictionary that is converted into YAML first}c at
# :param expect_success: expect a zero return code
#
# :return: result of the call
# """
# with tempfile.TemporaryDirectory() as d:
# _store_files(files, str(d))
# cmd = "python3 {}/temci/scripts/cli.py {}".format(os.path.dirname(os.path.dirname(__file__)), args)
# if settings is not None:
# with open(d + "/settings.yaml", "w") as f:
# yaml.dump(settings, f)
# cmd += " --config settings.yaml"
# env = os.environ.copy()
# env["LC_ALL"] = "en_US.utf-8"
# env.update(misc_env or {})
# proc = subprocess.Popen(["/bin/sh", "-c", cmd],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
# cwd=str(d),
# env=env,
# universal_newlines=True)
# out, err = proc.communicate()
# file_contents, yaml_contents = _load_files(files, d=d)
# ret = Result(str(out).strip(), str(err).strip(), proc.returncode, file_contents, yaml_contents)
# if expect_success:
# assert proc.returncode == 0, repr(ret)
# return ret
, which may contain function names, class names, or code. Output only the next line. | assert "42" in run_temci_proc("short shell echo 42").out |
Continue the code snippet: <|code_start|>
def run():
sudo_opt_index = sys.argv.index("--sudo") if "--sudo" in sys.argv else sys.maxsize
raw_opt_index = sys.argv.index("--") if "--" in sys.argv else sys.maxsize
has_sudo_opt = sudo_opt_index != sys.maxsize and sudo_opt_index < raw_opt_index
<|code_end|>
. Use current file imports:
import os
import sys
import json, shlex
from temci.utils.util import has_root_privileges
from temci.scripts.cli import cli_with_error_catching
and context (classes, functions, or code) from other files:
# Path: temci/utils/util.py
# def has_root_privileges() -> bool:
# """
# Has the current user root privileges?
# """
# return does_command_succeed("head /proc/1/stack")
. Output only the next line. | if not has_sudo_opt or has_root_privileges(): |
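The index arithmetic in `run()` decides whether `--sudo` was passed as a temci option, i.e. before the `--` separator, rather than inside the benchmarked command. A standalone illustration of that check:

```python
import sys

argv = ["temci", "--sudo", "exec", "--", "--sudo"]  # the second --sudo is payload
sudo_i = argv.index("--sudo") if "--sudo" in argv else sys.maxsize
raw_i = argv.index("--") if "--" in argv else sys.maxsize
has_sudo_opt = sudo_i != sys.maxsize and sudo_i < raw_i
print(has_sudo_opt)  # True: list.index returns the first match, at position 1
```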
Using the snippet: <|code_start|>
Number = t.Union[int, float]
""" Numeric type """
def fnumber(number: Number, rel_deviation: Number = None, abs_deviation: Number = None, is_percent: bool = False) -> str:
return FNumber(number, rel_deviation, abs_deviation, is_percent).format()
class ParenthesesMode(Enum):
DIGIT_CHANGE = "d"
ORDER_OF_MAGNITUDE = "o"
@classmethod
def map(cls, key: t.Union[str, 'ParenthesesMode']) -> 'ParenthesesMode':
if isinstance(key, ParenthesesMode):
return key
return {
"d": ParenthesesMode.DIGIT_CHANGE,
"o": ParenthesesMode.ORDER_OF_MAGNITUDE
}[key]
<|code_end|>
, determine the next line of code. You have imports:
import sys
import typing as t
import math
from enum import Enum
from temci.utils.typecheck import *
from temci.utils.util import document
from temci.utils.settings import Settings
and context (class names, function names, or code) available:
# Path: temci/utils/util.py
# def document(**kwargs: t.Dict[str, str]):
# """
# Document
#
# :param kwargs: class attribute, documentation prefix
# """
#
# def dec(klass):
# if sphinx_doc():
# for k, v in kwargs.items():
# klass.__doc__ += """
#
# {}
#
# {}
#
# """.format(v, get_doc_for_type_scheme(klass.__dict__[k]))
# return klass
#
# return dec
. Output only the next line. | @document(settings_format="Configuration format, is in the settings under report/number") |
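The `map` classmethod above normalizes either a one-letter key or an already-constructed member; re-declared here only so the demo is standalone:

```python
from enum import Enum

class ParenthesesMode(Enum):
    DIGIT_CHANGE = "d"
    ORDER_OF_MAGNITUDE = "o"

    @classmethod
    def map(cls, key):
        if isinstance(key, ParenthesesMode):
            return key  # already an enum member: pass through
        return {"d": cls.DIGIT_CHANGE, "o": cls.ORDER_OF_MAGNITUDE}[key]

assert ParenthesesMode.map("o") is ParenthesesMode.ORDER_OF_MAGNITUDE
assert ParenthesesMode.map(ParenthesesMode.DIGIT_CHANGE) is ParenthesesMode.DIGIT_CHANGE
```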
Continue the code snippet: <|code_start|> "hint": ["true", "false"]
}
} # type: t.Dict[str, t.Any]
""" Completion hints for supported shells for this type instance """
def _instancecheck_impl(self, value, info: Info) -> InfoMsg:
res = ExactEither(True, False).__instancecheck__(value, info)
return info.errormsg_cond(bool(res), self, str(res))
def __str__(self) -> str:
return "Bool()"
def _eq_impl(self, other: 'Bool') -> bool:
return True
class ValidTimeSpan(Type, click.ParamType):
"""
    A string that is parseable as a timespan by pytimeparse.
E.g. "32m" or "2h 32m".
"""
name = "valid_timespan" # type: str
""" click.ParamType name, that makes this class usable as a click type """
def __init__(self):
super().__init__()
def _instancecheck_impl(self, value, info: Info) -> InfoMsg:
res = Str().__instancecheck__(value, info)
<|code_end|>
. Use current file imports:
import textwrap
import typing as t
import itertools, os, click, inspect
import yaml
from temci.utils.util import parse_timespan
and context (classes, functions, or code) from other files:
# Path: temci/utils/util.py
# def parse_timespan(time: str) -> float:
# """
# Parse a time span expression, see https://pypi.org/project/pytimeparse/
#
# Supports -1 to express an infinite time span
#
# :param time: time span expression, mixture of different time units is possible
# :return: time span in seconds
# """
# try:
# return float(time)
# except ValueError:
# return pytimeparse.parse(time) if time != "-1" else -1
. Output only the next line. | wrong = not bool(res) or parse_timespan(value) == None |
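`parse_timespan`, quoted in the context above, tries a plain float first and only then falls back to pytimeparse; note that the string "-1" already parses as a float, so the explicit `-1` branch is only reachable for non-numeric input. A sketch assuming the pytimeparse package, as temci itself does:

```python
import pytimeparse

def parse_timespan(time: str) -> float:
    try:
        return float(time)            # plain numbers win, including "-1"
    except ValueError:
        return pytimeparse.parse(time) if time != "-1" else -1

assert parse_timespan("90") == 90.0
assert parse_timespan("2h 32m") == 9120  # 2*3600 + 32*60 seconds
assert parse_timespan("-1") == -1.0
```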
Given the following code snippet before the placeholder: <|code_start|>
return fmt.format(pval) + ["", "k", "M", "G", "T", "P"][exponents] + self.suffix()
def suffix(self) -> str:
return {self.IB: "iB", self.HZ: "Hz", self.NORMAL: ""}[self]
def format_nt(nt: t.NamedTuple, **units: Unit) -> t.List[t.Tuple[str, str]]:
"""
    Format the passed named tuple; some of its properties might have
associated units
"""
ret = []
for i, f in enumerate(nt._fields):
val = nt[i]
if f in units:
val = units[f].format(val)
ret.append([f, str(val)])
return ret
class CpuInfo(t.NamedTuple):
cpu: str
cores: int
threads: int
@staticmethod
@functools.lru_cache(1)
def create() -> 'CpuInfo':
<|code_end|>
, predict the next line using imports from the current file:
import enum
import functools
import subprocess
import typing as t
import multiprocessing
import psutil
import platform
from temci.utils import util
and context including class names, function names, and sometimes code from other files:
# Path: temci/utils/util.py
# def recursive_exec_for_leafs(data: dict, func, _path_prep = []):
# def has_root_privileges() -> bool:
# def has_pdflatex() -> bool:
# def does_command_succeed(cmd: str) -> bool:
# def warn_for_pdflatex_non_existence_once(_warned = [False]):
# def get_cache_line_size(cache_level: int = None) -> t.Optional[int]:
# def get_memory_page_size() -> int:
# def get_distribution_name() -> str:
# def get_distribution_release() -> str:
# def does_program_exist(program: str) -> bool:
# def on_apple_os() -> bool:
# def __enter__(self):
# def try_wait(self, wait_flags):
# def __exit__(self, exc_type, exc_val, exc_tb):
# def join_strs(strs: t.List[str], last_word: str = "and") -> str:
# def can_import(module: str) -> bool:
# def sphinx_doc() -> bool:
# def get_doc_for_type_scheme(type_scheme: 'Type') -> str:
# def document(**kwargs: t.Dict[str, str]):
# def dec(klass):
# def __call__(cls, *args, **kwargs):
# def __init__(self):
# def __delitem__(self, key):
# def __getitem__(self, key):
# def __setitem__(self, key, value):
# def __iter__(self):
# def values(self) -> t.List:
# def keys(self) -> t.List:
# def __len__(self):
# def items(self) -> t.List[t.Tuple[t.Any, t.Any]]:
# def from_list(cls, items: t.Optional[list], key_func: t.Callable[[t.Any], t.Any]) -> 'InsertionTimeOrderedDict':
# def geom_std(values: t.List[float]) -> float:
# def parse_timespan(time: str) -> float:
# def rusage_header() -> str:
# class proc_wait_with_rusage:
# class Singleton(type):
# class InsertionTimeOrderedDict:
. Output only the next line. | if util.on_apple_os(): |
Based on the snippet: <|code_start|>"""
Tests for reporters
"""
def test_console_reporter_auto_mode():
d = lambda d: {
"attributes": {"description": "XYZ" + d},
"data": {"p": [1]}
}
<|code_end|>
, predict the immediate next line with the help of imports:
import json
from tests.utils import run_temci, run_temci_proc
from temci.report.report import ReporterRegistry
and context (classes, functions, sometimes code) from other files:
# Path: tests/utils.py
# def run_temci(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None, raise_exc: bool = False) \
# -> Result:
# """
# Run temci with the passed arguments
#
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
# :param files: {file name: content as string or dictionary that is converted into YAML first}
# :param expect_success: expect a zero return code
# :param misc_env: additional environment variables
# :return: result of the call
# """
# if os.getenv("TEMCI_TEST_CMD", "0") == "1":
# return run_temci_proc(args, settings, files, expect_success, misc_env=misc_env)
# return run_temci_click(args, settings, files, expect_success, misc_env=misc_env, raise_exc=raise_exc)
#
# def run_temci_proc(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None) \
# -> Result:
# """
# Run temci with the passed arguments
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
#     :param files: {file name: content as string or dictionary that is converted into YAML first}
# :param expect_success: expect a zero return code
#
# :return: result of the call
# """
# with tempfile.TemporaryDirectory() as d:
# _store_files(files, str(d))
# cmd = "python3 {}/temci/scripts/cli.py {}".format(os.path.dirname(os.path.dirname(__file__)), args)
# if settings is not None:
# with open(d + "/settings.yaml", "w") as f:
# yaml.dump(settings, f)
# cmd += " --config settings.yaml"
# env = os.environ.copy()
# env["LC_ALL"] = "en_US.utf-8"
# env.update(misc_env or {})
# proc = subprocess.Popen(["/bin/sh", "-c", cmd],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
# cwd=str(d),
# env=env,
# universal_newlines=True)
# out, err = proc.communicate()
# file_contents, yaml_contents = _load_files(files, d=d)
# ret = Result(str(out).strip(), str(err).strip(), proc.returncode, file_contents, yaml_contents)
# if expect_success:
# assert proc.returncode == 0, repr(ret)
# return ret
. Output only the next line. | out = run_temci("report in.yaml --console_mode auto", |
Continue the code snippet: <|code_start|>def test_properties_regexp():
out = run_temci(r"report in.yaml --properties 'p.*'", files={
"in.yaml": [
{
"attributes": {"description": "XYZ"},
"data": {"p456": [1], "z111": [2]}
}
]
}).out
assert "p456" in out and "z111" not in out
def test_console_baseline():
run_temci(r"report in.yaml --console_baseline base", files={
"in.yaml": [
{
"attributes": {"description": "XYZ"},
"data": {"p456": [1], "z111": [2]}
},
{
"attributes": {"description": "base"},
"data": {"p456": [1], "z111": [2]}
}
]
}).out
def test_all_reporters():
for name, rep in ReporterRegistry.registry.items():
print(name)
<|code_end|>
. Use current file imports:
import json
from tests.utils import run_temci, run_temci_proc
from temci.report.report import ReporterRegistry
and context (classes, functions, or code) from other files:
# Path: tests/utils.py
# def run_temci(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None, raise_exc: bool = False) \
# -> Result:
# """
# Run temci with the passed arguments
#
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
# :param files: {file name: content as string or dictionary that is converted into YAML first}
# :param expect_success: expect a zero return code
# :param misc_env: additional environment variables
# :return: result of the call
# """
# if os.getenv("TEMCI_TEST_CMD", "0") == "1":
# return run_temci_proc(args, settings, files, expect_success, misc_env=misc_env)
# return run_temci_click(args, settings, files, expect_success, misc_env=misc_env, raise_exc=raise_exc)
#
# def run_temci_proc(args: str, settings: dict = None, files: Dict[str, Union[dict, list, str]] = None,
# expect_success: bool = True, misc_env: Dict[str, str] = None) \
# -> Result:
# """
# Run temci with the passed arguments
# :param args: arguments for temci
# :param settings: settings dictionary, stored in a file called `settings.yaml` and appended to the arguments
#     :param files: {file name: content as string or dictionary that is converted into YAML first}
# :param expect_success: expect a zero return code
#
# :return: result of the call
# """
# with tempfile.TemporaryDirectory() as d:
# _store_files(files, str(d))
# cmd = "python3 {}/temci/scripts/cli.py {}".format(os.path.dirname(os.path.dirname(__file__)), args)
# if settings is not None:
# with open(d + "/settings.yaml", "w") as f:
# yaml.dump(settings, f)
# cmd += " --config settings.yaml"
# env = os.environ.copy()
# env["LC_ALL"] = "en_US.utf-8"
# env.update(misc_env or {})
# proc = subprocess.Popen(["/bin/sh", "-c", cmd],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
# cwd=str(d),
# env=env,
# universal_newlines=True)
# out, err = proc.communicate()
# file_contents, yaml_contents = _load_files(files, d=d)
# ret = Result(str(out).strip(), str(err).strip(), proc.returncode, file_contents, yaml_contents)
# if expect_success:
# assert proc.returncode == 0, repr(ret)
# return ret
. Output only the next line. | run_temci_proc("report --reporter {} in.yaml".format(name), files={ |
Based on the snippet: <|code_start|> self.keep_alive = True
# This is a subset of {UPGRADE, CONNECT}, containing the proposals
# made by the client for switching protocols.
self.pending_switch_proposals: Set[Type[Sentinel]] = set()
self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE}
def process_error(self, role: Type[Sentinel]) -> None:
self.states[role] = ERROR
self._fire_state_triggered_transitions()
def process_keep_alive_disabled(self) -> None:
self.keep_alive = False
self._fire_state_triggered_transitions()
def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None:
self.pending_switch_proposals.add(switch_event)
self._fire_state_triggered_transitions()
def process_event(
self,
role: Type[Sentinel],
event_type: Type[Event],
server_switch_event: Optional[Type[Sentinel]] = None,
) -> None:
_event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type
if server_switch_event is not None:
assert role is SERVER
if server_switch_event not in self.pending_switch_proposals:
<|code_end|>
, predict the immediate next line with the help of imports:
from typing import cast, Dict, Optional, Set, Tuple, Type, Union
from ._events import *
from ._util import LocalProtocolError, Sentinel
and context (classes, functions, sometimes code) from other files:
# Path: h11/_util.py
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# class Sentinel(type):
# def __new__(
# cls: Type[_T_Sentinel],
# name: str,
# bases: Tuple[type, ...],
# namespace: Dict[str, Any],
# **kwds: Any
# ) -> _T_Sentinel:
# assert bases == (Sentinel,)
# v = super().__new__(cls, name, bases, namespace, **kwds)
# v.__class__ = v # type: ignore
# return v
#
# def __repr__(self) -> str:
# return self.__name__
. Output only the next line. | raise LocalProtocolError( |
Based on the snippet: <|code_start|>#
# It'd be nice if there were some cleaner way to do all this. This isn't
# *too* terrible, but I feel like it could probably be better.
#
# WARNING
# -------
#
# The script that generates the state machine diagrams for the docs knows how
# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS
# tables. But it can't automatically read the transitions that are written
# directly in Python code. So if you touch those, you need to also update the
# script to keep it in sync!
# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = [
"CLIENT",
"SERVER",
"IDLE",
"SEND_RESPONSE",
"SEND_BODY",
"DONE",
"MUST_CLOSE",
"CLOSED",
"MIGHT_SWITCH_PROTOCOL",
"SWITCHED_PROTOCOL",
"ERROR",
]
<|code_end|>
, predict the immediate next line with the help of imports:
from typing import cast, Dict, Optional, Set, Tuple, Type, Union
from ._events import *
from ._util import LocalProtocolError, Sentinel
and context (classes, functions, sometimes code) from other files:
# Path: h11/_util.py
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# class Sentinel(type):
# def __new__(
# cls: Type[_T_Sentinel],
# name: str,
# bases: Tuple[type, ...],
# namespace: Dict[str, Any],
# **kwds: Any
# ) -> _T_Sentinel:
# assert bases == (Sentinel,)
# v = super().__new__(cls, name, bases, namespace, **kwds)
# v.__class__ = v # type: ignore
# return v
#
# def __repr__(self) -> str:
# return self.__name__
. Output only the next line. | class CLIENT(Sentinel, metaclass=Sentinel): |
Based on the snippet: <|code_start|># High level events that make up HTTP/1.1 conversations. Loosely inspired by
# the corresponding events in hyper-h2:
#
# http://python-hyper.org/h2/en/stable/api.html#events
#
# Don't subclass these. Stuff will break.
# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = [
"Event",
"Request",
"InformationalResponse",
"Response",
"Data",
"EndOfMessage",
"ConnectionClosed",
]
<|code_end|>
, predict the immediate next line with the help of imports:
import re
from abc import ABC
from dataclasses import dataclass, field
from typing import Any, cast, Dict, List, Tuple, Union
from ._abnf import method, request_target
from ._headers import Headers, normalize_and_validate
from ._util import bytesify, LocalProtocolError, validate
and context (classes, functions, sometimes code) from other files:
# Path: h11/_abnf.py
# OWS = r"[ \t]*"
# HEXDIG = r"[0-9A-Fa-f]"
#
# Path: h11/_headers.py
# class Headers(Sequence[Tuple[bytes, bytes]]):
# """
# A list-like interface that allows iterating over headers as byte-pairs
# of (lowercased-name, value).
#
# Internally we actually store the representation as three-tuples,
# including both the raw original casing, in order to preserve casing
#     over-the-wire, and the lowercased name, for case-insensitive comparisons.
#
# r = Request(
# method="GET",
# target="/",
# headers=[("Host", "example.org"), ("Connection", "keep-alive")],
# http_version="1.1",
# )
# assert r.headers == [
# (b"host", b"example.org"),
# (b"connection", b"keep-alive")
# ]
# assert r.headers.raw_items() == [
# (b"Host", b"example.org"),
# (b"Connection", b"keep-alive")
# ]
# """
#
# __slots__ = "_full_items"
#
# def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
# self._full_items = full_items
#
# def __bool__(self) -> bool:
# return bool(self._full_items)
#
# def __eq__(self, other: object) -> bool:
# return list(self) == list(other) # type: ignore
#
# def __len__(self) -> int:
# return len(self._full_items)
#
# def __repr__(self) -> str:
# return "<Headers(%s)>" % repr(list(self))
#
# def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]
# _, name, value = self._full_items[idx]
# return (name, value)
#
# def raw_items(self) -> List[Tuple[bytes, bytes]]:
# return [(raw_name, value) for raw_name, _, value in self._full_items]
#
# @overload
# def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
# ...
#
# Path: h11/_util.py
# def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# # Fast-path:
# if type(s) is bytes:
# return s
# if isinstance(s, str):
# s = s.encode("ascii")
# if isinstance(s, int):
# raise TypeError("expected bytes-like object, not int")
# return bytes(s)
#
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# def validate(
# regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
# ) -> Dict[str, bytes]:
# match = regex.fullmatch(data)
# if not match:
# if format_args:
# msg = msg.format(*format_args)
# raise LocalProtocolError(msg)
# return match.groupdict()
. Output only the next line. | method_re = re.compile(method.encode("ascii")) |
Based on the snippet: <|code_start|># High level events that make up HTTP/1.1 conversations. Loosely inspired by
# the corresponding events in hyper-h2:
#
# http://python-hyper.org/h2/en/stable/api.html#events
#
# Don't subclass these. Stuff will break.
# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = [
"Event",
"Request",
"InformationalResponse",
"Response",
"Data",
"EndOfMessage",
"ConnectionClosed",
]
method_re = re.compile(method.encode("ascii"))
<|code_end|>
, predict the immediate next line with the help of imports:
import re
from abc import ABC
from dataclasses import dataclass, field
from typing import Any, cast, Dict, List, Tuple, Union
from ._abnf import method, request_target
from ._headers import Headers, normalize_and_validate
from ._util import bytesify, LocalProtocolError, validate
and context (classes, functions, sometimes code) from other files:
# Path: h11/_abnf.py
# OWS = r"[ \t]*"
# HEXDIG = r"[0-9A-Fa-f]"
#
# Path: h11/_headers.py
# class Headers(Sequence[Tuple[bytes, bytes]]):
# """
# A list-like interface that allows iterating over headers as byte-pairs
# of (lowercased-name, value).
#
# Internally we actually store the representation as three-tuples,
# including both the raw original casing, in order to preserve casing
#     over-the-wire, and the lowercased name, for case-insensitive comparisons.
#
# r = Request(
# method="GET",
# target="/",
# headers=[("Host", "example.org"), ("Connection", "keep-alive")],
# http_version="1.1",
# )
# assert r.headers == [
# (b"host", b"example.org"),
# (b"connection", b"keep-alive")
# ]
# assert r.headers.raw_items() == [
# (b"Host", b"example.org"),
# (b"Connection", b"keep-alive")
# ]
# """
#
# __slots__ = "_full_items"
#
# def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
# self._full_items = full_items
#
# def __bool__(self) -> bool:
# return bool(self._full_items)
#
# def __eq__(self, other: object) -> bool:
# return list(self) == list(other) # type: ignore
#
# def __len__(self) -> int:
# return len(self._full_items)
#
# def __repr__(self) -> str:
# return "<Headers(%s)>" % repr(list(self))
#
# def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]
# _, name, value = self._full_items[idx]
# return (name, value)
#
# def raw_items(self) -> List[Tuple[bytes, bytes]]:
# return [(raw_name, value) for raw_name, _, value in self._full_items]
#
# @overload
# def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
# ...
#
# Path: h11/_util.py
# def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# # Fast-path:
# if type(s) is bytes:
# return s
# if isinstance(s, str):
# s = s.encode("ascii")
# if isinstance(s, int):
# raise TypeError("expected bytes-like object, not int")
# return bytes(s)
#
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# def validate(
# regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
# ) -> Dict[str, bytes]:
# match = regex.fullmatch(data)
# if not match:
# if format_args:
# msg = msg.format(*format_args)
# raise LocalProtocolError(msg)
# return match.groupdict()
. Output only the next line. | request_target_re = re.compile(request_target.encode("ascii")) |
Given snippet: <|code_start|> An HTTP method, e.g. ``b"GET"`` or ``b"POST"``. Always a byte
string. :term:`Bytes-like objects <bytes-like object>` and native
strings containing only ascii characters will be automatically
converted to byte strings.
.. attribute:: target
The target of an HTTP request, e.g. ``b"/index.html"``, or one of the
       more exotic formats described in `RFC 7230, section 5.3
<https://tools.ietf.org/html/rfc7230#section-5.3>`_. Always a byte
string. :term:`Bytes-like objects <bytes-like object>` and native
strings containing only ascii characters will be automatically
converted to byte strings.
.. attribute:: headers
Request headers, represented as a list of (name, value) pairs. See
:ref:`the header normalization rules <headers-format>` for details.
.. attribute:: http_version
The HTTP protocol version, represented as a byte string like
``b"1.1"``. See :ref:`the HTTP version normalization rules
<http_version-format>` for details.
"""
__slots__ = ("method", "headers", "target", "http_version")
method: bytes
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from abc import ABC
from dataclasses import dataclass, field
from typing import Any, cast, Dict, List, Tuple, Union
from ._abnf import method, request_target
from ._headers import Headers, normalize_and_validate
from ._util import bytesify, LocalProtocolError, validate
and context:
# Path: h11/_abnf.py
# OWS = r"[ \t]*"
# HEXDIG = r"[0-9A-Fa-f]"
#
# Path: h11/_headers.py
# class Headers(Sequence[Tuple[bytes, bytes]]):
# """
# A list-like interface that allows iterating over headers as byte-pairs
# of (lowercased-name, value).
#
# Internally we actually store the representation as three-tuples,
# including both the raw original casing, in order to preserve casing
#     over-the-wire, and the lowercased name, for case-insensitive comparisons.
#
# r = Request(
# method="GET",
# target="/",
# headers=[("Host", "example.org"), ("Connection", "keep-alive")],
# http_version="1.1",
# )
# assert r.headers == [
# (b"host", b"example.org"),
# (b"connection", b"keep-alive")
# ]
# assert r.headers.raw_items() == [
# (b"Host", b"example.org"),
# (b"Connection", b"keep-alive")
# ]
# """
#
# __slots__ = "_full_items"
#
# def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
# self._full_items = full_items
#
# def __bool__(self) -> bool:
# return bool(self._full_items)
#
# def __eq__(self, other: object) -> bool:
# return list(self) == list(other) # type: ignore
#
# def __len__(self) -> int:
# return len(self._full_items)
#
# def __repr__(self) -> str:
# return "<Headers(%s)>" % repr(list(self))
#
# def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]
# _, name, value = self._full_items[idx]
# return (name, value)
#
# def raw_items(self) -> List[Tuple[bytes, bytes]]:
# return [(raw_name, value) for raw_name, _, value in self._full_items]
#
# @overload
# def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
# ...
#
# Path: h11/_util.py
# def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# # Fast-path:
# if type(s) is bytes:
# return s
# if isinstance(s, str):
# s = s.encode("ascii")
# if isinstance(s, int):
# raise TypeError("expected bytes-like object, not int")
# return bytes(s)
#
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# def validate(
# regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
# ) -> Dict[str, bytes]:
# match = regex.fullmatch(data)
# if not match:
# if format_args:
# msg = msg.format(*format_args)
# raise LocalProtocolError(msg)
# return match.groupdict()
which might include code, classes, or functions. Output only the next line. | headers: Headers |
Based on the snippet: <|code_start|>
.. attribute:: http_version
The HTTP protocol version, represented as a byte string like
``b"1.1"``. See :ref:`the HTTP version normalization rules
<http_version-format>` for details.
"""
__slots__ = ("method", "headers", "target", "http_version")
method: bytes
headers: Headers
target: bytes
http_version: bytes
def __init__(
self,
*,
method: Union[bytes, str],
headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]],
target: Union[bytes, str],
http_version: Union[bytes, str] = b"1.1",
_parsed: bool = False,
) -> None:
super().__init__()
if isinstance(headers, Headers):
object.__setattr__(self, "headers", headers)
else:
object.__setattr__(
<|code_end|>
, predict the immediate next line with the help of imports:
import re
from abc import ABC
from dataclasses import dataclass, field
from typing import Any, cast, Dict, List, Tuple, Union
from ._abnf import method, request_target
from ._headers import Headers, normalize_and_validate
from ._util import bytesify, LocalProtocolError, validate
and context (classes, functions, sometimes code) from other files:
# Path: h11/_abnf.py
# OWS = r"[ \t]*"
# HEXDIG = r"[0-9A-Fa-f]"
#
# Path: h11/_headers.py
# class Headers(Sequence[Tuple[bytes, bytes]]):
# """
# A list-like interface that allows iterating over headers as byte-pairs
# of (lowercased-name, value).
#
# Internally we actually store the representation as three-tuples,
# including both the raw original casing, in order to preserve casing
#     over-the-wire, and the lowercased name, for case-insensitive comparisons.
#
# r = Request(
# method="GET",
# target="/",
# headers=[("Host", "example.org"), ("Connection", "keep-alive")],
# http_version="1.1",
# )
# assert r.headers == [
# (b"host", b"example.org"),
# (b"connection", b"keep-alive")
# ]
# assert r.headers.raw_items() == [
# (b"Host", b"example.org"),
# (b"Connection", b"keep-alive")
# ]
# """
#
# __slots__ = "_full_items"
#
# def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
# self._full_items = full_items
#
# def __bool__(self) -> bool:
# return bool(self._full_items)
#
# def __eq__(self, other: object) -> bool:
# return list(self) == list(other) # type: ignore
#
# def __len__(self) -> int:
# return len(self._full_items)
#
# def __repr__(self) -> str:
# return "<Headers(%s)>" % repr(list(self))
#
# def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]
# _, name, value = self._full_items[idx]
# return (name, value)
#
# def raw_items(self) -> List[Tuple[bytes, bytes]]:
# return [(raw_name, value) for raw_name, _, value in self._full_items]
#
# @overload
# def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
# ...
#
# Path: h11/_util.py
# def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# # Fast-path:
# if type(s) is bytes:
# return s
# if isinstance(s, str):
# s = s.encode("ascii")
# if isinstance(s, int):
# raise TypeError("expected bytes-like object, not int")
# return bytes(s)
#
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# def validate(
# regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
# ) -> Dict[str, bytes]:
# match = regex.fullmatch(data)
# if not match:
# if format_args:
# msg = msg.format(*format_args)
# raise LocalProtocolError(msg)
# return match.groupdict()
. Output only the next line. | self, "headers", normalize_and_validate(headers, _parsed=_parsed) |
Given the following code snippet before the placeholder: <|code_start|> The HTTP protocol version, represented as a byte string like
``b"1.1"``. See :ref:`the HTTP version normalization rules
<http_version-format>` for details.
"""
__slots__ = ("method", "headers", "target", "http_version")
method: bytes
headers: Headers
target: bytes
http_version: bytes
def __init__(
self,
*,
method: Union[bytes, str],
headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]],
target: Union[bytes, str],
http_version: Union[bytes, str] = b"1.1",
_parsed: bool = False,
) -> None:
super().__init__()
if isinstance(headers, Headers):
object.__setattr__(self, "headers", headers)
else:
object.__setattr__(
self, "headers", normalize_and_validate(headers, _parsed=_parsed)
)
if not _parsed:
<|code_end|>
, predict the next line using imports from the current file:
import re
from abc import ABC
from dataclasses import dataclass, field
from typing import Any, cast, Dict, List, Tuple, Union
from ._abnf import method, request_target
from ._headers import Headers, normalize_and_validate
from ._util import bytesify, LocalProtocolError, validate
and context including class names, function names, and sometimes code from other files:
# Path: h11/_abnf.py
# OWS = r"[ \t]*"
# HEXDIG = r"[0-9A-Fa-f]"
#
# Path: h11/_headers.py
# class Headers(Sequence[Tuple[bytes, bytes]]):
# """
# A list-like interface that allows iterating over headers as byte-pairs
# of (lowercased-name, value).
#
# Internally we actually store the representation as three-tuples,
# including both the raw original casing, in order to preserve casing
#     over-the-wire, and the lowercased name, for case-insensitive comparisons.
#
# r = Request(
# method="GET",
# target="/",
# headers=[("Host", "example.org"), ("Connection", "keep-alive")],
# http_version="1.1",
# )
# assert r.headers == [
# (b"host", b"example.org"),
# (b"connection", b"keep-alive")
# ]
# assert r.headers.raw_items() == [
# (b"Host", b"example.org"),
# (b"Connection", b"keep-alive")
# ]
# """
#
# __slots__ = "_full_items"
#
# def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
# self._full_items = full_items
#
# def __bool__(self) -> bool:
# return bool(self._full_items)
#
# def __eq__(self, other: object) -> bool:
# return list(self) == list(other) # type: ignore
#
# def __len__(self) -> int:
# return len(self._full_items)
#
# def __repr__(self) -> str:
# return "<Headers(%s)>" % repr(list(self))
#
# def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]
# _, name, value = self._full_items[idx]
# return (name, value)
#
# def raw_items(self) -> List[Tuple[bytes, bytes]]:
# return [(raw_name, value) for raw_name, _, value in self._full_items]
#
# @overload
# def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
# ...
#
# Path: h11/_util.py
# def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# # Fast-path:
# if type(s) is bytes:
# return s
# if isinstance(s, str):
# s = s.encode("ascii")
# if isinstance(s, int):
# raise TypeError("expected bytes-like object, not int")
# return bytes(s)
#
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# def validate(
# regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
# ) -> Dict[str, bytes]:
# match = regex.fullmatch(data)
# if not match:
# if format_args:
# msg = msg.format(*format_args)
# raise LocalProtocolError(msg)
# return match.groupdict()
. Output only the next line. | object.__setattr__(self, "method", bytesify(method)) |
Given the following code snippet before the placeholder: <|code_start|> target: Union[bytes, str],
http_version: Union[bytes, str] = b"1.1",
_parsed: bool = False,
) -> None:
super().__init__()
if isinstance(headers, Headers):
object.__setattr__(self, "headers", headers)
else:
object.__setattr__(
self, "headers", normalize_and_validate(headers, _parsed=_parsed)
)
if not _parsed:
object.__setattr__(self, "method", bytesify(method))
object.__setattr__(self, "target", bytesify(target))
object.__setattr__(self, "http_version", bytesify(http_version))
else:
object.__setattr__(self, "method", method)
object.__setattr__(self, "target", target)
object.__setattr__(self, "http_version", http_version)
# "A server MUST respond with a 400 (Bad Request) status code to any
# HTTP/1.1 request message that lacks a Host header field and to any
# request message that contains more than one Host header field or a
# Host header field with an invalid field-value."
# -- https://tools.ietf.org/html/rfc7230#section-5.4
host_count = 0
for name, value in self.headers:
if name == b"host":
host_count += 1
if self.http_version == b"1.1" and host_count == 0:
<|code_end|>
, predict the next line using imports from the current file:
import re
from abc import ABC
from dataclasses import dataclass, field
from typing import Any, cast, Dict, List, Tuple, Union
from ._abnf import method, request_target
from ._headers import Headers, normalize_and_validate
from ._util import bytesify, LocalProtocolError, validate
and context including class names, function names, and sometimes code from other files:
# Path: h11/_abnf.py
# OWS = r"[ \t]*"
# HEXDIG = r"[0-9A-Fa-f]"
#
# Path: h11/_headers.py
# class Headers(Sequence[Tuple[bytes, bytes]]):
# """
# A list-like interface that allows iterating over headers as byte-pairs
# of (lowercased-name, value).
#
# Internally we actually store the representation as three-tuples,
# including both the raw original casing, in order to preserve casing
#     over-the-wire, and the lowercased name, for case-insensitive comparisons.
#
# r = Request(
# method="GET",
# target="/",
# headers=[("Host", "example.org"), ("Connection", "keep-alive")],
# http_version="1.1",
# )
# assert r.headers == [
# (b"host", b"example.org"),
# (b"connection", b"keep-alive")
# ]
# assert r.headers.raw_items() == [
# (b"Host", b"example.org"),
# (b"Connection", b"keep-alive")
# ]
# """
#
# __slots__ = "_full_items"
#
# def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
# self._full_items = full_items
#
# def __bool__(self) -> bool:
# return bool(self._full_items)
#
# def __eq__(self, other: object) -> bool:
# return list(self) == list(other) # type: ignore
#
# def __len__(self) -> int:
# return len(self._full_items)
#
# def __repr__(self) -> str:
# return "<Headers(%s)>" % repr(list(self))
#
# def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]
# _, name, value = self._full_items[idx]
# return (name, value)
#
# def raw_items(self) -> List[Tuple[bytes, bytes]]:
# return [(raw_name, value) for raw_name, _, value in self._full_items]
#
# @overload
# def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
# ...
#
# Path: h11/_util.py
# def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# # Fast-path:
# if type(s) is bytes:
# return s
# if isinstance(s, str):
# s = s.encode("ascii")
# if isinstance(s, int):
# raise TypeError("expected bytes-like object, not int")
# return bytes(s)
#
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# def validate(
# regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
# ) -> Dict[str, bytes]:
# match = regex.fullmatch(data)
# if not match:
# if format_args:
# msg = msg.format(*format_args)
# raise LocalProtocolError(msg)
# return match.groupdict()
. Output only the next line. | raise LocalProtocolError("Missing mandatory Host: header") |
Given snippet: <|code_start|> super().__init__()
if isinstance(headers, Headers):
object.__setattr__(self, "headers", headers)
else:
object.__setattr__(
self, "headers", normalize_and_validate(headers, _parsed=_parsed)
)
if not _parsed:
object.__setattr__(self, "method", bytesify(method))
object.__setattr__(self, "target", bytesify(target))
object.__setattr__(self, "http_version", bytesify(http_version))
else:
object.__setattr__(self, "method", method)
object.__setattr__(self, "target", target)
object.__setattr__(self, "http_version", http_version)
# "A server MUST respond with a 400 (Bad Request) status code to any
# HTTP/1.1 request message that lacks a Host header field and to any
# request message that contains more than one Host header field or a
# Host header field with an invalid field-value."
# -- https://tools.ietf.org/html/rfc7230#section-5.4
host_count = 0
for name, value in self.headers:
if name == b"host":
host_count += 1
if self.http_version == b"1.1" and host_count == 0:
raise LocalProtocolError("Missing mandatory Host: header")
if host_count > 1:
raise LocalProtocolError("Found multiple Host: headers")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from abc import ABC
from dataclasses import dataclass, field
from typing import Any, cast, Dict, List, Tuple, Union
from ._abnf import method, request_target
from ._headers import Headers, normalize_and_validate
from ._util import bytesify, LocalProtocolError, validate
and context:
# Path: h11/_abnf.py
# OWS = r"[ \t]*"
# HEXDIG = r"[0-9A-Fa-f]"
#
# Path: h11/_headers.py
# class Headers(Sequence[Tuple[bytes, bytes]]):
# """
# A list-like interface that allows iterating over headers as byte-pairs
# of (lowercased-name, value).
#
# Internally we actually store the representation as three-tuples,
# including both the raw original casing, in order to preserve casing
#     over-the-wire, and the lowercased name, for case-insensitive comparisons.
#
# r = Request(
# method="GET",
# target="/",
# headers=[("Host", "example.org"), ("Connection", "keep-alive")],
# http_version="1.1",
# )
# assert r.headers == [
# (b"host", b"example.org"),
# (b"connection", b"keep-alive")
# ]
# assert r.headers.raw_items() == [
# (b"Host", b"example.org"),
# (b"Connection", b"keep-alive")
# ]
# """
#
# __slots__ = "_full_items"
#
# def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
# self._full_items = full_items
#
# def __bool__(self) -> bool:
# return bool(self._full_items)
#
# def __eq__(self, other: object) -> bool:
# return list(self) == list(other) # type: ignore
#
# def __len__(self) -> int:
# return len(self._full_items)
#
# def __repr__(self) -> str:
# return "<Headers(%s)>" % repr(list(self))
#
# def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]
# _, name, value = self._full_items[idx]
# return (name, value)
#
# def raw_items(self) -> List[Tuple[bytes, bytes]]:
# return [(raw_name, value) for raw_name, _, value in self._full_items]
#
# @overload
# def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
# ...
#
# Path: h11/_util.py
# def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# # Fast-path:
# if type(s) is bytes:
# return s
# if isinstance(s, str):
# s = s.encode("ascii")
# if isinstance(s, int):
# raise TypeError("expected bytes-like object, not int")
# return bytes(s)
#
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# def validate(
# regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
# ) -> Dict[str, bytes]:
# match = regex.fullmatch(data)
# if not match:
# if format_args:
# msg = msg.format(*format_args)
# raise LocalProtocolError(msg)
# return match.groupdict()
which might include code, classes, or functions. Output only the next line. | validate(method_re, self.method, "Illegal method characters") |
Based on the snippet: <|code_start|>
with pytest.raises(LocalProtocolError) as excinfo:
validate(my_re, b"", "oops {}")
assert "oops {}" in str(excinfo.value)
with pytest.raises(LocalProtocolError) as excinfo:
validate(my_re, b"", "oops {} xx", 10)
assert "oops 10 xx" in str(excinfo.value)
def test_make_sentinel() -> None:
class S(Sentinel, metaclass=Sentinel):
pass
assert repr(S) == "S"
assert S == S
assert type(S).__name__ == "S"
assert S in {S}
assert type(S) is S
class S2(Sentinel, metaclass=Sentinel):
pass
assert repr(S2) == "S2"
assert S != S2
assert S not in {S2}
assert type(S) is not type(S2)
def test_bytesify() -> None:
<|code_end|>
, predict the immediate next line with the help of imports:
import re
import sys
import traceback
import pytest
from typing import NoReturn
from .._util import (
bytesify,
LocalProtocolError,
ProtocolError,
RemoteProtocolError,
Sentinel,
validate,
)
and context (classes, functions, sometimes code) from other files:
# Path: h11/_util.py
# def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# # Fast-path:
# if type(s) is bytes:
# return s
# if isinstance(s, str):
# s = s.encode("ascii")
# if isinstance(s, int):
# raise TypeError("expected bytes-like object, not int")
# return bytes(s)
#
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# class ProtocolError(Exception):
# """Exception indicating a violation of the HTTP/1.1 protocol.
#
#     This is an abstract base class, with two concrete subclasses:
# :exc:`LocalProtocolError`, which indicates that you tried to do something
# that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which
# indicates that the remote peer tried to do something that HTTP/1.1 says is
# illegal. See :ref:`error-handling` for details.
#
# In addition to the normal :exc:`Exception` features, it has one attribute:
#
# .. attribute:: error_status_hint
#
# This gives a suggestion as to what status code a server might use if
# this error occurred as part of a request.
#
# For a :exc:`RemoteProtocolError`, this is useful as a suggestion for
# how you might want to respond to a misbehaving peer, if you're
# implementing a server.
#
# For a :exc:`LocalProtocolError`, this can be taken as a suggestion for
# how your peer might have responded to *you* if h11 had allowed you to
# continue.
#
# The default is 400 Bad Request, a generic catch-all for protocol
# violations.
#
# """
#
# def __init__(self, msg: str, error_status_hint: int = 400) -> None:
# if type(self) is ProtocolError:
# raise TypeError("tried to directly instantiate ProtocolError")
# Exception.__init__(self, msg)
# self.error_status_hint = error_status_hint
#
# class RemoteProtocolError(ProtocolError):
# pass
#
# class Sentinel(type):
# def __new__(
# cls: Type[_T_Sentinel],
# name: str,
# bases: Tuple[type, ...],
# namespace: Dict[str, Any],
# **kwds: Any
# ) -> _T_Sentinel:
# assert bases == (Sentinel,)
# v = super().__new__(cls, name, bases, namespace, **kwds)
# v.__class__ = v # type: ignore
# return v
#
# def __repr__(self) -> str:
# return self.__name__
#
# def validate(
# regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
# ) -> Dict[str, bytes]:
# match = regex.fullmatch(data)
# if not match:
# if format_args:
# msg = msg.format(*format_args)
# raise LocalProtocolError(msg)
# return match.groupdict()
. Output only the next line. | assert bytesify(b"123") == b"123" |
Predict the next line for this snippet: <|code_start|>
def test_ProtocolError() -> None:
with pytest.raises(TypeError):
ProtocolError("abstract base class")
def test_LocalProtocolError() -> None:
try:
<|code_end|>
with the help of current file imports:
import re
import sys
import traceback
import pytest
from typing import NoReturn
from .._util import (
bytesify,
LocalProtocolError,
ProtocolError,
RemoteProtocolError,
Sentinel,
validate,
)
and context from other files:
# Path: h11/_util.py
# def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# # Fast-path:
# if type(s) is bytes:
# return s
# if isinstance(s, str):
# s = s.encode("ascii")
# if isinstance(s, int):
# raise TypeError("expected bytes-like object, not int")
# return bytes(s)
#
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# class ProtocolError(Exception):
# """Exception indicating a violation of the HTTP/1.1 protocol.
#
#     This is an abstract base class, with two concrete subclasses:
# :exc:`LocalProtocolError`, which indicates that you tried to do something
# that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which
# indicates that the remote peer tried to do something that HTTP/1.1 says is
# illegal. See :ref:`error-handling` for details.
#
# In addition to the normal :exc:`Exception` features, it has one attribute:
#
# .. attribute:: error_status_hint
#
# This gives a suggestion as to what status code a server might use if
# this error occurred as part of a request.
#
# For a :exc:`RemoteProtocolError`, this is useful as a suggestion for
# how you might want to respond to a misbehaving peer, if you're
# implementing a server.
#
# For a :exc:`LocalProtocolError`, this can be taken as a suggestion for
# how your peer might have responded to *you* if h11 had allowed you to
# continue.
#
# The default is 400 Bad Request, a generic catch-all for protocol
# violations.
#
# """
#
# def __init__(self, msg: str, error_status_hint: int = 400) -> None:
# if type(self) is ProtocolError:
# raise TypeError("tried to directly instantiate ProtocolError")
# Exception.__init__(self, msg)
# self.error_status_hint = error_status_hint
#
# class RemoteProtocolError(ProtocolError):
# pass
#
# class Sentinel(type):
# def __new__(
# cls: Type[_T_Sentinel],
# name: str,
# bases: Tuple[type, ...],
# namespace: Dict[str, Any],
# **kwds: Any
# ) -> _T_Sentinel:
# assert bases == (Sentinel,)
# v = super().__new__(cls, name, bases, namespace, **kwds)
# v.__class__ = v # type: ignore
# return v
#
# def __repr__(self) -> str:
# return self.__name__
#
# def validate(
# regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
# ) -> Dict[str, bytes]:
# match = regex.fullmatch(data)
# if not match:
# if format_args:
# msg = msg.format(*format_args)
# raise LocalProtocolError(msg)
# return match.groupdict()
, which may contain function names, class names, or code. Output only the next line. | raise LocalProtocolError("foo") |
Based on the snippet: <|code_start|>
def test_ProtocolError() -> None:
with pytest.raises(TypeError):
ProtocolError("abstract base class")
def test_LocalProtocolError() -> None:
try:
raise LocalProtocolError("foo")
except LocalProtocolError as e:
assert str(e) == "foo"
assert e.error_status_hint == 400
try:
raise LocalProtocolError("foo", error_status_hint=418)
except LocalProtocolError as e:
assert str(e) == "foo"
assert e.error_status_hint == 418
def thunk() -> NoReturn:
raise LocalProtocolError("a", error_status_hint=420)
try:
try:
thunk()
except LocalProtocolError as exc1:
orig_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
exc1._reraise_as_remote_protocol_error()
<|code_end|>
, predict the immediate next line with the help of imports:
import re
import sys
import traceback
import pytest
from typing import NoReturn
from .._util import (
bytesify,
LocalProtocolError,
ProtocolError,
RemoteProtocolError,
Sentinel,
validate,
)
and context (classes, functions, sometimes code) from other files:
# Path: h11/_util.py
# def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
# # Fast-path:
# if type(s) is bytes:
# return s
# if isinstance(s, str):
# s = s.encode("ascii")
# if isinstance(s, int):
# raise TypeError("expected bytes-like object, not int")
# return bytes(s)
#
# class LocalProtocolError(ProtocolError):
# def _reraise_as_remote_protocol_error(self) -> NoReturn:
# # After catching a LocalProtocolError, use this method to re-raise it
# # as a RemoteProtocolError. This method must be called from inside an
# # except: block.
# #
# # An easy way to get an equivalent RemoteProtocolError is just to
# # modify 'self' in place.
# self.__class__ = RemoteProtocolError # type: ignore
# # But the re-raising is somewhat non-trivial -- you might think that
# # now that we've modified the in-flight exception object, that just
# # doing 'raise' to re-raise it would be enough. But it turns out that
# # this doesn't work, because Python tracks the exception type
# # (exc_info[0]) separately from the exception object (exc_info[1]),
# # and we only modified the latter. So we really do need to re-raise
# # the new type explicitly.
# # On py3, the traceback is part of the exception object, so our
# # in-place modification preserved it and we can just re-raise:
# raise self
#
# class ProtocolError(Exception):
# """Exception indicating a violation of the HTTP/1.1 protocol.
#
#     This is an abstract base class, with two concrete subclasses:
# :exc:`LocalProtocolError`, which indicates that you tried to do something
# that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which
# indicates that the remote peer tried to do something that HTTP/1.1 says is
# illegal. See :ref:`error-handling` for details.
#
# In addition to the normal :exc:`Exception` features, it has one attribute:
#
# .. attribute:: error_status_hint
#
# This gives a suggestion as to what status code a server might use if
# this error occurred as part of a request.
#
# For a :exc:`RemoteProtocolError`, this is useful as a suggestion for
# how you might want to respond to a misbehaving peer, if you're
# implementing a server.
#
# For a :exc:`LocalProtocolError`, this can be taken as a suggestion for
# how your peer might have responded to *you* if h11 had allowed you to
# continue.
#
# The default is 400 Bad Request, a generic catch-all for protocol
# violations.
#
# """
#
# def __init__(self, msg: str, error_status_hint: int = 400) -> None:
# if type(self) is ProtocolError:
# raise TypeError("tried to directly instantiate ProtocolError")
# Exception.__init__(self, msg)
# self.error_status_hint = error_status_hint
#
# class RemoteProtocolError(ProtocolError):
# pass
#
# class Sentinel(type):
# def __new__(
# cls: Type[_T_Sentinel],
# name: str,
# bases: Tuple[type, ...],
# namespace: Dict[str, Any],
# **kwds: Any
# ) -> _T_Sentinel:
# assert bases == (Sentinel,)
# v = super().__new__(cls, name, bases, namespace, **kwds)
# v.__class__ = v # type: ignore
# return v
#
# def __repr__(self) -> str:
# return self.__name__
#
# def validate(
# regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
# ) -> Dict[str, bytes]:
# match = regex.fullmatch(data)
# if not match:
# if format_args:
# msg = msg.format(*format_args)
# raise LocalProtocolError(msg)
# return match.groupdict()
. Output only the next line. | except RemoteProtocolError as exc2: |