Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Continue the code snippet: <|code_start|>
class NewBrowserPerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser()
@classmethod
def tearDownClass(cls):
del cls.browser
def test_duckduckgo(self):
<|code_end|>
. Use current file imports:
import unittest
from xpathwebdriver.browser import Browser
and context (classes, functions, or code) from other files:
# Path: xpathwebdriver/browser.py
# class Browser(XpathBrowser):
# '''
# Use this class when using only one browser at a time
# and you don't require complex browser leveling.
#
# For multiple browsers at the same time check examples/04_mutiple_browsers.py
# '''
# def __init__(self, base_url=None, logger=None, settings=None, context_name='default'):
# if settings:
# register_settings_instance(settings)
# else:
# settings = solve_settings()
# self._browser_context = WebdriverManager().get_browser(context_name=context_name)
# XpathBrowser.__init__(self, self._browser_context.acquire_driver(),
# base_url=base_url, logger=logger, settings=settings)
#
# def __del__(self):
# self._free_webdriver()
#
# def _free_webdriver(self):
# # We may get an exception before setting _browser_context
# if getattr(self, '_browser_context', None):
# self._browser_context.__exit__()
# self._browser_context = None
#
# def _quit_failed_webdriver(self):
# failed = WebdriverManager()._is_failed_webdriver(self.driver)
# if failed:
# self._free_webdriver()
# return failed
. Output only the next line. | self.browser.get_url('https://duckduckgo.com/') |
Using the snippet: <|code_start|>
class DuckDuckTest(unittest.TestCase):
def test_duckduckgo(self):
with get_browser() as browser:
driver = browser.driver
driver.get('https://duckduckgo.com')
element = driver.find_element_by_id('search_form_input_homepage')
element.send_keys('Example')
if __name__ == "__main__":
<|code_end|>
, determine the next line of code. You have imports:
import unittest
from xpathwebdriver.webdriver_manager import get_browser
and context (class names, function names, or code) available:
# Path: xpathwebdriver/webdriver_manager.py
# @synchronized(_methods_lock)
# def get_browser(self, context_name='default', browser=None):
# '''
# :param name: optional context's name (mainly for logging purposes)
# :param browser: optional browser name string (eg: 'Firefox', 'Chrome', 'PhantomJs')
# '''
# self._current_context_level += 1
# level = self._current_context_level
# self.init_level(level, browser, context_name)
# return BrowserContextManager(self, level, context_name=context_name, browser_name=browser)
. Output only the next line. | unittest.main() |
Given snippet: <|code_start|>
class TestXpathBrowserWipeAlerts(WebUnitTestBase):
def test_wipe_alerts(self):
body = '''
<script type="text/javascript">
alert('Example alert');
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from xpathwebdriver_tests.test_XpathBrowser import WebUnitTestBase
from selenium.common.exceptions import UnexpectedAlertPresentException
import unittest
and context:
# Path: xpathwebdriver_tests/test_XpathBrowser.py
# class WebUnitTestBase(unittest.TestCase):
#
# port = 8080
# host = 'localhost'
# @classmethod
# def _path_to_url(cls, path):
# return f'http://{cls.host}:{cls.port}/{path}'
#
# @classmethod
# def get_local_page(cls, path):
# cls.browser.get_url(cls._path_to_url(path))
#
# @contextmanager
# def create_html(self, name, body, **kwargs):
# try:
# self.push_page(name, body, **kwargs)
# yield name
# except:
# raise
# finally:
# self.pop_page(name)
#
# def push_page(self, name, body, **kwargs):
# templ = '''
# <!DOCTYPE html>
# <html>
# <head>
# {jquery}
# <title>{name}</title>
# </head>
# <body>
# {body}
# </body>
# </html>
# '''
# jquery = ''
# tmpl_vars = locals().copy()
# tmpl_vars.update(kwargs)
# self._pages_cache[name] = templ.format(**tmpl_vars)
#
# def pop_page(self, name):
# return self._pages_cache.pop(name)
#
# @classmethod
# def setUpClass(cls):
# class Settings(DefaultSettings):
# xpathbrowser_sleep_multiplier = 0.1
# xpathbrowser_sleep_default_time = 0.1
# register_settings_instance(Settings())
# cls.browser = Browser(settings=solve_settings())
# cls._pages_cache = {}
# cls.setup_http_server()
#
# @classmethod
# def setup_http_server(cls):
# class MyServer(bottle.WSGIRefServer):
# def run(self, app): # pragma: no cover
# from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
# from wsgiref.simple_server import make_server
# import socket
# class FixedHandler(WSGIRequestHandler):
# def address_string(self): # Prevent reverse DNS lookups please.
# return self.client_address[0]
# def log_request(*args, **kw):
# if not self.quiet:
# return WSGIRequestHandler.log_request(*args, **kw)
# handler_cls = self.options.get('handler_class', FixedHandler)
# server_cls = self.options.get('server_class', WSGIServer)
# if ':' in self.host: # Fix wsgiref for IPv6 addresses.
# if getattr(server_cls, 'address_family') == socket.AF_INET:
# class server_cls(server_cls):
# address_family = socket.AF_INET6
# srv = make_server(self.host, self.port, app, server_cls, handler_cls)
# ### save tcp server so we can shut it down later
# cls._tcp_server = srv
# srv.serve_forever()
# @bottle.route('/<name>')
# def index(name):
# if name == 'kill':
# raise SystemExit()
# if name in cls._pages_cache:
# return bottle.template(cls._pages_cache[name])
# return None
#
# kwargs = dict(server=MyServer, host=cls.host, port=cls.port)
# thread = threading.Thread(target=bottle.run, kwargs=kwargs)
# thread.start()
# cls._server_thread = thread
#
# @classmethod
# def tearDownClass(cls):
# del cls.browser
# cls._tcp_server.shutdown()
which might include code, classes, or functions. Output only the next line. | </script> |
Based on the snippet: <|code_start|> delta = float(max_wait) / float(parts)
for try_num in range(1, parts + 1):
loaded = condtn(self)
if loaded:
self.log.d('Condition "%s" is True.' % condition)
break
if try_num < parts:
self.log.d('Waiting condition "%s" to be True.' % condition)
time.sleep(delta * try_num)
# If condition was not satisfied print debug message
if not loaded:
self.log.d('Page took too long to load. Increase max_wait parameter'
' or modify object\'s "_max_wait" attribute.')
# Return whether condition was satisfied
return loaded
def _get_xpath_script(self, xpath, single=True):
'''
Get Javascript code for getting single or multiple nodes from webdriver
page's DOM.
Returns web element, attribute or text depending o the xpath specified.
:param xpath: xpath to build the script from
:param single: select only a single node
'''
script_single = '''
var xpath = %(xpath)r;
//XPathResult.FIRST_ORDERED_NODE_TYPE = 9
var FIRST_ORDERED_NODE_TYPE = 9;
var element = document.evaluate(xpath, document, null, FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;
<|code_end|>
, predict the immediate next line with the help of imports:
import rel_imp; rel_imp.init()
import sys
import time
import os
import parsel
from functools import wraps
from contextlib import contextmanager
from urllib.parse import urlparse, urlunparse, urljoin, parse_qsl, unquote_plus
from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.common.exceptions import WebDriverException,\
TimeoutException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
from .logger import Logger
from .validators import is_valid_netloc
and context (classes, functions, sometimes code) from other files:
# Path: xpathwebdriver/logger.py
# class Logger(object):
# default_level = logging.INFO
# handler_level = logging.DEBUG
# default_fmt = '%(levelname)s %(asctime)s: %(message)s'
# default_datefmt = '%H:%M:%S'
#
# def __init__(self, name=None, output=None, level=None, color=False):
# if not level:
# level = self.default_level
# if not output:
# if not name:
# output = logging.getLogger()
# else:
# output = logging.getLogger(name)
# self.output = output
# self.color = color
# if not logging.root.handlers:
# self._config_handler()
# self.set_fmt()
# self.setLevel(level)
#
# def _config_handler(self):
# if not self.output.handlers:
# if self.color:
# hdlr = ColorStreamHandler()
# else:
# hdlr = logging.StreamHandler()
# hdlr.setLevel(self.handler_level)
# self.output.addHandler(hdlr)
#
# def set_fmt(self, fmt=None, datefmt=None):
# datefmt = datefmt or self.default_datefmt
# fmt = fmt or self.default_fmt
# hdlr = self.output.handlers[0]
# fmt = logging.Formatter(fmt=fmt,
# datefmt=datefmt
# )
# hdlr.setFormatter(fmt)
#
# def set_pre_post(self, pre='', post=''):
# if pre:
# pre = '[%s] ' % pre.strip()
# self.set_fmt(fmt=pre + self.default_fmt + post)
#
# def _log(self, method, msg, args):
# getattr(self.output, method)(msg, *args)
#
# def critical(self, msg, *args):
# self._log('critical', msg, args)
#
# def error(self, msg, *args):
# self._log('error', msg, args)
#
# def warning(self, msg, *args):
# self._log('warning', msg, args)
#
# def info(self, msg, *args):
# self._log('info', msg, args)
#
# def debug(self, msg, *args):
# self._log('debug', msg, args)
#
# def exception(self, msg, *args):
# self._log('exception', msg, args)
#
# def c(self, msg, *args):
# self.critical(msg, *args)
#
# def e(self, msg, *args):
# self.error(msg, *args)
#
# def exc(self, msg, *args):
# self.exception(msg, *args)
#
# def w(self, msg, *args):
# self.warning(msg, *args)
#
# def i(self, msg, *args):
# self.info(msg, *args)
#
# def d(self, msg, *args):
# self.debug(msg, *args)
#
# def verbose(self, msg):
# self.output.debug(str(msg))
#
# def v(self, msg, *args):
# self.debug(msg, *args)
#
# def printFilePath(self, file_path, line=None, error=False):
# if error:
# out = self.e
# else:
# out = self.d
# if not line:
# line = 1
# msg = ' File "%s", line %d\n' % (file_path, line)
# out(msg)
#
# def setLevel(self, level):
# if hasattr(self.output, 'setLevel'):
# self.output.setLevel(level)
# else:
# self.w('Cannot set logging level')
#
# def __call__(self, msg):
# self.info(msg)
. Output only the next line. | return extract_element(element); |
Continue the code snippet: <|code_start|>
class NewBrowserPerTest(unittest.TestCase):
def setUp(self):
self.browser = Browser()
def tearDown(self):
<|code_end|>
. Use current file imports:
import unittest
from xpathwebdriver.browser import Browser
and context (classes, functions, or code) from other files:
# Path: xpathwebdriver/browser.py
# class Browser(XpathBrowser):
# '''
# Use this class when using only one browser at a time
# and you don't require complex browser leveling.
#
# For multiple browsers at the same time check examples/04_mutiple_browsers.py
# '''
# def __init__(self, base_url=None, logger=None, settings=None, context_name='default'):
# if settings:
# register_settings_instance(settings)
# else:
# settings = solve_settings()
# self._browser_context = WebdriverManager().get_browser(context_name=context_name)
# XpathBrowser.__init__(self, self._browser_context.acquire_driver(),
# base_url=base_url, logger=logger, settings=settings)
#
# def __del__(self):
# self._free_webdriver()
#
# def _free_webdriver(self):
# # We may get an exception before setting _browser_context
# if getattr(self, '_browser_context', None):
# self._browser_context.__exit__()
# self._browser_context = None
#
# def _quit_failed_webdriver(self):
# failed = WebdriverManager()._is_failed_webdriver(self.driver)
# if failed:
# self._free_webdriver()
# return failed
. Output only the next line. | del self.browser |
Given the following code snippet before the placeholder: <|code_start|>
class DuckDuckTest(unittest.TestCase):
def test_duckduckgo(self):
#Use WebdriverManager for more than 1 browser
with get_browser('First', 'Chrome') as browser:
browser.get_url('https://duckduckgo.com/')
browser.fill(".//*[@id='search_form_input_homepage']", 'xpathwebdriver\n')
with get_browser('Second', 'Firefox') as browser2:
browser2.get_url('https://google.com/')
if __name__ == "__main__":
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from xpathwebdriver.webdriver_manager import get_browser
and context including class names, function names, and sometimes code from other files:
# Path: xpathwebdriver/webdriver_manager.py
# @synchronized(_methods_lock)
# def get_browser(self, context_name='default', browser=None):
# '''
# :param name: optional context's name (mainly for logging purposes)
# :param browser: optional browser name string (eg: 'Firefox', 'Chrome', 'PhantomJs')
# '''
# self._current_context_level += 1
# level = self._current_context_level
# self.init_level(level, browser, context_name)
# return BrowserContextManager(self, level, context_name=context_name, browser_name=browser)
. Output only the next line. | unittest.main() |
Predict the next line for this snippet: <|code_start|>
class LoadCompletionsTest(unittest.TestCase):
def setUp(self):
self.completion_index = CompletionIndex()
# This would probably be cleaner with a pytest.fixture like
# test_completions.index_data
DATA = (
'{"aws": '
'{"commands": ["devicefarm", "foo"], '
'"arguments": ["--debug", "--endpoint-url"], '
'"children": {"devicefarm": '
'{"commands": ["create-device-pool"], '
'"children": {"create-device-pool": '
'{"commands": [], '
'"arguments": ["--project-arn", "--name"]}}}, '
'"foo": '
'{"commands": ["bar"], '
'"children": {"bar": '
'{"commands": [], "arguments": ["--baz"]}}}}}}'
)
self.completion_index.load_index = lambda x: DATA
self.completion_index.load_completions()
def test_load_completions(self):
assert self.completion_index.commands == [
'devicefarm', 'foo']
assert self.completion_index.subcommands == [
<|code_end|>
with the help of current file imports:
import unittest
from awsshell.index.completion import CompletionIndex
and context from other files:
# Path: awsshell/index/completion.py
# class CompletionIndex(object):
# """Handles working with the local commmand completion index.
#
# :type commands: list
# :param commands: ec2, s3, elb...
#
# :type subcommands: list
# :param subcommands: start-instances, stop-instances, terminate-instances...
#
# :type global_opts: list
# :param global_opts: --profile, --region, --output...
#
# :type args_opts: set, to filter out duplicates
# :param args_opts: ec2 start-instances: --instance-ids, --dry-run...
# """
#
# # The completion index can read/write to a cache dir
# # so that it doesn't have to recompute the completion cache
# # every time the CLI starts up.
# DEFAULT_CACHE_DIR = build_config_file_path('cache')
#
# def __init__(self, cache_dir=DEFAULT_CACHE_DIR, fslayer=None):
# self._cache_dir = cache_dir
# if fslayer is None:
# fslayer = FSLayer()
# self._fslayer = fslayer
# self.commands = []
# self.subcommands = []
# self.global_opts = []
# self.args_opts = set()
#
# def load_index(self, version_string):
# """Load the completion index for a given CLI version.
#
# :type version_string: str
# :param version_string: The AWS CLI version, e.g "1.9.2".
#
# :raises: :class:`IndexLoadError <exceptions.IndexLoadError>`
# """
# filename = self._filename_for_version(version_string)
# try:
# contents = self._fslayer.file_contents(filename)
# except FileReadError as e:
# raise IndexLoadError(str(e))
# return contents
#
# def _filename_for_version(self, version_string):
# return os.path.join(
# self._cache_dir, 'completions-%s.json' % version_string)
#
# def load_completions(self):
# """Load completions from the completion index.
#
# Updates the following attributes:
# * commands
# * subcommands
# * global_opts
# * args_opts
# """
# try:
# index_str = self.load_index(utils.AWSCLI_VERSION)
# except IndexLoadError:
# return
# index_str = self.load_index(utils.AWSCLI_VERSION)
# index_data = json.loads(index_str)
# index_root = index_data['aws']
# # ec2, s3, elb...
# self.commands = index_root['commands']
# # --profile, --region, --output...
# self.global_opts = index_root['arguments']
# for command in self.commands:
# # ec2: start-instances, stop-instances, terminate-instances...
# subcommands_current = index_root['children'] \
# .get(command)['commands']
# self.subcommands.extend(subcommands_current)
# for subcommand_current in subcommands_current:
# # start-instances: --instance-ids, --dry-run...
# args_opts_current = index_root['children'] \
# .get(command)['children'] \
# .get(subcommand_current)['arguments']
# self.args_opts.update(args_opts_current)
, which may contain function names, class names, or code. Output only the next line. | 'create-device-pool', 'bar'] |
Based on the snippet: <|code_start|># Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
@pytest.mark.parametrize("search,corpus,expected", [
('foo', ['foobar', 'foobaz'], ['foobar', 'foobaz']),
('f', ['foo', 'foobar', 'bar'], ['foo', 'foobar']),
('z', ['foo', 'foobar', 'bar'], []),
])
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from awsshell.substring import substring_search
and context (classes, functions, sometimes code) from other files:
# Path: awsshell/substring.py
# def substring_search(word, collection):
# """Find all matches in the `collection` for the specified `word`.
#
# If `word` is empty, returns all items in `collection`.
#
# :type word: str
# :param word: The substring to search for.
#
# :type collection: collection, usually a list
# :param collection: A collection of words to match.
#
# :rtype: list of strings
# :return: A sorted list of matching words from collection.
# """
# return [item for item in sorted(collection) if item.startswith(word)]
. Output only the next line. | def test_subsequences(search, corpus, expected): |
Using the snippet: <|code_start|>
def test_prints_error_message_on_unknown_dot_command(errstream):
handler = app.DotCommandHandler(err=errstream)
handler.handle_cmd(".unknown foo bar", None)
assert errstream.getvalue() == "Unknown dot command: .unknown\n"
def test_delegates_to_complete_changing_profile():
completer = mock.Mock(spec=shellcomplete.AWSShellCompleter)
shell = app.AWSShell(completer, mock.Mock(), mock.Mock())
shell.profile = 'mynewprofile'
assert completer.change_profile.call_args == mock.call('mynewprofile')
assert shell.profile == 'mynewprofile'
def test_cd_handler_can_chdir():
chdir = mock.Mock()
handler = app.ChangeDirHandler(chdir=chdir)
handler.run(['.cd', 'foo/bar'], None)
assert chdir.call_args == mock.call('foo/bar')
def test_chdir_syntax_error_prints_err_msg(errstream):
chdir = mock.Mock()
handler = app.ChangeDirHandler(err=errstream, chdir=chdir)
handler.run(['.cd'], None)
assert 'invalid syntax' in errstream.getvalue()
assert not chdir.called
<|code_end|>
, determine the next line of code. You have imports:
import pytest
import mock
from awsshell import app
from awsshell import shellcomplete
from awsshell import compat
and context (class names, function names, or code) available:
# Path: awsshell/app.py
# LOG = logging.getLogger(__name__)
# EXIT_REQUESTED = object()
# USAGE = (
# '.profile # Print the current profile\n'
# '.profile <name> # Change the current profile\n'
# )
# HANDLER_CLASSES = {
# 'edit': EditHandler,
# 'profile': ProfileHandler,
# 'cd': ChangeDirHandler,
# 'exit': ExitHandler,
# 'quit': ExitHandler,
# }
# def create_aws_shell(completer, model_completer, docs):
# def __init__(self, output=sys.stdout, err=sys.stderr, chdir=os.chdir):
# def run(self, command, application):
# def __init__(self, popen_cls=None, env=None, err=sys.stderr):
# def _get_editor_command(self):
# def _generate_edit_history(self, application):
# def run(self, command, application):
# def __init__(self, output=sys.stdout, err=sys.stderr):
# def run(self, command, application):
# def run(self, command, application):
# def __init__(self, output=sys.stdout, err=sys.stderr):
# def handle_cmd(self, command, application):
# def _unknown_cmd(self, cmd_parts, application):
# def __init__(self, completer, model_completer, docs,
# input=None, output=None, popen_cls=None):
# def load_config(self):
# def save_config(self):
# def cli(self):
# def run(self):
# def stop_input_and_refresh_cli(self):
# def create_layout(self, display_completions_in_columns, toolbar):
# def create_buffer(self, completer, history):
# def create_key_manager(self):
# def set_match_fuzzy(match_fuzzy):
# def set_enable_vi_bindings(enable_vi_bindings):
# def set_show_completion_columns(show_completion_columns):
# def set_show_help(show_help):
# def create_application(self, completer, history,
# display_completions_in_columns):
# def on_input_timeout(self, cli):
# def create_cli_interface(self, display_completions_in_columns):
# def profile(self):
# def profile(self, new_profile_name):
# class InputInterrupt(Exception):
# class ChangeDirHandler(object):
# class EditHandler(object):
# class ProfileHandler(object):
# class ExitHandler(object):
# class DotCommandHandler(object):
# class AWSShell(object):
#
# Path: awsshell/shellcomplete.py
# LOG = logging.getLogger(__name__)
# class AWSShellCompleter(Completer):
# def __init__(self, completer, server_side_completer=None):
# def _create_server_side_completer(self, session=None):
# def change_profile(self, profile_name):
# def completer(self):
# def completer(self, value):
# def last_option(self):
# def current_command(self):
# def _convert_to_prompt_completions(self, low_level_completions,
# text_before_cursor):
# def get_completions(self, document, complete_event):
#
# Path: awsshell/compat.py
# PY3 = sys.version_info[0] == 3
# ON_WINDOWS = platform.system() == 'Windows'
# def default_editor():
# def default_editor():
. Output only the next line. | def test_error_displayed_when_chdir_fails(errstream): |
Based on the snippet: <|code_start|>def test_error_displayed_when_chdir_fails(errstream):
chdir = mock.Mock()
chdir.side_effect = OSError("FAILED")
handler = app.ChangeDirHandler(err=errstream, chdir=chdir)
handler.run(['.cd', 'foo'], None)
assert 'FAILED' in errstream.getvalue()
def test_history_stored_correctly():
mock_prompter = mock.Mock()
mock_prompter.buffers = {'clidocs': mock.Mock()}
# Simulate the user entering various commands
quit_document = mock.Mock()
quit_document.text = '.quit'
command_document = mock.Mock()
command_document.text = 'ec2 describe-instances'
mock_prompter.run.side_effect = [command_document, quit_document]
shell = app.AWSShell(mock.Mock(), mock.Mock(), mock.Mock(),
popen_cls=mock.Mock())
shell.create_cli_interface = mock.Mock(return_value=mock_prompter)
shell.run()
# two calls should have been made, history should have added aws
assert mock_prompter.run.call_count == 2
assert list(shell.history) == ['aws ec2 describe-instances']
def test_exit_dot_command_exits_shell():
mock_prompter = mock.Mock()
# Simulate the user entering '.quit'
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
import mock
from awsshell import app
from awsshell import shellcomplete
from awsshell import compat
and context (classes, functions, sometimes code) from other files:
# Path: awsshell/app.py
# LOG = logging.getLogger(__name__)
# EXIT_REQUESTED = object()
# USAGE = (
# '.profile # Print the current profile\n'
# '.profile <name> # Change the current profile\n'
# )
# HANDLER_CLASSES = {
# 'edit': EditHandler,
# 'profile': ProfileHandler,
# 'cd': ChangeDirHandler,
# 'exit': ExitHandler,
# 'quit': ExitHandler,
# }
# def create_aws_shell(completer, model_completer, docs):
# def __init__(self, output=sys.stdout, err=sys.stderr, chdir=os.chdir):
# def run(self, command, application):
# def __init__(self, popen_cls=None, env=None, err=sys.stderr):
# def _get_editor_command(self):
# def _generate_edit_history(self, application):
# def run(self, command, application):
# def __init__(self, output=sys.stdout, err=sys.stderr):
# def run(self, command, application):
# def run(self, command, application):
# def __init__(self, output=sys.stdout, err=sys.stderr):
# def handle_cmd(self, command, application):
# def _unknown_cmd(self, cmd_parts, application):
# def __init__(self, completer, model_completer, docs,
# input=None, output=None, popen_cls=None):
# def load_config(self):
# def save_config(self):
# def cli(self):
# def run(self):
# def stop_input_and_refresh_cli(self):
# def create_layout(self, display_completions_in_columns, toolbar):
# def create_buffer(self, completer, history):
# def create_key_manager(self):
# def set_match_fuzzy(match_fuzzy):
# def set_enable_vi_bindings(enable_vi_bindings):
# def set_show_completion_columns(show_completion_columns):
# def set_show_help(show_help):
# def create_application(self, completer, history,
# display_completions_in_columns):
# def on_input_timeout(self, cli):
# def create_cli_interface(self, display_completions_in_columns):
# def profile(self):
# def profile(self, new_profile_name):
# class InputInterrupt(Exception):
# class ChangeDirHandler(object):
# class EditHandler(object):
# class ProfileHandler(object):
# class ExitHandler(object):
# class DotCommandHandler(object):
# class AWSShell(object):
#
# Path: awsshell/shellcomplete.py
# LOG = logging.getLogger(__name__)
# class AWSShellCompleter(Completer):
# def __init__(self, completer, server_side_completer=None):
# def _create_server_side_completer(self, session=None):
# def change_profile(self, profile_name):
# def completer(self):
# def completer(self, value):
# def last_option(self):
# def current_command(self):
# def _convert_to_prompt_completions(self, low_level_completions,
# text_before_cursor):
# def get_completions(self, document, complete_event):
#
# Path: awsshell/compat.py
# PY3 = sys.version_info[0] == 3
# ON_WINDOWS = platform.system() == 'Windows'
# def default_editor():
# def default_editor():
. Output only the next line. | fake_document = mock.Mock() |
Next line prediction: <|code_start|> # We don't really care about the exact usage message here,
# we just want to ensure usage was written to stderr.
assert 'Usage' in stderr.getvalue()
def test_prints_error_message_on_unknown_dot_command(errstream):
handler = app.DotCommandHandler(err=errstream)
handler.handle_cmd(".unknown foo bar", None)
assert errstream.getvalue() == "Unknown dot command: .unknown\n"
def test_delegates_to_complete_changing_profile():
completer = mock.Mock(spec=shellcomplete.AWSShellCompleter)
shell = app.AWSShell(completer, mock.Mock(), mock.Mock())
shell.profile = 'mynewprofile'
assert completer.change_profile.call_args == mock.call('mynewprofile')
assert shell.profile == 'mynewprofile'
def test_cd_handler_can_chdir():
chdir = mock.Mock()
handler = app.ChangeDirHandler(chdir=chdir)
handler.run(['.cd', 'foo/bar'], None)
assert chdir.call_args == mock.call('foo/bar')
def test_chdir_syntax_error_prints_err_msg(errstream):
chdir = mock.Mock()
handler = app.ChangeDirHandler(err=errstream, chdir=chdir)
handler.run(['.cd'], None)
<|code_end|>
. Use current file imports:
(import pytest
import mock
from awsshell import app
from awsshell import shellcomplete
from awsshell import compat)
and context including class names, function names, or small code snippets from other files:
# Path: awsshell/app.py
# LOG = logging.getLogger(__name__)
# EXIT_REQUESTED = object()
# USAGE = (
# '.profile # Print the current profile\n'
# '.profile <name> # Change the current profile\n'
# )
# HANDLER_CLASSES = {
# 'edit': EditHandler,
# 'profile': ProfileHandler,
# 'cd': ChangeDirHandler,
# 'exit': ExitHandler,
# 'quit': ExitHandler,
# }
# def create_aws_shell(completer, model_completer, docs):
# def __init__(self, output=sys.stdout, err=sys.stderr, chdir=os.chdir):
# def run(self, command, application):
# def __init__(self, popen_cls=None, env=None, err=sys.stderr):
# def _get_editor_command(self):
# def _generate_edit_history(self, application):
# def run(self, command, application):
# def __init__(self, output=sys.stdout, err=sys.stderr):
# def run(self, command, application):
# def run(self, command, application):
# def __init__(self, output=sys.stdout, err=sys.stderr):
# def handle_cmd(self, command, application):
# def _unknown_cmd(self, cmd_parts, application):
# def __init__(self, completer, model_completer, docs,
# input=None, output=None, popen_cls=None):
# def load_config(self):
# def save_config(self):
# def cli(self):
# def run(self):
# def stop_input_and_refresh_cli(self):
# def create_layout(self, display_completions_in_columns, toolbar):
# def create_buffer(self, completer, history):
# def create_key_manager(self):
# def set_match_fuzzy(match_fuzzy):
# def set_enable_vi_bindings(enable_vi_bindings):
# def set_show_completion_columns(show_completion_columns):
# def set_show_help(show_help):
# def create_application(self, completer, history,
# display_completions_in_columns):
# def on_input_timeout(self, cli):
# def create_cli_interface(self, display_completions_in_columns):
# def profile(self):
# def profile(self, new_profile_name):
# class InputInterrupt(Exception):
# class ChangeDirHandler(object):
# class EditHandler(object):
# class ProfileHandler(object):
# class ExitHandler(object):
# class DotCommandHandler(object):
# class AWSShell(object):
#
# Path: awsshell/shellcomplete.py
# LOG = logging.getLogger(__name__)
# class AWSShellCompleter(Completer):
# def __init__(self, completer, server_side_completer=None):
# def _create_server_side_completer(self, session=None):
# def change_profile(self, profile_name):
# def completer(self):
# def completer(self, value):
# def last_option(self):
# def current_command(self):
# def _convert_to_prompt_completions(self, low_level_completions,
# text_before_cursor):
# def get_completions(self, document, complete_event):
#
# Path: awsshell/compat.py
# PY3 = sys.version_info[0] == 3
# ON_WINDOWS = platform.system() == 'Windows'
# def default_editor():
# def default_editor():
. Output only the next line. | assert 'invalid syntax' in errstream.getvalue() |
Given the following code snippet before the placeholder: <|code_start|>
# This is borrowed from prompt_toolkit because we actually
# need to mess with the layouts to get documentation pulled up.
def create_default_layout(app, message='',
lexer=None, is_password=False,
reserve_space_for_menu=False,
<|code_end|>
, predict the next line using imports from the current file:
from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER
from prompt_toolkit.filters import IsDone, HasFocus, Always, \
RendererHeightIsKnown, to_cli_filter, Filter
from prompt_toolkit.layout import Window, HSplit, VSplit, FloatContainer, Float
from prompt_toolkit.layout.containers import ConditionalContainer
from prompt_toolkit.layout.controls import BufferControl, \
TokenListControl, FillControl
from prompt_toolkit.layout.dimension import LayoutDimension
from prompt_toolkit.layout.menus import CompletionsMenu, \
MultiColumnCompletionsMenu
from prompt_toolkit.layout.processors import PasswordProcessor, \
HighlightSearchProcessor, HighlightSelectionProcessor, \
ConditionalProcessor, AppendAutoSuggestion
from prompt_toolkit.layout.prompt import DefaultPrompt
from prompt_toolkit.layout.screen import Char
from prompt_toolkit.layout.toolbars import ValidationToolbar, \
SystemToolbar, ArgToolbar, SearchToolbar
from prompt_toolkit.layout.utils import explode_tokens
from prompt_toolkit.layout.lexers import PygmentsLexer
from pygments.token import Token
from pygments.lexer import Lexer
from awsshell.compat import text_type
and context including class names, function names, and sometimes code from other files:
# Path: awsshell/compat.py
# PY3 = sys.version_info[0] == 3
# ON_WINDOWS = platform.system() == 'Windows'
# def default_editor():
# def default_editor():
. Output only the next line. | get_prompt_tokens=None, |
Predict the next line for this snippet: <|code_start|>
@pytest.fixture
def shell_db(tmpdir):
filename = tmpdir.join('docs.db').strpath
d = db.ConcurrentDBM.create(filename)
return d
def test_can_get_and_set_value(shell_db):
shell_db['foo'] = 'bar'
assert shell_db['foo'] == 'bar'
def test_raise_key_error_when_no_key_exists(shell_db):
with pytest.raises(KeyError) as e:
shell_db['foo']
assert 'foo' in str(e.value)
def test_can_set_multiple_values(shell_db):
shell_db['foo'] = 'a'
shell_db['bar'] = 'b'
assert shell_db['foo'] == 'a'
assert shell_db['bar'] == 'b'
def test_can_change_existing_value(shell_db):
shell_db['foo'] = 'first'
<|code_end|>
with the help of current file imports:
from awsshell import db
import pytest
and context from other files:
# Path: awsshell/db.py
# class ConcurrentDBM(object):
# def open(cls, filename, create=False):
# def create(cls, filename):
# def __init__(self, db):
# def __getitem__(self, key):
# def __setitem__(self, key, value):
# def close(self):
, which may contain function names, class names, or code. Output only the next line. | shell_db['foo'] = 'second' |
Given snippet: <|code_start|>
class JSONIndexLoader(object):
def __init__(self):
pass
@staticmethod
def index_filename(version_string, type_name='completions'):
return build_config_file_path(
'%s-%s.json' % (version_string, type_name))
def load_index(self, filename):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json
from awsshell.utils import build_config_file_path
and context:
# Path: awsshell/utils.py
# def build_config_file_path(file_name):
# return os.path.join(os.path.expanduser('~'), '.aws', 'shell', file_name)
which might include code, classes, or functions. Output only the next line. | with open(filename, 'r') as f: |
Using the snippet: <|code_start|>
from __future__ import unicode_literals
def load_lazy_doc_index(filename):
d = load_doc_db(filename)
<|code_end|>
, determine the next line of code. You have imports:
from awsshell import db
and context (class names, function names, or code) available:
# Path: awsshell/db.py
# class ConcurrentDBM(object):
# def open(cls, filename, create=False):
# def create(cls, filename):
# def __init__(self, db):
# def __getitem__(self, key):
# def __setitem__(self, key, value):
# def close(self):
. Output only the next line. | return DocRetriever(d) |
Given the following code snippet before the placeholder: <|code_start|>
dataset_id = '42'
round_id = 1
results = list_key_store('dataset:%s:rounds' % dataset_id)
for r in results:
if r['round_id'] == round_id:
# print(r)
# reinject this round in another dataset experiment
msg_search = {'dataset_id': '47', 'round_id': 107, 'solution': r['solution'], 'level': 1,
<|code_end|>
, predict the next line using imports from the current file:
from automlk.store import get_key_store, list_key_store, lpush_key_store
from automlk.config import *
and context including class names, function names, and sometimes code from other files:
# Path: automlk/store.py
# def get_key_store(key):
# """
# retieves value from key in store
#
# :param key: key of the data
# :return: value of the data
# """
# if get_use_redis():
# return json.loads(rds.get(str(key)))
# else:
# try:
# if exists_key_store(key):
# return json.load(open(store_folder + '/' + __clean_key(key) + '.json', 'r'))
# else:
# return None
# except:
# return None
#
# def list_key_store(key):
# """
# returns the complete list of values
#
# :param key: key of the data
# :return: list
# """
# if get_use_redis():
# return [json.loads(x) for x in rds.lrange(key, 0, -1)]
# else:
# l = get_key_store(key)
# if l != None:
# return l
# else:
# return []
#
# def lpush_key_store(key, value):
# """
# add value to the beginning of a list of key in store
#
# :param key: key of the data
# :return: value of the data
# :return: None
# """
# if get_use_redis():
# rds.lpush(str(key), json.dumps(value))
# else:
# if exists_key_store(key):
# l = get_key_store(key)
# if isinstance(l, list):
# set_key_store(key, [value] + l)
# else:
# set_key_store(key, [value])
# else:
# set_key_store(key, [value])
. Output only the next line. | 'ensemble_depth': 0, 'model_name': r['model_name'], 'model_params': r['model_params'], 'pipeline': r['pipeline'], |
Using the snippet: <|code_start|>
dataset_id = '42'
round_id = 1
results = list_key_store('dataset:%s:rounds' % dataset_id)
for r in results:
if r['round_id'] == round_id:
# print(r)
# reinject this round in another dataset experiment
msg_search = {'dataset_id': '47', 'round_id': 107, 'solution': r['solution'], 'level': 1,
'ensemble_depth': 0, 'model_name': r['model_name'], 'model_params': r['model_params'], 'pipeline': r['pipeline'],
'threshold': 0, 'time_limit': 10000}
<|code_end|>
, determine the next line of code. You have imports:
from automlk.store import get_key_store, list_key_store, lpush_key_store
from automlk.config import *
and context (class names, function names, or code) available:
# Path: automlk/store.py
# def get_key_store(key):
# """
# retieves value from key in store
#
# :param key: key of the data
# :return: value of the data
# """
# if get_use_redis():
# return json.loads(rds.get(str(key)))
# else:
# try:
# if exists_key_store(key):
# return json.load(open(store_folder + '/' + __clean_key(key) + '.json', 'r'))
# else:
# return None
# except:
# return None
#
# def list_key_store(key):
# """
# returns the complete list of values
#
# :param key: key of the data
# :return: list
# """
# if get_use_redis():
# return [json.loads(x) for x in rds.lrange(key, 0, -1)]
# else:
# l = get_key_store(key)
# if l != None:
# return l
# else:
# return []
#
# def lpush_key_store(key, value):
# """
# add value to the beginning of a list of key in store
#
# :param key: key of the data
# :return: value of the data
# :return: None
# """
# if get_use_redis():
# rds.lpush(str(key), json.dumps(value))
# else:
# if exists_key_store(key):
# l = get_key_store(key)
# if isinstance(l, list):
# set_key_store(key, [value] + l)
# else:
# set_key_store(key, [value])
# else:
# set_key_store(key, [value])
. Output only the next line. | print('sending %s' % msg_search) |
Based on the snippet: <|code_start|>
matplotlib.use('Agg')
log = logging.getLogger(__name__)
try:
import_wordcloud = True
except:
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pylab as plt
import seaborn.apionly as sns
import itertools
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from .config import METRIC_NULL
from .context import get_dataset_folder
from wordcloud import WordCloud
and context (classes, functions, sometimes code) from other files:
# Path: automlk/config.py
# METRIC_NULL = 1e7
#
# Path: automlk/context.py
# def get_dataset_folder(dataset_id):
# """
# storage folder of the dataset
# :param dataset_id: id of the dataset
# :return: folder path (root of the structure)
# """
# return get_data_folder() + '/%s' % dataset_id
. Output only the next line. | import_wordcloud = False |
Using the snippet: <|code_start|>
matplotlib.use('Agg')
log = logging.getLogger(__name__)
try:
import_wordcloud = True
except:
import_wordcloud = False
<|code_end|>
, determine the next line of code. You have imports:
import logging
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pylab as plt
import seaborn.apionly as sns
import itertools
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from .config import METRIC_NULL
from .context import get_dataset_folder
from wordcloud import WordCloud
and context (class names, function names, or code) available:
# Path: automlk/config.py
# METRIC_NULL = 1e7
#
# Path: automlk/context.py
# def get_dataset_folder(dataset_id):
# """
# storage folder of the dataset
# :param dataset_id: id of the dataset
# :return: folder path (root of the structure)
# """
# return get_data_folder() + '/%s' % dataset_id
. Output only the next line. | TRANSPARENT = False |
Given the following code snippet before the placeholder: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s [%(module)s %(lineno)3d] %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/worker_text.log'),
logging.StreamHandler()
])
logging.info('starting worker text')
while True:
try:
worker_text_loop()
<|code_end|>
, predict the next line using imports from the current file:
import logging
import sys
import os
from automlk.worker_text import worker_text_loop
from automlk.context import get_data_folder
and context including class names, function names, and sometimes code from other files:
# Path: automlk/worker_text.py
# def worker_text_loop():
# """
# periodically pool the receiver queue for a search job
#
# :return:
# """
# while True:
# heart_beep('worker_text', '')
# # check the list of datasets
# for ts in get_textset_list():
# if ts.status != 'completed':
# heart_beep('worker_text', {'textset_id': ts.textset_id, 'textset_name': ts.name})
# log.info('searching textset %s' % ts.textset_id)
#
# # read textset
# textset = get_textset(ts.textset_id)
# set_key_store('textset:%s:status' % ts.textset_id, 'searching')
#
# # import text
# with open(textset.filename, 'r') as f:
# text = f.readlines()
#
# # calculate models
# for conf in space_textset_bow:
# pickle.dump(model_bow(text, conf), open(text_model_filename(ts.textset_id, 'bow', conf), 'wb'))
#
# for conf in space_textset_w2v:
# pickle.dump(model_word2vec(text, conf), open(text_model_filename(ts.textset_id, 'w2v', conf), 'wb'))
#
# for conf in space_textset_d2v:
# pickle.dump(model_doc2vec(text, conf), open(text_model_filename(ts.textset_id, 'd2v', conf), 'wb'))
#
# # update status to completed
# set_key_store('textset:%s:status' % ts.textset_id, 'completed')
#
# Path: automlk/context.py
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | except Exception as e: |
Continue the code snippet: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s [%(module)s %(lineno)3d] %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/worker_text.log'),
logging.StreamHandler()
])
logging.info('starting worker text')
while True:
<|code_end|>
. Use current file imports:
import logging
import sys
import os
from automlk.worker_text import worker_text_loop
from automlk.context import get_data_folder
and context (classes, functions, or code) from other files:
# Path: automlk/worker_text.py
# def worker_text_loop():
# """
# periodically pool the receiver queue for a search job
#
# :return:
# """
# while True:
# heart_beep('worker_text', '')
# # check the list of datasets
# for ts in get_textset_list():
# if ts.status != 'completed':
# heart_beep('worker_text', {'textset_id': ts.textset_id, 'textset_name': ts.name})
# log.info('searching textset %s' % ts.textset_id)
#
# # read textset
# textset = get_textset(ts.textset_id)
# set_key_store('textset:%s:status' % ts.textset_id, 'searching')
#
# # import text
# with open(textset.filename, 'r') as f:
# text = f.readlines()
#
# # calculate models
# for conf in space_textset_bow:
# pickle.dump(model_bow(text, conf), open(text_model_filename(ts.textset_id, 'bow', conf), 'wb'))
#
# for conf in space_textset_w2v:
# pickle.dump(model_word2vec(text, conf), open(text_model_filename(ts.textset_id, 'w2v', conf), 'wb'))
#
# for conf in space_textset_d2v:
# pickle.dump(model_doc2vec(text, conf), open(text_model_filename(ts.textset_id, 'd2v', conf), 'wb'))
#
# # update status to completed
# set_key_store('textset:%s:status' % ts.textset_id, 'completed')
#
# Path: automlk/context.py
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | try: |
Given snippet: <|code_start|> url=form.url.data,
filename=form.filename.data)
return redirect('/textset_list')
# except Exception as e:
# flash(e)
else:
flash(", ".join([key + ': ' + form.errors[key][0] for key in form.errors.keys()]))
return render_template('create_text.html', form=form, config=get_config())
@app.route('/update_text/<textset_id>', methods=['GET', 'POST'])
def update_text(textset_id):
# form to update a textset
form = UpdateTextsetForm()
if request.method == 'POST':
if form.validate():
update_textset(textset_id,
name=form.name.data,
description=form.description.data,
source=form.source.data,
url=form.url.data)
return redirect('/textset_list')
else:
textset = get_textset(textset_id)
# copy data to form
form.name.data = textset.name
form.description.data = textset.description
form.source.data = textset.source
form.url.data = textset.url
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from app import app
from flask import render_template, send_file, redirect, request, abort, flash, send_from_directory, jsonify
from .form import *
from automlk.textset import *
from automlk.context import get_uploads_folder
and context:
# Path: automlk/context.py
# def get_uploads_folder():
# """
# folder to store file uploads
#
# :return: folder name
# """
# folder = get_data_folder() + '/uploads'
# if not os.path.exists(folder):
# os.makedirs(folder)
# return folder
which might include code, classes, or functions. Output only the next line. | return render_template('update_text.html', form=form, config=get_config()) |
Given snippet: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s [%(module)s %(lineno)3d] %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/grapher.log'),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import time
import pickle
from .store import *
from .graphs import *
from .monitor import heart_beep
from .dataset import get_dataset_list, get_dataset
from .specific import get_feature_engineering, apply_feature_engineering
and context:
# Path: automlk/monitor.py
# def heart_beep(module, msg, index=1, gpu=False):
# """
# send heart beep as module
#
# :param module: controller / worker
# :param msg: status message to send to the web app
# :param index: index of the worker
# :param gpu: gpu capability
# :return:
# """
# global __cpu_pct
# if module == 'controller':
# __cpu_pct = psutil.cpu_percent(interval=0.1)
#
# # detect IP and DNS name
# t = datetime.datetime.now()
# id = socket.gethostname() + '_' + str(index)
# msg_beep = {'module': module,
# 'host_name': id,
# 'cpu_count': psutil.cpu_count(logical=False),
# 'cpu_log_count': psutil.cpu_count(logical=True),
# 'cpu_pct': __cpu_pct,
# 'memory': psutil.virtual_memory().total/1073741824,
# 'memory_pct': psutil.virtual_memory().percent,
# 'version': __version__,
# 'time': {'year': t.year, 'month': t.month, 'day': t.day,
# 'hour': t.hour, 'minute': t.minute, 'second': t.second},
# 'msg': msg
# }
#
# # update list
# sadd_key_store('monitor:%s' % module, id)
#
# # save msg in store
# set_key_store('monitor:%s:%s' % (module, id), msg_beep)
#
# Path: automlk/dataset.py
# def get_dataset_list(include_results=False):
# """
# get the list of all datasets
#
# :param include_results: flag to determine if the status are also retrieved (default = False)
# :return: list of datasets objects or empty list if error (eg. redis or environment not set)
# """
# # try:
# return [get_dataset(dataset_id, include_results) for dataset_id in get_dataset_ids()]
# # except:
# # return []
#
# def get_dataset(dataset_id, include_results=False):
# """
# get the descriptive data of a dataset
#
# :param dataset_id: id of the dataset
# :param include_results: if need to extract results also
# :return: dataset object
# """
# dt = make_dataset(get_key_store('dataset:%s' % dataset_id))
# dt.status = get_key_store('dataset:%s:status' % dataset_id)
#
# # add counters and results
# dt.grapher = get_key_store('dataset:%s:grapher' % dataset_id)
# dt.round_counter = get_counter_store('dataset:%s:round_counter' % dataset_id)
#
# if include_results:
# dt.results = get_key_store('dataset:%s:results' % dataset_id)
#
# return dt
#
# Path: automlk/specific.py
# def get_feature_engineering(dataset_id):
# """
# retrieves the feature engineering content of a dataset
#
# :param dataset_id: id of the dataset
# :return: content: text of the python function (without def and return)
# """
# filename = get_dataset_folder(dataset_id) + '/data/fe_%s.txt' % dataset_id
# if os.path.exists(filename):
# with open(filename, 'r') as f:
# return "".join(f.readlines())
# else:
# return ''
#
# def apply_feature_engineering(dataset_id, X=None):
# """
# executes the feature engineering definition
#
# :param dataset_id: dataset id
# :return:
# """
# global specific_feature_engineering
# exec_feature_engineering(get_feature_engineering(dataset_id))
# if isinstance(X, pd.DataFrame):
# return specific_feature_engineering(X)
# else:
# return None
which might include code, classes, or functions. Output only the next line. | logging.StreamHandler() |
Using the snippet: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s [%(module)s %(lineno)3d] %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/grapher.log'),
logging.StreamHandler()
])
logging.info('starting grapher')
<|code_end|>
, determine the next line of code. You have imports:
import time
import pickle
from .store import *
from .graphs import *
from .monitor import heart_beep
from .dataset import get_dataset_list, get_dataset
from .specific import get_feature_engineering, apply_feature_engineering
and context (class names, function names, or code) available:
# Path: automlk/monitor.py
# def heart_beep(module, msg, index=1, gpu=False):
# """
# send heart beep as module
#
# :param module: controller / worker
# :param msg: status message to send to the web app
# :param index: index of the worker
# :param gpu: gpu capability
# :return:
# """
# global __cpu_pct
# if module == 'controller':
# __cpu_pct = psutil.cpu_percent(interval=0.1)
#
# # detect IP and DNS name
# t = datetime.datetime.now()
# id = socket.gethostname() + '_' + str(index)
# msg_beep = {'module': module,
# 'host_name': id,
# 'cpu_count': psutil.cpu_count(logical=False),
# 'cpu_log_count': psutil.cpu_count(logical=True),
# 'cpu_pct': __cpu_pct,
# 'memory': psutil.virtual_memory().total/1073741824,
# 'memory_pct': psutil.virtual_memory().percent,
# 'version': __version__,
# 'time': {'year': t.year, 'month': t.month, 'day': t.day,
# 'hour': t.hour, 'minute': t.minute, 'second': t.second},
# 'msg': msg
# }
#
# # update list
# sadd_key_store('monitor:%s' % module, id)
#
# # save msg in store
# set_key_store('monitor:%s:%s' % (module, id), msg_beep)
#
# Path: automlk/dataset.py
# def get_dataset_list(include_results=False):
# """
# get the list of all datasets
#
# :param include_results: flag to determine if the status are also retrieved (default = False)
# :return: list of datasets objects or empty list if error (eg. redis or environment not set)
# """
# # try:
# return [get_dataset(dataset_id, include_results) for dataset_id in get_dataset_ids()]
# # except:
# # return []
#
# def get_dataset(dataset_id, include_results=False):
# """
# get the descriptive data of a dataset
#
# :param dataset_id: id of the dataset
# :param include_results: if need to extract results also
# :return: dataset object
# """
# dt = make_dataset(get_key_store('dataset:%s' % dataset_id))
# dt.status = get_key_store('dataset:%s:status' % dataset_id)
#
# # add counters and results
# dt.grapher = get_key_store('dataset:%s:grapher' % dataset_id)
# dt.round_counter = get_counter_store('dataset:%s:round_counter' % dataset_id)
#
# if include_results:
# dt.results = get_key_store('dataset:%s:results' % dataset_id)
#
# return dt
#
# Path: automlk/specific.py
# def get_feature_engineering(dataset_id):
# """
# retrieves the feature engineering content of a dataset
#
# :param dataset_id: id of the dataset
# :return: content: text of the python function (without def and return)
# """
# filename = get_dataset_folder(dataset_id) + '/data/fe_%s.txt' % dataset_id
# if os.path.exists(filename):
# with open(filename, 'r') as f:
# return "".join(f.readlines())
# else:
# return ''
#
# def apply_feature_engineering(dataset_id, X=None):
# """
# executes the feature engineering definition
#
# :param dataset_id: dataset id
# :return:
# """
# global specific_feature_engineering
# exec_feature_engineering(get_feature_engineering(dataset_id))
# if isinstance(X, pd.DataFrame):
# return specific_feature_engineering(X)
# else:
# return None
. Output only the next line. | def grapher_loop(): |
Next line prediction: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s [%(module)s %(lineno)3d] %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/grapher.log'),
logging.StreamHandler()
])
<|code_end|>
. Use current file imports:
(import time
import pickle
from .store import *
from .graphs import *
from .monitor import heart_beep
from .dataset import get_dataset_list, get_dataset
from .specific import get_feature_engineering, apply_feature_engineering)
and context including class names, function names, or small code snippets from other files:
# Path: automlk/monitor.py
# def heart_beep(module, msg, index=1, gpu=False):
# """
# send heart beep as module
#
# :param module: controller / worker
# :param msg: status message to send to the web app
# :param index: index of the worker
# :param gpu: gpu capability
# :return:
# """
# global __cpu_pct
# if module == 'controller':
# __cpu_pct = psutil.cpu_percent(interval=0.1)
#
# # detect IP and DNS name
# t = datetime.datetime.now()
# id = socket.gethostname() + '_' + str(index)
# msg_beep = {'module': module,
# 'host_name': id,
# 'cpu_count': psutil.cpu_count(logical=False),
# 'cpu_log_count': psutil.cpu_count(logical=True),
# 'cpu_pct': __cpu_pct,
# 'memory': psutil.virtual_memory().total/1073741824,
# 'memory_pct': psutil.virtual_memory().percent,
# 'version': __version__,
# 'time': {'year': t.year, 'month': t.month, 'day': t.day,
# 'hour': t.hour, 'minute': t.minute, 'second': t.second},
# 'msg': msg
# }
#
# # update list
# sadd_key_store('monitor:%s' % module, id)
#
# # save msg in store
# set_key_store('monitor:%s:%s' % (module, id), msg_beep)
#
# Path: automlk/dataset.py
# def get_dataset_list(include_results=False):
# """
# get the list of all datasets
#
# :param include_results: flag to determine if the status are also retrieved (default = False)
# :return: list of datasets objects or empty list if error (eg. redis or environment not set)
# """
# # try:
# return [get_dataset(dataset_id, include_results) for dataset_id in get_dataset_ids()]
# # except:
# # return []
#
# def get_dataset(dataset_id, include_results=False):
# """
# get the descriptive data of a dataset
#
# :param dataset_id: id of the dataset
# :param include_results: if need to extract results also
# :return: dataset object
# """
# dt = make_dataset(get_key_store('dataset:%s' % dataset_id))
# dt.status = get_key_store('dataset:%s:status' % dataset_id)
#
# # add counters and results
# dt.grapher = get_key_store('dataset:%s:grapher' % dataset_id)
# dt.round_counter = get_counter_store('dataset:%s:round_counter' % dataset_id)
#
# if include_results:
# dt.results = get_key_store('dataset:%s:results' % dataset_id)
#
# return dt
#
# Path: automlk/specific.py
# def get_feature_engineering(dataset_id):
# """
# retrieves the feature engineering content of a dataset
#
# :param dataset_id: id of the dataset
# :return: content: text of the python function (without def and return)
# """
# filename = get_dataset_folder(dataset_id) + '/data/fe_%s.txt' % dataset_id
# if os.path.exists(filename):
# with open(filename, 'r') as f:
# return "".join(f.readlines())
# else:
# return ''
#
# def apply_feature_engineering(dataset_id, X=None):
# """
# executes the feature engineering definition
#
# :param dataset_id: dataset id
# :return:
# """
# global specific_feature_engineering
# exec_feature_engineering(get_feature_engineering(dataset_id))
# if isinstance(X, pd.DataFrame):
# return specific_feature_engineering(X)
# else:
# return None
. Output only the next line. | logging.info('starting grapher') |
Using the snippet: <|code_start|>
log = logging.getLogger(__name__)
# try to import redis
__config = get_config()
if __config['store'] == 'redis':
try:
rds = redis.Redis(host=__config['store_url'])
print('using redis')
set_use_redis(True)
except:
set_use_redis(False)
log.error('redis not installed: using file storage instead')
if not get_use_redis():
# we will use simple file storage
store_folder = get_data_folder() + '/store'
<|code_end|>
, determine the next line of code. You have imports:
import os
import json
import time
import logging
import redis
from .config import get_use_redis, set_use_redis
from .context import get_config, get_data_folder
and context (class names, function names, or code) available:
# Path: automlk/config.py
# def get_use_redis():
# """
# get global value USE_REDIS
# :return: value USE_REDIS
# """
# return USE_REDIS
#
# def set_use_redis(value):
# """
# set global value USE_REDIS
#
# :param value:
# :return:
# """
# global USE_REDIS
# USE_REDIS = value
#
# Path: automlk/context.py
# def get_config():
# """
#
# retrieves configuration parameters
# :return: config dict
# """
# if os.path.exists('../config.json'):
# with open('../config.json', 'r') as f:
# config = eval("".join(f.readlines()))
# # upward compatibility
# if 'bootstrap' not in config.keys():
# config['bootstrap'] = ''
# if 'graph_theme' not in config.keys():
# config['graph_theme'] = 'dark'
# if 'doc_theme' not in config.keys():
# config['doc_theme'] = 'default'
# return config
# raise EnvironmentError('configuration file %s not found' % '../config.json')
#
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | if not store_folder: |
Here is a snippet: <|code_start|>
log = logging.getLogger(__name__)
# try to import redis
__config = get_config()
if __config['store'] == 'redis':
<|code_end|>
. Write the next line using the current file imports:
import os
import json
import time
import logging
import redis
from .config import get_use_redis, set_use_redis
from .context import get_config, get_data_folder
and context from other files:
# Path: automlk/config.py
# def get_use_redis():
# """
# get global value USE_REDIS
# :return: value USE_REDIS
# """
# return USE_REDIS
#
# def set_use_redis(value):
# """
# set global value USE_REDIS
#
# :param value:
# :return:
# """
# global USE_REDIS
# USE_REDIS = value
#
# Path: automlk/context.py
# def get_config():
# """
#
# retrieves configuration parameters
# :return: config dict
# """
# if os.path.exists('../config.json'):
# with open('../config.json', 'r') as f:
# config = eval("".join(f.readlines()))
# # upward compatibility
# if 'bootstrap' not in config.keys():
# config['bootstrap'] = ''
# if 'graph_theme' not in config.keys():
# config['graph_theme'] = 'dark'
# if 'doc_theme' not in config.keys():
# config['doc_theme'] = 'default'
# return config
# raise EnvironmentError('configuration file %s not found' % '../config.json')
#
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
, which may include functions, classes, or code. Output only the next line. | try: |
Using the snippet: <|code_start|>
log = logging.getLogger(__name__)
# try to import redis
__config = get_config()
if __config['store'] == 'redis':
try:
rds = redis.Redis(host=__config['store_url'])
print('using redis')
set_use_redis(True)
except:
set_use_redis(False)
log.error('redis not installed: using file storage instead')
if not get_use_redis():
# we will use simple file storage
store_folder = get_data_folder() + '/store'
<|code_end|>
, determine the next line of code. You have imports:
import os
import json
import time
import logging
import redis
from .config import get_use_redis, set_use_redis
from .context import get_config, get_data_folder
and context (class names, function names, or code) available:
# Path: automlk/config.py
# def get_use_redis():
# """
# get global value USE_REDIS
# :return: value USE_REDIS
# """
# return USE_REDIS
#
# def set_use_redis(value):
# """
# set global value USE_REDIS
#
# :param value:
# :return:
# """
# global USE_REDIS
# USE_REDIS = value
#
# Path: automlk/context.py
# def get_config():
# """
#
# retrieves configuration parameters
# :return: config dict
# """
# if os.path.exists('../config.json'):
# with open('../config.json', 'r') as f:
# config = eval("".join(f.readlines()))
# # upward compatibility
# if 'bootstrap' not in config.keys():
# config['bootstrap'] = ''
# if 'graph_theme' not in config.keys():
# config['graph_theme'] = 'dark'
# if 'doc_theme' not in config.keys():
# config['doc_theme'] = 'default'
# return config
# raise EnvironmentError('configuration file %s not found' % '../config.json')
#
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | if not store_folder: |
Next line prediction: <|code_start|>
log = logging.getLogger(__name__)
# try to import redis
__config = get_config()
if __config['store'] == 'redis':
<|code_end|>
. Use current file imports:
(import os
import json
import time
import logging
import redis
from .config import get_use_redis, set_use_redis
from .context import get_config, get_data_folder)
and context including class names, function names, or small code snippets from other files:
# Path: automlk/config.py
# def get_use_redis():
# """
# get global value USE_REDIS
# :return: value USE_REDIS
# """
# return USE_REDIS
#
# def set_use_redis(value):
# """
# set global value USE_REDIS
#
# :param value:
# :return:
# """
# global USE_REDIS
# USE_REDIS = value
#
# Path: automlk/context.py
# def get_config():
# """
#
# retrieves configuration parameters
# :return: config dict
# """
# if os.path.exists('../config.json'):
# with open('../config.json', 'r') as f:
# config = eval("".join(f.readlines()))
# # upward compatibility
# if 'bootstrap' not in config.keys():
# config['bootstrap'] = ''
# if 'graph_theme' not in config.keys():
# config['graph_theme'] = 'dark'
# if 'doc_theme' not in config.keys():
# config['doc_theme'] = 'default'
# return config
# raise EnvironmentError('configuration file %s not found' % '../config.json')
#
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | try: |
Given snippet: <|code_start|>
class ResetTextsetForm(FlaskForm):
# form to confirm reset of a textset
reset_id = StringField('id')
reset_name = StringField('name')
reset_description = TextAreaField('description')
class DeleteTextsetForm(FlaskForm):
# form to confirm delete of a textset
id = StringField('id')
name = StringField('name')
description = TextAreaField('description')
class DupplicateRound(FlaskForm):
# for to dupplicate a dataset
dataset = SelectField(choices=[])
round = TextAreaField('search')
def set_choices(self, problem_type):
self.dataset.choices = [(d.dataset_id, '#%s: %s' % (d.dataset_id, d.name)) for d in get_dataset_list() if d.problem_type == problem_type]
class AddFolderForm(FlaskForm):
# form to create a folder
id_parent_add = IntegerField('id')
name_parent_add = StringField('name')
name_add = StringField('name')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, BooleanField, SelectField, IntegerField, FileField, SelectMultipleField
from wtforms.validators import DataRequired
from automlk.metrics import metric_list
from automlk.dataset import get_dataset_list
and context:
# Path: automlk/metrics.py
# class Metric(object):
# def __init__(self, name, function, problem_type, best_is_min, need_class=False, binary=False, average=False):
# def rmse(y_act, y_pred):
# def rmsle(y_act, y_pred):
# def gini(y_act, y_pred):
# def gini_normalized(y_act, y_pred):
# def evaluate_metric(y_act, y_pred, metric_name, n_classes):
#
# Path: automlk/dataset.py
# def get_dataset_list(include_results=False):
# """
# get the list of all datasets
#
# :param include_results: flag to determine if the status are also retrieved (default = False)
# :return: list of datasets objects or empty list if error (eg. redis or environment not set)
# """
# # try:
# return [get_dataset(dataset_id, include_results) for dataset_id in get_dataset_ids()]
# # except:
# # return []
which might include code, classes, or functions. Output only the next line. | class UpdateFolderForm(FlaskForm): |
Based on the snippet: <|code_start|>
class CreateDatasetForm(FlaskForm):
# this is the form to create a dataset
name = StringField(validators=[DataRequired()])
folder = SelectField('folder', coerce=int)
description = TextAreaField()
source = StringField()
url = StringField()
mode = SelectField(choices=[('standard', 'standard'), ('benchmark', 'benchmark'), ('competition', 'competition')],
default='standard')
mode_file = SelectField(choices=[('upload', 'upload'), ('path', 'file path')], default='upload')
filename_cols = StringField()
file_cols = FileField()
filename_train = StringField()
<|code_end|>
, predict the immediate next line with the help of imports:
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, BooleanField, SelectField, IntegerField, FileField, SelectMultipleField
from wtforms.validators import DataRequired
from automlk.metrics import metric_list
from automlk.dataset import get_dataset_list
and context (classes, functions, sometimes code) from other files:
# Path: automlk/metrics.py
# class Metric(object):
# def __init__(self, name, function, problem_type, best_is_min, need_class=False, binary=False, average=False):
# def rmse(y_act, y_pred):
# def rmsle(y_act, y_pred):
# def gini(y_act, y_pred):
# def gini_normalized(y_act, y_pred):
# def evaluate_metric(y_act, y_pred, metric_name, n_classes):
#
# Path: automlk/dataset.py
# def get_dataset_list(include_results=False):
# """
# get the list of all datasets
#
# :param include_results: flag to determine if the status are also retrieved (default = False)
# :return: list of datasets objects or empty list if error (eg. redis or environment not set)
# """
# # try:
# return [get_dataset(dataset_id, include_results) for dataset_id in get_dataset_ids()]
# # except:
# # return []
. Output only the next line. | file_train = FileField() |
Predict the next line for this snippet: <|code_start|>
jinja_globals = {'print_list': print_list,
'print_score': print_score,
'print_score_std': print_score_std,
'print_value': print_value,
'print_duration': print_duration,
'print_params': print_params,
'print_other_metrics': print_other_metrics,
<|code_end|>
with the help of current file imports:
import os
import sys
import glob
import zipfile
import pandas as pd
import numpy as np
import jinja2
import subprocess
from .context import get_dataset_folder
from .results import *
from automlk.worker import get_search_rounds
from .print import *
and context from other files:
# Path: automlk/context.py
# def get_dataset_folder(dataset_id):
# """
# storage folder of the dataset
# :param dataset_id: id of the dataset
# :return: folder path (root of the structure)
# """
# return get_data_folder() + '/%s' % dataset_id
#
# Path: automlk/worker.py
# def get_search_rounds(dataset_id):
# """
# get all the results of the search with preprocessing and models
#
# :param dataset_id: id of the dataset
# :return: results of the search as a dataframe
# """
# results = list_key_store('dataset:%s:rounds' % dataset_id)
# return pd.DataFrame(results)
, which may contain function names, class names, or code. Output only the next line. | 'print_title': print_title, |
Predict the next line for this snippet: <|code_start|>
jinja_globals = {'print_list': print_list,
'print_score': print_score,
'print_score_std': print_score_std,
'print_value': print_value,
'print_duration': print_duration,
'print_params': print_params,
'print_other_metrics': print_other_metrics,
'print_title': print_title,
<|code_end|>
with the help of current file imports:
import os
import sys
import glob
import zipfile
import pandas as pd
import numpy as np
import jinja2
import subprocess
from .context import get_dataset_folder
from .results import *
from automlk.worker import get_search_rounds
from .print import *
and context from other files:
# Path: automlk/context.py
# def get_dataset_folder(dataset_id):
# """
# storage folder of the dataset
# :param dataset_id: id of the dataset
# :return: folder path (root of the structure)
# """
# return get_data_folder() + '/%s' % dataset_id
#
# Path: automlk/worker.py
# def get_search_rounds(dataset_id):
# """
# get all the results of the search with preprocessing and models
#
# :param dataset_id: id of the dataset
# :return: results of the search as a dataframe
# """
# results = list_key_store('dataset:%s:rounds' % dataset_id)
# return pd.DataFrame(results)
, which may contain function names, class names, or code. Output only the next line. | } |
Given the following code snippet before the placeholder: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(module)s %(lineno)d %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/grapher.log'),
logging.StreamHandler()
]
<|code_end|>
, predict the next line using imports from the current file:
import logging
import sys
import os
from automlk.grapher import grapher_loop
from automlk.context import get_data_folder
and context including class names, function names, and sometimes code from other files:
# Path: automlk/grapher.py
# def grapher_loop():
# """
# periodically pool the grapher queue for a job
#
# :return:
# """
# while True:
# heart_beep('grapher', '')
# # check the list of datasets
# for dt in get_dataset_list():
# if dt.status != 'created' and not dt.grapher:
# heart_beep('grapher', {'dataset_id': dt.dataset_id, 'dataset_name': dt.name})
# logging.info('grapher on dataset: %s' % dt.dataset_id)
# create_graph_data(dt.dataset_id)
# time.sleep(60)
#
# Path: automlk/context.py
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | ) |
Based on the snippet: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(module)s %(lineno)d %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/grapher.log'),
logging.StreamHandler()
]
)
while True:
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
import sys
import os
from automlk.grapher import grapher_loop
from automlk.context import get_data_folder
and context (classes, functions, sometimes code) from other files:
# Path: automlk/grapher.py
# def grapher_loop():
# """
# periodically pool the grapher queue for a job
#
# :return:
# """
# while True:
# heart_beep('grapher', '')
# # check the list of datasets
# for dt in get_dataset_list():
# if dt.status != 'created' and not dt.grapher:
# heart_beep('grapher', {'dataset_id': dt.dataset_id, 'dataset_name': dt.name})
# logging.info('grapher on dataset: %s' % dt.dataset_id)
# create_graph_data(dt.dataset_id)
# time.sleep(60)
#
# Path: automlk/context.py
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | try: |
Predict the next line for this snippet: <|code_start|>
dataset_id = '37'
round_id = '19'
dataset = get_dataset(dataset_id)
ds = pickle.load(open(get_dataset_folder(dataset_id) + '/data/eval_set.pkl', 'rb'))
folder = get_dataset_folder(dataset_id) + '/models/'
names = list(pickle.load(open(folder + '%s_feature_names.pkl' % round_id, 'rb')))
print(names)
model = pickle.load(open(folder + '%s_model.pkl' % round_id, 'rb'))
pipe_model = pickle.load(open(folder + '%s_pipe_model.pkl' % round_id, 'rb'))
<|code_end|>
with the help of current file imports:
import pickle
import eli5
import pandas as pd
from eli5.sklearn import PermutationImportance
from automlk.context import get_dataset_folder
from automlk.dataset import get_dataset, get_dataset_sample
and context from other files:
# Path: automlk/context.py
# def get_dataset_folder(dataset_id):
# """
# storage folder of the dataset
# :param dataset_id: id of the dataset
# :return: folder path (root of the structure)
# """
# return get_data_folder() + '/%s' % dataset_id
#
# Path: automlk/dataset.py
# def get_dataset(dataset_id, include_results=False):
# """
# get the descriptive data of a dataset
#
# :param dataset_id: id of the dataset
# :param include_results: if need to extract results also
# :return: dataset object
# """
# dt = make_dataset(get_key_store('dataset:%s' % dataset_id))
# dt.status = get_key_store('dataset:%s:status' % dataset_id)
#
# # add counters and results
# dt.grapher = get_key_store('dataset:%s:grapher' % dataset_id)
# dt.round_counter = get_counter_store('dataset:%s:round_counter' % dataset_id)
#
# if include_results:
# dt.results = get_key_store('dataset:%s:results' % dataset_id)
#
# return dt
#
# def get_dataset_sample(dataset_id):
# """
# retrieves a sample of the dataset
#
# :param dataset_id: dataset id
# :return: list of records as dictionaries
# """
# filename = get_dataset_folder(dataset_id) + '/data/sample.pkl'
# if os.path.exists(filename):
# return pickle.load(open(filename, 'rb')).to_dict(orient='records')
# else:
# return [{}]
, which may contain function names, class names, or code. Output only the next line. | pipe_transform = pickle.load(open(folder + '%s_pipe_transform.pkl' % round_id, 'rb')) |
Based on the snippet: <|code_start|>
dataset_id = '37'
round_id = '19'
dataset = get_dataset(dataset_id)
ds = pickle.load(open(get_dataset_folder(dataset_id) + '/data/eval_set.pkl', 'rb'))
folder = get_dataset_folder(dataset_id) + '/models/'
names = list(pickle.load(open(folder + '%s_feature_names.pkl' % round_id, 'rb')))
print(names)
<|code_end|>
, predict the immediate next line with the help of imports:
import pickle
import eli5
import pandas as pd
from eli5.sklearn import PermutationImportance
from automlk.context import get_dataset_folder
from automlk.dataset import get_dataset, get_dataset_sample
and context (classes, functions, sometimes code) from other files:
# Path: automlk/context.py
# def get_dataset_folder(dataset_id):
# """
# storage folder of the dataset
# :param dataset_id: id of the dataset
# :return: folder path (root of the structure)
# """
# return get_data_folder() + '/%s' % dataset_id
#
# Path: automlk/dataset.py
# def get_dataset(dataset_id, include_results=False):
# """
# get the descriptive data of a dataset
#
# :param dataset_id: id of the dataset
# :param include_results: if need to extract results also
# :return: dataset object
# """
# dt = make_dataset(get_key_store('dataset:%s' % dataset_id))
# dt.status = get_key_store('dataset:%s:status' % dataset_id)
#
# # add counters and results
# dt.grapher = get_key_store('dataset:%s:grapher' % dataset_id)
# dt.round_counter = get_counter_store('dataset:%s:round_counter' % dataset_id)
#
# if include_results:
# dt.results = get_key_store('dataset:%s:results' % dataset_id)
#
# return dt
#
# def get_dataset_sample(dataset_id):
# """
# retrieves a sample of the dataset
#
# :param dataset_id: dataset id
# :return: list of records as dictionaries
# """
# filename = get_dataset_folder(dataset_id) + '/data/sample.pkl'
# if os.path.exists(filename):
# return pickle.load(open(filename, 'rb')).to_dict(orient='records')
# else:
# return [{}]
. Output only the next line. | model = pickle.load(open(folder + '%s_model.pkl' % round_id, 'rb')) |
Continue the code snippet: <|code_start|>
print('dataset:counter', get_key_store('dataset:counter'))
print('dataset:list', list_key_store('dataset:list'))
print('dataset ids', get_dataset_ids())
dt = get_dataset(1)
<|code_end|>
. Use current file imports:
from automlk.store import *
from automlk.dataset import get_dataset_ids, get_dataset, get_dataset_list
and context (classes, functions, or code) from other files:
# Path: automlk/dataset.py
# def get_dataset_ids():
# """
# get the list of ids all datasets
#
# :return: list of ids
# """
# return list_key_store('dataset:list')
#
# def get_dataset(dataset_id, include_results=False):
# """
# get the descriptive data of a dataset
#
# :param dataset_id: id of the dataset
# :param include_results: if need to extract results also
# :return: dataset object
# """
# dt = make_dataset(get_key_store('dataset:%s' % dataset_id))
# dt.status = get_key_store('dataset:%s:status' % dataset_id)
#
# # add counters and results
# dt.grapher = get_key_store('dataset:%s:grapher' % dataset_id)
# dt.round_counter = get_counter_store('dataset:%s:round_counter' % dataset_id)
#
# if include_results:
# dt.results = get_key_store('dataset:%s:results' % dataset_id)
#
# return dt
#
# def get_dataset_list(include_results=False):
# """
# get the list of all datasets
#
# :param include_results: flag to determine if the status are also retrieved (default = False)
# :return: list of datasets objects or empty list if error (eg. redis or environment not set)
# """
# # try:
# return [get_dataset(dataset_id, include_results) for dataset_id in get_dataset_ids()]
# # except:
# # return []
. Output only the next line. | print('dt ok') |
Predict the next line after this snippet: <|code_start|> encoder = model_bow(X[col].values, self.transformer_params)
self.feature_names.remove(col)
self.feature_names += [col + '__' + x.replace(' ', '_') for x in encoder.get_feature_names()]
self.transformer.append((col, encoder))
def transform(self, X):
# transform X
for col, encoder in self.transformer:
# remove col in X
text = [clean_text(s) for s in X[col].values]
T = pd.DataFrame(encoder.transform(text).todense()).reset_index(drop=True)
T.columns = [col + '__' + x.replace(' ', '_') for x in encoder.get_feature_names()]
X = pd.concat([X.reset_index(drop=True), T], axis=1)
X.drop(col, axis=1, inplace=True)
return X
class TransformerWord2Vec(Transformer):
# class for process word2vec for text
def __init__(self, **params):
super().__init__(**params)
self.text_cols = [f['name'] for f in self.context if f['col_type'] == 'text']
self.details = self.text_cols
def fit(self, X, y):
self.transformer = []
self.feature_names = list(X.columns)
for col in self.text_cols:
encoder = get_text_encoder(self.context, col, 'w2v', self.transformer_params)
<|code_end|>
using the current file's imports:
from abc import ABCMeta, abstractmethod
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, MaxAbsScaler, Imputer, PolynomialFeatures, \
LabelEncoder
from sklearn.decomposition import TruncatedSVD, FastICA, PCA
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC, LinearSVR
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from .spaces.process import *
from .utils.text_encoders import *
from .context import text_model_filename
from gensim.models import Word2Vec, Doc2Vec
from gensim.models.doc2vec import TaggedDocument
import numpy as np
import pickle
import pandas as pd
import category_encoders as ce
and any relevant context from other files:
# Path: automlk/context.py
# def text_model_filename(textset_id, model_type, params):
# """
# name of the file with params
#
# :param model_type: model type (bow, w2v, d2v)
# :param params: params of the model
# :return:
# """
# folder = get_data_folder() + '/texts/%s' % textset_id
# if not os.path.exists(folder):
# os.makedirs(folder)
#
# params_name = "-".join([key + '_' + str(params[key]) for key in params.keys()])
# for c in ['[', ']', ',', '(', ')', '{', '}']:
# params_name = params_name.replace(c, '')
# return folder + '/%s-' % model_type + params_name.replace(' ', '_')
. Output only the next line. | if encoder is None: |
Given the code snippet: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(module)s %(lineno)d %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/controller.log'),
logging.StreamHandler()
]
<|code_end|>
, generate the next line using the imports in this file:
import logging
import sys
import os
from automlk.controller import controller_loop
from automlk.context import get_data_folder
and context (functions, classes, or occasionally code) from other files:
# Path: automlk/controller.py
# def controller_loop():
# """
# controller process: manages the search strategy and send instruction to workers
# :return:
# """
# i_dataset = -1
# # controls the optimization rounds and sends instructions to the workers
# while True:
# # check the list of datasets to search
# active = [id for id in get_dataset_ids() if get_key_store('dataset:%s:status' % id) == 'searching']
#
# if len(active) == 0:
# heart_beep('controller', {})
# time.sleep(1)
# else:
# # sends work to the workers when their queue is empty
# if llen_key_store(SEARCH_QUEUE) == 0:
# # get next dataset to search
# i_dataset += 1
# if i_dataset > len(active) - 1:
# i_dataset = 0
#
# # retrieves dataset and status of search
# dataset_id = active[i_dataset]
#
# # find search job
# msg_search = __create_search_round(dataset_id)
#
# # send queue the next job to do
# if msg_search != {}:
# log.info('sending %s' % msg_search)
# lpush_key_store(SEARCH_QUEUE, msg_search)
# heart_beep('controller', msg_search)
#
# # then read the duplicate ROUND queue
# while llen_key_store(DUPLICATE_QUEUE) > 0:
# msg = brpop_key_store(DUPLICATE_QUEUE)
# msg_search = __duplicate_search_round(msg['round'], msg['dataset'])
#
# # send queue the next job to do
# log.info('sending %s' % msg_search)
# lpush_key_store(SEARCH_QUEUE, msg_search)
# heart_beep('controller', msg_search)
#
# # then read the results queue
# while llen_key_store(RESULTS_QUEUE) > 0:
# msg_result = brpop_key_store(RESULTS_QUEUE)
# if msg_result is not None:
# __process_result(msg_result)
#
# # then wait 1 second
# time.sleep(1)
#
# Path: automlk/context.py
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | ) |
Next line prediction: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(module)s %(lineno)d %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/controller.log'),
logging.StreamHandler()
]
)
logging.info('starting controller')
<|code_end|>
. Use current file imports:
(import logging
import sys
import os
from automlk.controller import controller_loop
from automlk.context import get_data_folder)
and context including class names, function names, or small code snippets from other files:
# Path: automlk/controller.py
# def controller_loop():
# """
# controller process: manages the search strategy and send instruction to workers
# :return:
# """
# i_dataset = -1
# # controls the optimization rounds and sends instructions to the workers
# while True:
# # check the list of datasets to search
# active = [id for id in get_dataset_ids() if get_key_store('dataset:%s:status' % id) == 'searching']
#
# if len(active) == 0:
# heart_beep('controller', {})
# time.sleep(1)
# else:
# # sends work to the workers when their queue is empty
# if llen_key_store(SEARCH_QUEUE) == 0:
# # get next dataset to search
# i_dataset += 1
# if i_dataset > len(active) - 1:
# i_dataset = 0
#
# # retrieves dataset and status of search
# dataset_id = active[i_dataset]
#
# # find search job
# msg_search = __create_search_round(dataset_id)
#
# # send queue the next job to do
# if msg_search != {}:
# log.info('sending %s' % msg_search)
# lpush_key_store(SEARCH_QUEUE, msg_search)
# heart_beep('controller', msg_search)
#
# # then read the duplicate ROUND queue
# while llen_key_store(DUPLICATE_QUEUE) > 0:
# msg = brpop_key_store(DUPLICATE_QUEUE)
# msg_search = __duplicate_search_round(msg['round'], msg['dataset'])
#
# # send queue the next job to do
# log.info('sending %s' % msg_search)
# lpush_key_store(SEARCH_QUEUE, msg_search)
# heart_beep('controller', msg_search)
#
# # then read the results queue
# while llen_key_store(RESULTS_QUEUE) > 0:
# msg_result = brpop_key_store(RESULTS_QUEUE)
# if msg_result is not None:
# __process_result(msg_result)
#
# # then wait 1 second
# time.sleep(1)
#
# Path: automlk/context.py
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | while True: |
Next line prediction: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)5s [%(module)s %(lineno)3d] %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/worker.log'),
logging.StreamHandler()
])
<|code_end|>
. Use current file imports:
(import sys
import logging
from automlk.worker import worker_loop
from automlk.context import get_data_folder)
and context including class names, function names, or small code snippets from other files:
# Path: automlk/worker.py
# def worker_loop(worker_id, gpu=False):
# """
# periodically pool the receiver queue for a search job
# :param worker_id: index of the worker on this machine
# :param gpu: can use gpu on this machine
# :return:
# """
# global __worker_timer_start
# global __worker_timer_limit
# global __worker_dataset
# __worker_dataset = ''
# __worker_timer_start = 0
# __worker_timer_limit = 0
# f_stop = threading.Event()
# # start calling f now and every 60 sec thereafter
# __timer_control(f_stop)
# while True:
# try:
# # poll queue
# msg_search = brpop_key_store('controller:search_queue')
# heart_beep('worker', msg_search, worker_id, gpu)
# __worker_timer_start = time.time()
# __worker_timer_limit = 0
# __worker_dataset = ''
# if msg_search is not None:
# __worker_dataset = msg_search['dataset_id']
# __worker_timer_limit = msg_search['time_limit']
# log.info('received %s' % msg_search)
# msg_search = {**msg_search, **{'start_time': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
# 'host_name': socket.gethostname()}}
# job_search(msg_search)
# except KeyboardInterrupt:
# log.info('Keyboard interrupt: exiting')
# # stop the timer thread
# f_stop.set()
# exit()
# except Exception as e:
# exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# log.error('%s in %s line:%s error: %s' % (exc_type.__name__, fname, str(exc_tb.tb_lineno), str(e)))
# with open(get_data_folder() + '/errors.txt', 'a') as f:
# f.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + str(msg_search) + '\n')
# f.write('%s in %s line:%s error: %s' % (exc_type.__name__, fname, str(exc_tb.tb_lineno), str(e)) + '\n')
# f.write('-'*80 + '\n')
#
# Path: automlk/context.py
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | logging.info('starting worker') |
Based on the snippet: <|code_start|>
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)5s [%(module)s %(lineno)3d] %(message)s',
handlers=[
logging.FileHandler(get_data_folder() + '/worker.log'),
logging.StreamHandler()
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import logging
from automlk.worker import worker_loop
from automlk.context import get_data_folder
and context (classes, functions, sometimes code) from other files:
# Path: automlk/worker.py
# def worker_loop(worker_id, gpu=False):
# """
# periodically pool the receiver queue for a search job
# :param worker_id: index of the worker on this machine
# :param gpu: can use gpu on this machine
# :return:
# """
# global __worker_timer_start
# global __worker_timer_limit
# global __worker_dataset
# __worker_dataset = ''
# __worker_timer_start = 0
# __worker_timer_limit = 0
# f_stop = threading.Event()
# # start calling f now and every 60 sec thereafter
# __timer_control(f_stop)
# while True:
# try:
# # poll queue
# msg_search = brpop_key_store('controller:search_queue')
# heart_beep('worker', msg_search, worker_id, gpu)
# __worker_timer_start = time.time()
# __worker_timer_limit = 0
# __worker_dataset = ''
# if msg_search is not None:
# __worker_dataset = msg_search['dataset_id']
# __worker_timer_limit = msg_search['time_limit']
# log.info('received %s' % msg_search)
# msg_search = {**msg_search, **{'start_time': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
# 'host_name': socket.gethostname()}}
# job_search(msg_search)
# except KeyboardInterrupt:
# log.info('Keyboard interrupt: exiting')
# # stop the timer thread
# f_stop.set()
# exit()
# except Exception as e:
# exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# log.error('%s in %s line:%s error: %s' % (exc_type.__name__, fname, str(exc_tb.tb_lineno), str(e)))
# with open(get_data_folder() + '/errors.txt', 'a') as f:
# f.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + str(msg_search) + '\n')
# f.write('%s in %s line:%s error: %s' % (exc_type.__name__, fname, str(exc_tb.tb_lineno), str(e)) + '\n')
# f.write('-'*80 + '\n')
#
# Path: automlk/context.py
# def get_data_folder():
# """
# retrieves root folder from 'automlk.json' configuration file
# :return: storage folder name of the data
# """
# return get_config()['data']
. Output only the next line. | ]) |
Predict the next line for this snippet: <|code_start|>
def test_gini():
def fequ(a, b):
return abs(a - b) < 1e-6
def T(a, p, g, n):
assert (fequ(gini(a, p), g))
assert (fequ(gini_normalized(a, p), n))
T([1, 2, 3], [10, 20, 30], 0.111111, 1)
T([1, 2, 3], [30, 20, 10], -0.111111, -1)
T([1, 2, 3], [0, 0, 0], -0.111111, -1)
<|code_end|>
with the help of current file imports:
from automlk.metrics import gini, gini_normalized
and context from other files:
# Path: automlk/metrics.py
# def gini(y_act, y_pred):
# """
# metrics gini = Gini coefficient (classification only)
#
# :param y_act: vector of actual values
# :param y_pred: vector of predicted values
# :return: gini
# """
# assert (len(y_act) == len(y_pred))
# all = np.asarray(np.c_[y_act, y_pred, np.arange(len(y_act))], dtype=np.float)
# all = all[np.lexsort((all[:, 2], -1 * all[:, 1]))]
# totalLosses = all[:, 0].sum()
# giniSum = all[:, 0].cumsum().sum() / totalLosses
# giniSum -= (len(y_act) + 1) / 2.
# return giniSum / len(y_act)
#
# def gini_normalized(y_act, y_pred):
# """
# metrics normalized gini = Normalized Gini coefficient (classification only)
#
# :param y_act: vector of actual values
# :param y_pred: vector of predicted values
# :return: gini
# """
# return gini(y_act, y_pred) / gini(y_act, y_act)
, which may contain function names, class names, or code. Output only the next line. | T([3, 2, 1], [0, 0, 0], 0.111111, 1) |
Continue the code snippet: <|code_start|>
def test_gini():
def fequ(a, b):
return abs(a - b) < 1e-6
def T(a, p, g, n):
assert (fequ(gini(a, p), g))
assert (fequ(gini_normalized(a, p), n))
T([1, 2, 3], [10, 20, 30], 0.111111, 1)
T([1, 2, 3], [30, 20, 10], -0.111111, -1)
T([1, 2, 3], [0, 0, 0], -0.111111, -1)
T([3, 2, 1], [0, 0, 0], 0.111111, 1)
T([1, 2, 4, 3], [0, 0, 0, 0], -0.1, -0.8)
T([2, 1, 4, 3], [0, 0, 2, 1], 0.125, 1)
<|code_end|>
. Use current file imports:
from automlk.metrics import gini, gini_normalized
and context (classes, functions, or code) from other files:
# Path: automlk/metrics.py
# def gini(y_act, y_pred):
# """
# metrics gini = Gini coefficient (classification only)
#
# :param y_act: vector of actual values
# :param y_pred: vector of predicted values
# :return: gini
# """
# assert (len(y_act) == len(y_pred))
# all = np.asarray(np.c_[y_act, y_pred, np.arange(len(y_act))], dtype=np.float)
# all = all[np.lexsort((all[:, 2], -1 * all[:, 1]))]
# totalLosses = all[:, 0].sum()
# giniSum = all[:, 0].cumsum().sum() / totalLosses
# giniSum -= (len(y_act) + 1) / 2.
# return giniSum / len(y_act)
#
# def gini_normalized(y_act, y_pred):
# """
# metrics normalized gini = Normalized Gini coefficient (classification only)
#
# :param y_act: vector of actual values
# :param y_pred: vector of predicted values
# :return: gini
# """
# return gini(y_act, y_pred) / gini(y_act, y_act)
. Output only the next line. | T([0, 20, 40, 0, 10], [40, 40, 10, 5, 5], 0, 0) |
Predict the next line for this snippet: <|code_start|>
ds = pickle.load(open(get_dataset_folder('4/data/eval_set.pkl'), 'rb'))
y_pred_eval, y_pred_test, y_pred_submit = get_pred_eval_test('4', 15)
m = confusion_matrix(ds.y_train, np.argmax(y_pred_eval, axis=1))
<|code_end|>
with the help of current file imports:
import pickle
import pandas as pd
import numpy as np
from automlk.models import get_pred_eval_test
from automlk.dataset import get_dataset_folder
from sklearn.metrics import confusion_matrix
and context from other files:
# Path: automlk/models.py
# MAX_ROUNDS = 5000
# PATIENCE = 50
# class Model(object):
# class ModelCatboost(Model):
# class ModelNN(Model):
# class EnsemblePool(object):
# class ModelEnsembleSelection(Model):
# def __init__(self, **params):
# def fit(self, X_train, y_train):
# def fit_early_stopping(self, X_train, y_train, X_eval, y_eval):
# def predict(self, X):
# def predict_proba(self, X):
# def binary_proba(y):
# def __init__(self, **params):
# def set_model(self):
# def fit(self, X_train, y_train):
# def fit_early_stopping(self, X_train, y_train, X_eval, y_eval):
# def __init__(self, **params):
# def fit(self, X_train, y_train):
# def fit_early_stopping(self, X_train, y_train, X_eval, y_eval):
# def __create_model(self, dim):
# def predict_proba(self, X):
# def prepare_y(self, y):
# def __init__(self, pool_model_round_ids, pool_model_names, pool_eval_preds, pool_test_preds, pool_submit_preds):
# def __init__(self, dataset, context, params):
# def cv_pool(self, pool, y, y_test, cv_folds, threshold, depth):
#
# Path: automlk/dataset.py
# def get_dataset_ids():
# def get_dataset_list(include_results=False):
# def get_dataset_status(dataset_id):
# def get_dataset(dataset_id, include_results=False):
# def make_dataset(d):
# def create_dataset_json(dataset_id):
# def create_dataset(name, folder_id, description, source, mode, filename_train, filename_test='', filename_cols='',
# filename_submit='', url=''):
# def update_dataset(dataset_id, name, folder_id, description, source, url):
# def update_feature_dataset(dataset_id, name, description, to_keep, col_type, text_ref):
# def update_problem_dataset(dataset_id, problem_type, y_col, metric, other_metrics, val_col, cv_folds, val_col_shuffle,
# scan, holdout_ratio, col_submit):
# def reset_dataset(dataset_id):
# def delete_dataset(dataset_id):
# def get_dataset_sample(dataset_id):
# def __init__(self, dataset_id, name, folder_id, description, source, mode, filename_train,
# filename_test, filename_cols, filename_submit, url, creation_date):
# def initialize_data(self):
# def update_stats(self, df_train, df_test, df_submit):
# def update_calc(self):
# def update_problem(self, problem_type, y_col, metric, other_metrics, val_col, cv_folds, val_col_shuffle, scan,
# holdout_ratio, col_submit, best_is_min=True):
# def update_y_data(self):
# def save(self, dataset_id):
# def load(self, load_data, features):
# def load_calc(self, calc_data):
# def finalize_creation(self, df_train, df_test, df_submit):
# def get_data(self, part='train'):
# def create_folders(self):
# def __folder(self):
# def __check_data(self, filename, part='train'):
# def __read_data(self, filename):
# def __initialize_features(self, df, df_cols):
# def update_features(self, df):
# def __import_data(self, filename, part):
# def __init__(self, name, raw_type, n_missing, n_unique_values, first_unique_values, description, to_keep,
# col_type, text_ref='', n_unique_ratio=0.):
# def get_y_eval(dataset_id):
# class DataSet(object):
# class Feature(object):
, which may contain function names, class names, or code. Output only the next line. | print(m) |
Given the code snippet: <|code_start|> def predict_proba(self, X):
# prediction with specific case of binary and classification
if self.y_n_classes == 2:
return binary_proba(self.model.predict(X.as_matrix()))
else:
return self.model.predict_proba(X.as_matrix())
def prepare_y(self, y):
# generate y in one hot encoding if classification
if self.problem_type == 'classification':
return to_categorical(y, self.y_n_classes)
else:
return y
class EnsemblePool(object):
# class to manage data required for ensembling
def __init__(self, pool_model_round_ids, pool_model_names, pool_eval_preds, pool_test_preds, pool_submit_preds):
self.pool_model_round_ids = pool_model_round_ids
self.pool_model_names = pool_model_names
self.pool_eval_preds = pool_eval_preds
self.pool_test_preds = pool_test_preds
self.pool_submit_preds = pool_submit_preds
class ModelEnsembleSelection(Model):
# TODO : fix bug
# class for model with ensemble selection
def __init__(self, dataset, context, params):
<|code_end|>
, generate the next line using the imports in this file:
from abc import ABCMeta, abstractmethod
from .spaces.model import *
from .config import METRIC_NULL
from .dataset import get_dataset_folder
from catboost import Pool, CatBoostClassifier, CatBoostRegressor
from .utils.keras_wrapper import keras_create_model, keras_compile_model, import_keras, to_categorical
import pickle
import logging
import numpy as np
import pandas as pd
and context (functions, classes, or occasionally code) from other files:
# Path: automlk/config.py
# METRIC_NULL = 1e7
#
# Path: automlk/dataset.py
# def get_dataset_ids():
# def get_dataset_list(include_results=False):
# def get_dataset_status(dataset_id):
# def get_dataset(dataset_id, include_results=False):
# def make_dataset(d):
# def create_dataset_json(dataset_id):
# def create_dataset(name, folder_id, description, source, mode, filename_train, filename_test='', filename_cols='',
# filename_submit='', url=''):
# def update_dataset(dataset_id, name, folder_id, description, source, url):
# def update_feature_dataset(dataset_id, name, description, to_keep, col_type, text_ref):
# def update_problem_dataset(dataset_id, problem_type, y_col, metric, other_metrics, val_col, cv_folds, val_col_shuffle,
# scan, holdout_ratio, col_submit):
# def reset_dataset(dataset_id):
# def delete_dataset(dataset_id):
# def get_dataset_sample(dataset_id):
# def __init__(self, dataset_id, name, folder_id, description, source, mode, filename_train,
# filename_test, filename_cols, filename_submit, url, creation_date):
# def initialize_data(self):
# def update_stats(self, df_train, df_test, df_submit):
# def update_calc(self):
# def update_problem(self, problem_type, y_col, metric, other_metrics, val_col, cv_folds, val_col_shuffle, scan,
# holdout_ratio, col_submit, best_is_min=True):
# def update_y_data(self):
# def save(self, dataset_id):
# def load(self, load_data, features):
# def load_calc(self, calc_data):
# def finalize_creation(self, df_train, df_test, df_submit):
# def get_data(self, part='train'):
# def create_folders(self):
# def __folder(self):
# def __check_data(self, filename, part='train'):
# def __read_data(self, filename):
# def __initialize_features(self, df, df_cols):
# def update_features(self, df):
# def __import_data(self, filename, part):
# def __init__(self, name, raw_type, n_missing, n_unique_values, first_unique_values, description, to_keep,
# col_type, text_ref='', n_unique_ratio=0.):
# def get_y_eval(dataset_id):
# class DataSet(object):
# class Feature(object):
. Output only the next line. | super().__init__(dataset, context, params) |
Using the snippet: <|code_start|>
@app.route('/monitor', methods=['GET'])
def monitor():
# monitor workers
return render_template('monitor.html', controller=get_heart_beeps('controller'),
grapher=get_heart_beeps('grapher'), worker_text=get_heart_beeps('worker_text'),
workers=get_heart_beeps('worker'), config=get_config())
@app.route('/config', methods=['GET', 'POST'])
def config():
# view/edit configuration
form = ConfigForm()
if request.method == 'POST':
if form.validate():
try:
set_config(data=form.data.data,
theme=form.theme.data,
bootstrap=form.bootstrap.data,
graph_theme=form.graph_theme.data,
store=form.store.data,
store_url=form.store_url.data)
except Exception as e:
<|code_end|>
, determine the next line of code. You have imports:
from app import app
from flask import render_template, request, flash
from .form import *
from automlk.monitor import get_heart_beeps
from automlk.context import get_config, set_config
and context (class names, function names, or code) available:
# Path: automlk/monitor.py
# def get_heart_beeps(module):
# """
# returns last heart beeps of the controller and workers
#
# :param module: controller / worker
# :return: list of status or empty list (eg. if environment not set)
# """
# # returns values of heart beeps
# try:
# # get list and values
# l = smembers_key_store('monitor:%s' % module)
# l_hb = [get_key_store('monitor:%s:%s' % (module, id)) for id in l]
#
# # filter on heart beeps < 12h
# return [h for h in l_hb if
# (datetime.datetime.now() - datetime.datetime(**h['time'])) < datetime.timedelta(hours=12)]
# except:
# return []
#
# Path: automlk/context.py
# def get_config():
# """
#
# retrieves configuration parameters
# :return: config dict
# """
# if os.path.exists('../config.json'):
# with open('../config.json', 'r') as f:
# config = eval("".join(f.readlines()))
# # upward compatibility
# if 'bootstrap' not in config.keys():
# config['bootstrap'] = ''
# if 'graph_theme' not in config.keys():
# config['graph_theme'] = 'dark'
# if 'doc_theme' not in config.keys():
# config['doc_theme'] = 'default'
# return config
# raise EnvironmentError('configuration file %s not found' % '../config.json')
#
# def set_config(data, theme, bootstrap, graph_theme, store, store_url):
# """
# set config data
#
# :param data: path to data storage
# :param theme: theme for user interface
# :param bootstrap: specific url for a bootstrap
# :param graph_theme: style for graphs (dark / white)
# :param store: store mode (redis / file)
# :param store_url: url if redis mode
# :return:
# """
# # check data
# if not os.path.exists(data):
# raise EnvironmentError('data folder %s do not exist' % data)
#
# if store == 'redis':
# # check connection to redis
# try:
# import redis
# rds = redis.Redis(host=store_url)
# rds.exists('test')
# except:
# raise EnvironmentError('could not connect to redis')
# set_use_redis(True)
# else:
# store_folder = data + '/store'
# if not os.path.exists(store_folder):
# os.makedirs(store_folder)
# set_use_redis(False)
#
# # then save config
# config = {'data': data, 'theme': theme, 'bootstrap': bootstrap, 'graph_theme': graph_theme, 'store': store,
# 'store_url': store_url}
# with open('../config.json', 'w') as f:
# f.write(json.dumps(config) + '\n')
. Output only the next line. | flash(str(e)) |
Given the code snippet: <|code_start|>
@app.route('/monitor', methods=['GET'])
def monitor():
# monitor workers
return render_template('monitor.html', controller=get_heart_beeps('controller'),
grapher=get_heart_beeps('grapher'), worker_text=get_heart_beeps('worker_text'),
workers=get_heart_beeps('worker'), config=get_config())
@app.route('/config', methods=['GET', 'POST'])
def config():
# view/edit configuration
form = ConfigForm()
if request.method == 'POST':
if form.validate():
try:
set_config(data=form.data.data,
theme=form.theme.data,
bootstrap=form.bootstrap.data,
graph_theme=form.graph_theme.data,
store=form.store.data,
<|code_end|>
, generate the next line using the imports in this file:
from app import app
from flask import render_template, request, flash
from .form import *
from automlk.monitor import get_heart_beeps
from automlk.context import get_config, set_config
and context (functions, classes, or occasionally code) from other files:
# Path: automlk/monitor.py
# def get_heart_beeps(module):
# """
# returns last heart beeps of the controller and workers
#
# :param module: controller / worker
# :return: list of status or empty list (eg. if environment not set)
# """
# # returns values of heart beeps
# try:
# # get list and values
# l = smembers_key_store('monitor:%s' % module)
# l_hb = [get_key_store('monitor:%s:%s' % (module, id)) for id in l]
#
# # filter on heart beeps < 12h
# return [h for h in l_hb if
# (datetime.datetime.now() - datetime.datetime(**h['time'])) < datetime.timedelta(hours=12)]
# except:
# return []
#
# Path: automlk/context.py
# def get_config():
# """
#
# retrieves configuration parameters
# :return: config dict
# """
# if os.path.exists('../config.json'):
# with open('../config.json', 'r') as f:
# config = eval("".join(f.readlines()))
# # upward compatibility
# if 'bootstrap' not in config.keys():
# config['bootstrap'] = ''
# if 'graph_theme' not in config.keys():
# config['graph_theme'] = 'dark'
# if 'doc_theme' not in config.keys():
# config['doc_theme'] = 'default'
# return config
# raise EnvironmentError('configuration file %s not found' % '../config.json')
#
# def set_config(data, theme, bootstrap, graph_theme, store, store_url):
# """
# set config data
#
# :param data: path to data storage
# :param theme: theme for user interface
# :param bootstrap: specific url for a bootstrap
# :param graph_theme: style for graphs (dark / white)
# :param store: store mode (redis / file)
# :param store_url: url if redis mode
# :return:
# """
# # check data
# if not os.path.exists(data):
# raise EnvironmentError('data folder %s do not exist' % data)
#
# if store == 'redis':
# # check connection to redis
# try:
# import redis
# rds = redis.Redis(host=store_url)
# rds.exists('test')
# except:
# raise EnvironmentError('could not connect to redis')
# set_use_redis(True)
# else:
# store_folder = data + '/store'
# if not os.path.exists(store_folder):
# os.makedirs(store_folder)
# set_use_redis(False)
#
# # then save config
# config = {'data': data, 'theme': theme, 'bootstrap': bootstrap, 'graph_theme': graph_theme, 'store': store,
# 'store_url': store_url}
# with open('../config.json', 'w') as f:
# f.write(json.dumps(config) + '\n')
. Output only the next line. | store_url=form.store_url.data) |
Given the code snippet: <|code_start|>
@app.route('/monitor', methods=['GET'])
def monitor():
# monitor workers
return render_template('monitor.html', controller=get_heart_beeps('controller'),
grapher=get_heart_beeps('grapher'), worker_text=get_heart_beeps('worker_text'),
workers=get_heart_beeps('worker'), config=get_config())
@app.route('/config', methods=['GET', 'POST'])
def config():
# view/edit configuration
form = ConfigForm()
if request.method == 'POST':
if form.validate():
try:
set_config(data=form.data.data,
theme=form.theme.data,
bootstrap=form.bootstrap.data,
graph_theme=form.graph_theme.data,
store=form.store.data,
store_url=form.store_url.data)
except Exception as e:
flash(str(e))
else:
config = get_config()
# copy data to form
form.data.data = config['data']
form.theme.data = config['theme']
<|code_end|>
, generate the next line using the imports in this file:
from app import app
from flask import render_template, request, flash
from .form import *
from automlk.monitor import get_heart_beeps
from automlk.context import get_config, set_config
and context (functions, classes, or occasionally code) from other files:
# Path: automlk/monitor.py
# def get_heart_beeps(module):
# """
# returns last heart beeps of the controller and workers
#
# :param module: controller / worker
# :return: list of status or empty list (eg. if environment not set)
# """
# # returns values of heart beeps
# try:
# # get list and values
# l = smembers_key_store('monitor:%s' % module)
# l_hb = [get_key_store('monitor:%s:%s' % (module, id)) for id in l]
#
# # filter on heart beeps < 12h
# return [h for h in l_hb if
# (datetime.datetime.now() - datetime.datetime(**h['time'])) < datetime.timedelta(hours=12)]
# except:
# return []
#
# Path: automlk/context.py
# def get_config():
# """
#
# retrieves configuration parameters
# :return: config dict
# """
# if os.path.exists('../config.json'):
# with open('../config.json', 'r') as f:
# config = eval("".join(f.readlines()))
# # upward compatibility
# if 'bootstrap' not in config.keys():
# config['bootstrap'] = ''
# if 'graph_theme' not in config.keys():
# config['graph_theme'] = 'dark'
# if 'doc_theme' not in config.keys():
# config['doc_theme'] = 'default'
# return config
# raise EnvironmentError('configuration file %s not found' % '../config.json')
#
# def set_config(data, theme, bootstrap, graph_theme, store, store_url):
# """
# set config data
#
# :param data: path to data storage
# :param theme: theme for user interface
# :param bootstrap: specific url for a bootstrap
# :param graph_theme: style for graphs (dark / white)
# :param store: store mode (redis / file)
# :param store_url: url if redis mode
# :return:
# """
# # check data
# if not os.path.exists(data):
# raise EnvironmentError('data folder %s do not exist' % data)
#
# if store == 'redis':
# # check connection to redis
# try:
# import redis
# rds = redis.Redis(host=store_url)
# rds.exists('test')
# except:
# raise EnvironmentError('could not connect to redis')
# set_use_redis(True)
# else:
# store_folder = data + '/store'
# if not os.path.exists(store_folder):
# os.makedirs(store_folder)
# set_use_redis(False)
#
# # then save config
# config = {'data': data, 'theme': theme, 'bootstrap': bootstrap, 'graph_theme': graph_theme, 'store': store,
# 'store_url': store_url}
# with open('../config.json', 'w') as f:
# f.write(json.dumps(config) + '\n')
. Output only the next line. | form.bootstrap.data = config['bootstrap'] |
Given snippet: <|code_start|> return error_redirect
if type(about) != list or type(category) != list or type(detail) != list:
flash('Invalid arguments..', 'danger')
return error_redirect
if len(about) != len(category) or len(about) != len(detail):
flash('Invalid arguments...', 'danger')
return error_redirect
query = zip(about, category, detail)
media_list = []
trope_filter = None
for about, category, detail in query:
if about == 'info':
if category == 'media':
media_list.append(detail)
elif about == 'trope':
try:
trope = session.query(Trope).get(detail)
except NoResultFound:
return error_redirect
if trope_filter is None:
trope_filter = Work.tropes.any(Trope.id == trope.id)
else:
trope_filter = trope_filter & \
Work.tropes.any(Trope.id == trope.id)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from flask import Blueprint, flash, redirect, render_template, request, url_for
from sqlalchemy.orm.exc import NoResultFound
from ..sqltypes import HashableLocale as Locale
from ..work import Trope, Work
from .db import session
and context:
# Path: cliche/sqltypes.py
# class HashableLocale(Locale):
# """Hashable Locale"""
#
# def __hash__(self):
# return hash('{}_{}'.format(self.language, self.territory))
#
# Path: cliche/work.py
# class Trope(Base): # FIXME: Temporary: it does not extend Nameable.
# """Tropes"""
#
# #: (:class:`int`) The primary key integer.
# id = Column(Integer, primary_key=True)
#
# #: (:class:`str`) Trope name
# name = Column(String, nullable=False)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkTrope`.
# work_tropes = relationship('WorkTrope',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Work`.
# works = relationship(lambda: Work, secondary='work_tropes',
# collection_class=set)
#
# __tablename__ = 'tropes'
# __repr_columns__ = id, name
#
# class Work(Nameable):
# """Creative work(s) that could be a single work like a film, or
# a series of works such as a comic book series and a television series.
# """
#
# #: (:class:`int`) The primary key integer.
# id = Column(Integer, ForeignKey(Nameable.id), primary_key=True)
#
# #: (:class:`str`) Work media type.
# media_type = Column(String, nullable=False)
#
# #: (:class:`datetime.date`) The publication date.
# published_at = Column(Date)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkGenre`\ s that the work has.
# work_genres = relationship('WorkGenre',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Genre`\ s that the work falls into.
# genres = relationship(Genre,
# secondary='work_genres',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Credit`\ s that the work has.
# credits = relationship(Credit,
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkFranchise`\ s that the work has.
# work_franchises = relationship('WorkFranchise',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Franchise`\ s that the work belongs to.
# franchises = relationship(Franchise,
# secondary='work_franchises',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Character`\ s that appeared in the work.
# characters = relationship(Character,
# secondary='work_characters',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkTrope`.
# work_tropes = relationship(lambda: WorkTrope,
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Trope`.
# tropes = relationship(Trope, secondary='work_tropes',
# collection_class=set)
#
# #: (:class:`datetime.datetime`) The date and time on which
# #: the record was created.
# created_at = Column(DateTime(timezone=True),
# nullable=False,
# default=now(),
# index=True)
#
# __tablename__ = 'works'
# __repr_columns__ = [id]
# __mapper_args__ = {
# 'polymorphic_identity': 'works',
# }
#
# Path: cliche/web/db.py
# def get_database_engine():
# def get_database_engine_options():
# def get_session():
# def close_session(exception=None):
# def setup_session(app):
which might include code, classes, or functions. Output only the next line. | if not media_list and trope_filter is None: |
Predict the next line for this snippet: <|code_start|>
adv_search_bp = Blueprint('adv_search', __name__)
@adv_search_bp.route('/', methods=['POST'])
def result():
about = request.form.getlist('about[]', None)
category = request.form.getlist('category[]', None)
detail = request.form.getlist('detail[]', None)
error_redirect = redirect(url_for('index'))
if about is None or category is None or detail is None:
flash('Invalid arguments.', 'danger')
return error_redirect
if type(about) != list or type(category) != list or type(detail) != list:
flash('Invalid arguments..', 'danger')
return error_redirect
if len(about) != len(category) or len(about) != len(detail):
flash('Invalid arguments...', 'danger')
return error_redirect
query = zip(about, category, detail)
media_list = []
trope_filter = None
for about, category, detail in query:
if about == 'info':
<|code_end|>
with the help of current file imports:
from flask import Blueprint, flash, redirect, render_template, request, url_for
from sqlalchemy.orm.exc import NoResultFound
from ..sqltypes import HashableLocale as Locale
from ..work import Trope, Work
from .db import session
and context from other files:
# Path: cliche/sqltypes.py
# class HashableLocale(Locale):
# """Hashable Locale"""
#
# def __hash__(self):
# return hash('{}_{}'.format(self.language, self.territory))
#
# Path: cliche/work.py
# class Trope(Base): # FIXME: Temporary: it does not extend Nameable.
# """Tropes"""
#
# #: (:class:`int`) The primary key integer.
# id = Column(Integer, primary_key=True)
#
# #: (:class:`str`) Trope name
# name = Column(String, nullable=False)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkTrope`.
# work_tropes = relationship('WorkTrope',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Work`.
# works = relationship(lambda: Work, secondary='work_tropes',
# collection_class=set)
#
# __tablename__ = 'tropes'
# __repr_columns__ = id, name
#
# class Work(Nameable):
# """Creative work(s) that could be a single work like a film, or
# a series of works such as a comic book series and a television series.
# """
#
# #: (:class:`int`) The primary key integer.
# id = Column(Integer, ForeignKey(Nameable.id), primary_key=True)
#
# #: (:class:`str`) Work media type.
# media_type = Column(String, nullable=False)
#
# #: (:class:`datetime.date`) The publication date.
# published_at = Column(Date)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkGenre`\ s that the work has.
# work_genres = relationship('WorkGenre',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Genre`\ s that the work falls into.
# genres = relationship(Genre,
# secondary='work_genres',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Credit`\ s that the work has.
# credits = relationship(Credit,
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkFranchise`\ s that the work has.
# work_franchises = relationship('WorkFranchise',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Franchise`\ s that the work belongs to.
# franchises = relationship(Franchise,
# secondary='work_franchises',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Character`\ s that appeared in the work.
# characters = relationship(Character,
# secondary='work_characters',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkTrope`.
# work_tropes = relationship(lambda: WorkTrope,
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Trope`.
# tropes = relationship(Trope, secondary='work_tropes',
# collection_class=set)
#
# #: (:class:`datetime.datetime`) The date and time on which
# #: the record was created.
# created_at = Column(DateTime(timezone=True),
# nullable=False,
# default=now(),
# index=True)
#
# __tablename__ = 'works'
# __repr_columns__ = [id]
# __mapper_args__ = {
# 'polymorphic_identity': 'works',
# }
#
# Path: cliche/web/db.py
# def get_database_engine():
# def get_database_engine_options():
# def get_session():
# def close_session(exception=None):
# def setup_session(app):
, which may contain function names, class names, or code. Output only the next line. | if category == 'media': |
Given the following code snippet before the placeholder: <|code_start|> flash('Invalid arguments...', 'danger')
return error_redirect
query = zip(about, category, detail)
media_list = []
trope_filter = None
for about, category, detail in query:
if about == 'info':
if category == 'media':
media_list.append(detail)
elif about == 'trope':
try:
trope = session.query(Trope).get(detail)
except NoResultFound:
return error_redirect
if trope_filter is None:
trope_filter = Work.tropes.any(Trope.id == trope.id)
else:
trope_filter = trope_filter & \
Work.tropes.any(Trope.id == trope.id)
if not media_list and trope_filter is None:
flash('Invalid arguments....', 'danger')
return error_redirect
result = session.query(
Work,
Work.canonical_name(Locale.parse('en_US')).label('canonical_name')
<|code_end|>
, predict the next line using imports from the current file:
from flask import Blueprint, flash, redirect, render_template, request, url_for
from sqlalchemy.orm.exc import NoResultFound
from ..sqltypes import HashableLocale as Locale
from ..work import Trope, Work
from .db import session
and context including class names, function names, and sometimes code from other files:
# Path: cliche/sqltypes.py
# class HashableLocale(Locale):
# """Hashable Locale"""
#
# def __hash__(self):
# return hash('{}_{}'.format(self.language, self.territory))
#
# Path: cliche/work.py
# class Trope(Base): # FIXME: Temporary: it does not extend Nameable.
# """Tropes"""
#
# #: (:class:`int`) The primary key integer.
# id = Column(Integer, primary_key=True)
#
# #: (:class:`str`) Trope name
# name = Column(String, nullable=False)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkTrope`.
# work_tropes = relationship('WorkTrope',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Work`.
# works = relationship(lambda: Work, secondary='work_tropes',
# collection_class=set)
#
# __tablename__ = 'tropes'
# __repr_columns__ = id, name
#
# class Work(Nameable):
# """Creative work(s) that could be a single work like a film, or
# a series of works such as a comic book series and a television series.
# """
#
# #: (:class:`int`) The primary key integer.
# id = Column(Integer, ForeignKey(Nameable.id), primary_key=True)
#
# #: (:class:`str`) Work media type.
# media_type = Column(String, nullable=False)
#
# #: (:class:`datetime.date`) The publication date.
# published_at = Column(Date)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkGenre`\ s that the work has.
# work_genres = relationship('WorkGenre',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Genre`\ s that the work falls into.
# genres = relationship(Genre,
# secondary='work_genres',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Credit`\ s that the work has.
# credits = relationship(Credit,
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkFranchise`\ s that the work has.
# work_franchises = relationship('WorkFranchise',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Franchise`\ s that the work belongs to.
# franchises = relationship(Franchise,
# secondary='work_franchises',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Character`\ s that appeared in the work.
# characters = relationship(Character,
# secondary='work_characters',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkTrope`.
# work_tropes = relationship(lambda: WorkTrope,
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Trope`.
# tropes = relationship(Trope, secondary='work_tropes',
# collection_class=set)
#
# #: (:class:`datetime.datetime`) The date and time on which
# #: the record was created.
# created_at = Column(DateTime(timezone=True),
# nullable=False,
# default=now(),
# index=True)
#
# __tablename__ = 'works'
# __repr_columns__ = [id]
# __mapper_args__ = {
# 'polymorphic_identity': 'works',
# }
#
# Path: cliche/web/db.py
# def get_database_engine():
# def get_database_engine_options():
# def get_session():
# def close_session(exception=None):
# def setup_session(app):
. Output only the next line. | ) |
Continue the code snippet: <|code_start|>
adv_search_bp = Blueprint('adv_search', __name__)
@adv_search_bp.route('/', methods=['POST'])
def result():
about = request.form.getlist('about[]', None)
category = request.form.getlist('category[]', None)
detail = request.form.getlist('detail[]', None)
error_redirect = redirect(url_for('index'))
if about is None or category is None or detail is None:
flash('Invalid arguments.', 'danger')
return error_redirect
if type(about) != list or type(category) != list or type(detail) != list:
flash('Invalid arguments..', 'danger')
return error_redirect
if len(about) != len(category) or len(about) != len(detail):
flash('Invalid arguments...', 'danger')
return error_redirect
query = zip(about, category, detail)
media_list = []
trope_filter = None
for about, category, detail in query:
<|code_end|>
. Use current file imports:
from flask import Blueprint, flash, redirect, render_template, request, url_for
from sqlalchemy.orm.exc import NoResultFound
from ..sqltypes import HashableLocale as Locale
from ..work import Trope, Work
from .db import session
and context (classes, functions, or code) from other files:
# Path: cliche/sqltypes.py
# class HashableLocale(Locale):
# """Hashable Locale"""
#
# def __hash__(self):
# return hash('{}_{}'.format(self.language, self.territory))
#
# Path: cliche/work.py
# class Trope(Base): # FIXME: Temporary: it does not extend Nameable.
# """Tropes"""
#
# #: (:class:`int`) The primary key integer.
# id = Column(Integer, primary_key=True)
#
# #: (:class:`str`) Trope name
# name = Column(String, nullable=False)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkTrope`.
# work_tropes = relationship('WorkTrope',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Work`.
# works = relationship(lambda: Work, secondary='work_tropes',
# collection_class=set)
#
# __tablename__ = 'tropes'
# __repr_columns__ = id, name
#
# class Work(Nameable):
# """Creative work(s) that could be a single work like a film, or
# a series of works such as a combic book series and a television series.
# """
#
# #: (:class:`int`) The primary key integer.
# id = Column(Integer, ForeignKey(Nameable.id), primary_key=True)
#
# #: (:class:`str`) Work media type.
# media_type = Column(String, nullable=False)
#
# #: (:class:`datetime.date`) The publication date.
# published_at = Column(Date)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkGenre`\ s that the work has.
# work_genres = relationship('WorkGenre',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Genre`\ s that the work falls into.
# genres = relationship(Genre,
# secondary='work_genres',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Credit`\ s that the work has.
# credits = relationship(Credit,
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkFranchise`\ s that the work has.
# work_franchises = relationship('WorkFranchise',
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Franchise`\ s that the work belongs to.
# franchises = relationship(Franchise,
# secondary='work_franchises',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Character`\ s that appeared in the work.
# characters = relationship(Character,
# secondary='work_characters',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`WorkTrope`.
# work_tropes = relationship(lambda: WorkTrope,
# cascade='delete, merge, save-update',
# collection_class=set)
#
# #: (:class:`collections.abc.MutableSet`) The set of
# #: :class:`Trope`.
# tropes = relationship(Trope, secondary='work_tropes',
# collection_class=set)
#
# #: (:class:`datetime.datetime`) The date and time on which
# #: the record was created.
# created_at = Column(DateTime(timezone=True),
# nullable=False,
# default=now(),
# index=True)
#
# __tablename__ = 'works'
# __repr_columns__ = [id]
# __mapper_args__ = {
# 'polymorphic_identity': 'works',
# }
#
# Path: cliche/web/db.py
# def get_database_engine():
# def get_database_engine_options():
# def get_session():
# def close_session(exception=None):
# def setup_session(app):
. Output only the next line. | if about == 'info': |
Predict the next line for this snippet: <|code_start|> reference_count=15),
Name(nameable=one,
name='first',
locale=enus,
reference_count=10)
})
two.names.update({
Name(nameable=two,
name='이',
locale=kokr,
reference_count=25),
Name(nameable=two,
name='둘',
locale=kokr,
reference_count=25),
Name(nameable=two,
name='two',
locale=enus,
reference_count=5),
Name(nameable=two,
name='second',
locale=enus,
reference_count=5)
})
mystr.names.update({
Name(nameable=mystr,
name='my string',
locale=enus,
reference_count=3),
Name(nameable=mystr,
<|code_end|>
with the help of current file imports:
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String
from cliche.name import Name, Nameable
from cliche.sqltypes import HashableLocale as Locale
and context from other files:
# Path: cliche/sqltypes.py
# class HashableLocale(Locale):
# """Hashable Locale"""
#
# def __hash__(self):
# return hash('{}_{}'.format(self.language, self.territory))
, which may contain function names, class names, or code. Output only the next line. | name='나의 문자열', |
Given the code snippet: <|code_start|> engineer_name = Column(String(30))
__mapper_args__ = {
'polymorphic_identity': 'engineer',
}
class Manager(Employee):
__tablename__ = 'manager'
id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
manager_name = Column(String(30))
__mapper_args__ = {
'polymorphic_identity': 'manager',
}
prevent_discriminator_from_changing(Engineer.type)
prevent_discriminator_from_changing(Manager.type)
prevent_instantiating(Employee)
def test_prevent_instantiating():
try:
Employee(name='kim')
assert False
except Exception:
pass
<|code_end|>
, generate the next line using the imports in this file:
import enum
import uuid
from babel import Locale
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String
from cliche.sqltypes import (EnumType, LocaleType,
prevent_discriminator_from_changing,
prevent_instantiating, UuidType)
from cliche.orm import Base
and context (functions, classes, or occasionally code) from other files:
# Path: cliche/sqltypes.py
# class EnumType(TypeDecorator, SchemaType):
# """Custom enum type to be used as :class:`enum.Enum`in Python standard
# library. It inherits :class:`sqlalchemy.types.SchemaType` since it
# requires schema-level DDL. PostgreSQL ENUM type defined in an Alembic
# script must be explicitly created/dropped.
# """
#
# impl = Enum
#
# def __init__(self, enum_class: enum.Enum, **kw):
# if not issubclass(enum_class, enum.Enum):
# raise TypeError('expected enum.Enum subtype')
# super().__init__(*(m.name for m in enum_class), **kw)
# self._enum_class = enum_class
#
# def process_bind_param(self, value, dialect):
# return value.name
#
# def process_result_value(self, value, dialect):
# return self._enum_class[value]
#
# def _set_parent(self, column):
# self.impl._set_parent(column)
#
# @property
# def python_type(self):
# return self._enum_class
#
# class LocaleType(TypeDecorator):
# """Custom locale type to be used as :class:`babel.Locale`."""
#
# impl = String
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if not issubclass(value.__class__, Locale):
# raise TypeError('expected babel.Locale instance')
# return '{}_{}'.format(value.language, value.territory)
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None:
# return value
# return HashableLocale.parse(value)
# return process_result_value
#
# class comparator_factory(TypeDecorator.Comparator):
# @property
# def language(self):
# return func.substr(self.expr, 1, 2)
#
# @property
# def territory(self):
# return func.substr(self.expr, 4, 2)
#
# def prevent_discriminator_from_changing(col):
# def set_discriminator(target, value, oldvalue, initiator):
# oldvalue_is_none = (
# oldvalue.__class__ != _symbol or oldvalue.name != 'NO_VALUE'
# )
# value_is_wrong = (
# value != target.__mapper_args__['polymorphic_identity']
# )
# if oldvalue_is_none or value_is_wrong:
# raise AttributeError('discriminator column cannot be changed')
#
# event.listen(col, 'set', set_discriminator)
#
# def prevent_instantiating(cls):
# def init_non_instantiable_cls(target, args, kwargs):
# raise Exception('{} cannot be instantiated'.format(target.__class__))
#
# event.listen(cls, 'init', init_non_instantiable_cls)
#
# class UuidType(TypeDecorator):
# """Custom UUID type to be used as :class:`uuid.UUID`."""
#
# impl = CHAR
#
# def load_dialect_impl(self, dialect):
# if dialect.name == 'postgresql':
# return dialect.type_descriptor(UUID())
# else:
# return dialect.type_descriptor(CHAR(32))
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if value is None:
# return value
# if not isinstance(value, uuid.UUID):
# raise TypeError('expected uuid.UUID instance')
#
# if dialect.name == 'postgresql':
# return str(value)
# else:
# return '{0:032x}'.format(int(value))
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None or isinstance(value, uuid.UUID):
# return value
# else:
# return uuid.UUID(value)
# return process_result_value
#
# Path: cliche/orm.py
# def make_repr(self):
# def get_alembic_config(engine):
# def initialize_database(engine):
# def get_database_revision(engine):
# def get_revision(rev, context):
# def upgrade_database(engine, revision='head'):
# def upgrade(rev, context):
# def update_current_rev(old, new):
# def downgrade_database(engine, revision):
# def import_all_modules(dry_run=False): # FIXME
. Output only the next line. | def test_prevent_discriminator_from_changing(fx_session): |
Predict the next line for this snippet: <|code_start|> manager_name = Column(String(30))
__mapper_args__ = {
'polymorphic_identity': 'manager',
}
prevent_discriminator_from_changing(Engineer.type)
prevent_discriminator_from_changing(Manager.type)
prevent_instantiating(Employee)
def test_prevent_instantiating():
try:
Employee(name='kim')
assert False
except Exception:
pass
def test_prevent_discriminator_from_changing(fx_session):
try:
Engineer(name='kim', type='changing_manually')
assert False
except AttributeError:
pass
eng = Engineer(name='kim', engineer_name='sw')
try:
eng.type = 'changing_manually'
<|code_end|>
with the help of current file imports:
import enum
import uuid
from babel import Locale
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String
from cliche.sqltypes import (EnumType, LocaleType,
prevent_discriminator_from_changing,
prevent_instantiating, UuidType)
from cliche.orm import Base
and context from other files:
# Path: cliche/sqltypes.py
# class EnumType(TypeDecorator, SchemaType):
# """Custom enum type to be used as :class:`enum.Enum`in Python standard
# library. It inherits :class:`sqlalchemy.types.SchemaType` since it
# requires schema-level DDL. PostgreSQL ENUM type defined in an Alembic
# script must be explicitly created/dropped.
# """
#
# impl = Enum
#
# def __init__(self, enum_class: enum.Enum, **kw):
# if not issubclass(enum_class, enum.Enum):
# raise TypeError('expected enum.Enum subtype')
# super().__init__(*(m.name for m in enum_class), **kw)
# self._enum_class = enum_class
#
# def process_bind_param(self, value, dialect):
# return value.name
#
# def process_result_value(self, value, dialect):
# return self._enum_class[value]
#
# def _set_parent(self, column):
# self.impl._set_parent(column)
#
# @property
# def python_type(self):
# return self._enum_class
#
# class LocaleType(TypeDecorator):
# """Custom locale type to be used as :class:`babel.Locale`."""
#
# impl = String
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if not issubclass(value.__class__, Locale):
# raise TypeError('expected babel.Locale instance')
# return '{}_{}'.format(value.language, value.territory)
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None:
# return value
# return HashableLocale.parse(value)
# return process_result_value
#
# class comparator_factory(TypeDecorator.Comparator):
# @property
# def language(self):
# return func.substr(self.expr, 1, 2)
#
# @property
# def territory(self):
# return func.substr(self.expr, 4, 2)
#
# def prevent_discriminator_from_changing(col):
# def set_discriminator(target, value, oldvalue, initiator):
# oldvalue_is_none = (
# oldvalue.__class__ != _symbol or oldvalue.name != 'NO_VALUE'
# )
# value_is_wrong = (
# value != target.__mapper_args__['polymorphic_identity']
# )
# if oldvalue_is_none or value_is_wrong:
# raise AttributeError('discriminator column cannot be changed')
#
# event.listen(col, 'set', set_discriminator)
#
# def prevent_instantiating(cls):
# def init_non_instantiable_cls(target, args, kwargs):
# raise Exception('{} cannot be instantiated'.format(target.__class__))
#
# event.listen(cls, 'init', init_non_instantiable_cls)
#
# class UuidType(TypeDecorator):
# """Custom UUID type to be used as :class:`uuid.UUID`."""
#
# impl = CHAR
#
# def load_dialect_impl(self, dialect):
# if dialect.name == 'postgresql':
# return dialect.type_descriptor(UUID())
# else:
# return dialect.type_descriptor(CHAR(32))
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if value is None:
# return value
# if not isinstance(value, uuid.UUID):
# raise TypeError('expected uuid.UUID instance')
#
# if dialect.name == 'postgresql':
# return str(value)
# else:
# return '{0:032x}'.format(int(value))
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None or isinstance(value, uuid.UUID):
# return value
# else:
# return uuid.UUID(value)
# return process_result_value
#
# Path: cliche/orm.py
# def make_repr(self):
# def get_alembic_config(engine):
# def initialize_database(engine):
# def get_database_revision(engine):
# def get_revision(rev, context):
# def upgrade_database(engine, revision='head'):
# def upgrade(rev, context):
# def update_current_rev(old, new):
# def downgrade_database(engine, revision):
# def import_all_modules(dry_run=False): # FIXME
, which may contain function names, class names, or code. Output only the next line. | assert False |
Given the following code snippet before the placeholder: <|code_start|>
class Color(enum.Enum):
red = 1
green = 2
blue = 3
class ColorTable(Base):
__tablename__ = 'color_table'
id = Column(Integer, primary_key=True)
color = Column(EnumType(Color, name='color'))
class LocaleTable(Base):
__tablename__ = 'locale_table'
id = Column(Integer, primary_key=True)
locale = Column(LocaleType())
def test_enum_type(fx_session):
red_obj = ColorTable(color=Color.red)
green_obj = ColorTable(color=Color.green)
blue_obj = ColorTable(color=Color.blue)
fx_session.add(red_obj)
fx_session.add(green_obj)
fx_session.add(blue_obj)
fx_session.flush()
result_obj = fx_session.query(ColorTable) \
<|code_end|>
, predict the next line using imports from the current file:
import enum
import uuid
from babel import Locale
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String
from cliche.sqltypes import (EnumType, LocaleType,
prevent_discriminator_from_changing,
prevent_instantiating, UuidType)
from cliche.orm import Base
and context including class names, function names, and sometimes code from other files:
# Path: cliche/sqltypes.py
# class EnumType(TypeDecorator, SchemaType):
# """Custom enum type to be used as :class:`enum.Enum`in Python standard
# library. It inherits :class:`sqlalchemy.types.SchemaType` since it
# requires schema-level DDL. PostgreSQL ENUM type defined in an Alembic
# script must be explicitly created/dropped.
# """
#
# impl = Enum
#
# def __init__(self, enum_class: enum.Enum, **kw):
# if not issubclass(enum_class, enum.Enum):
# raise TypeError('expected enum.Enum subtype')
# super().__init__(*(m.name for m in enum_class), **kw)
# self._enum_class = enum_class
#
# def process_bind_param(self, value, dialect):
# return value.name
#
# def process_result_value(self, value, dialect):
# return self._enum_class[value]
#
# def _set_parent(self, column):
# self.impl._set_parent(column)
#
# @property
# def python_type(self):
# return self._enum_class
#
# class LocaleType(TypeDecorator):
# """Custom locale type to be used as :class:`babel.Locale`."""
#
# impl = String
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if not issubclass(value.__class__, Locale):
# raise TypeError('expected babel.Locale instance')
# return '{}_{}'.format(value.language, value.territory)
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None:
# return value
# return HashableLocale.parse(value)
# return process_result_value
#
# class comparator_factory(TypeDecorator.Comparator):
# @property
# def language(self):
# return func.substr(self.expr, 1, 2)
#
# @property
# def territory(self):
# return func.substr(self.expr, 4, 2)
#
# def prevent_discriminator_from_changing(col):
# def set_discriminator(target, value, oldvalue, initiator):
# oldvalue_is_none = (
# oldvalue.__class__ != _symbol or oldvalue.name != 'NO_VALUE'
# )
# value_is_wrong = (
# value != target.__mapper_args__['polymorphic_identity']
# )
# if oldvalue_is_none or value_is_wrong:
# raise AttributeError('discriminator column cannot be changed')
#
# event.listen(col, 'set', set_discriminator)
#
# def prevent_instantiating(cls):
# def init_non_instantiable_cls(target, args, kwargs):
# raise Exception('{} cannot be instantiated'.format(target.__class__))
#
# event.listen(cls, 'init', init_non_instantiable_cls)
#
# class UuidType(TypeDecorator):
# """Custom UUID type to be used as :class:`uuid.UUID`."""
#
# impl = CHAR
#
# def load_dialect_impl(self, dialect):
# if dialect.name == 'postgresql':
# return dialect.type_descriptor(UUID())
# else:
# return dialect.type_descriptor(CHAR(32))
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if value is None:
# return value
# if not isinstance(value, uuid.UUID):
# raise TypeError('expected uuid.UUID instance')
#
# if dialect.name == 'postgresql':
# return str(value)
# else:
# return '{0:032x}'.format(int(value))
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None or isinstance(value, uuid.UUID):
# return value
# else:
# return uuid.UUID(value)
# return process_result_value
#
# Path: cliche/orm.py
# def make_repr(self):
# def get_alembic_config(engine):
# def initialize_database(engine):
# def get_database_revision(engine):
# def get_revision(rev, context):
# def upgrade_database(engine, revision='head'):
# def upgrade(rev, context):
# def update_current_rev(old, new):
# def downgrade_database(engine, revision):
# def import_all_modules(dry_run=False): # FIXME
. Output only the next line. | .filter(ColorTable.color == Color.green) \ |
Using the snippet: <|code_start|> __mapper_args__ = {
'polymorphic_identity': 'manager',
}
prevent_discriminator_from_changing(Engineer.type)
prevent_discriminator_from_changing(Manager.type)
prevent_instantiating(Employee)
def test_prevent_instantiating():
try:
Employee(name='kim')
assert False
except Exception:
pass
def test_prevent_discriminator_from_changing(fx_session):
try:
Engineer(name='kim', type='changing_manually')
assert False
except AttributeError:
pass
eng = Engineer(name='kim', engineer_name='sw')
try:
eng.type = 'changing_manually'
assert False
except AttributeError:
<|code_end|>
, determine the next line of code. You have imports:
import enum
import uuid
from babel import Locale
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String
from cliche.sqltypes import (EnumType, LocaleType,
prevent_discriminator_from_changing,
prevent_instantiating, UuidType)
from cliche.orm import Base
and context (class names, function names, or code) available:
# Path: cliche/sqltypes.py
# class EnumType(TypeDecorator, SchemaType):
# """Custom enum type to be used as :class:`enum.Enum`in Python standard
# library. It inherits :class:`sqlalchemy.types.SchemaType` since it
# requires schema-level DDL. PostgreSQL ENUM type defined in an Alembic
# script must be explicitly created/dropped.
# """
#
# impl = Enum
#
# def __init__(self, enum_class: enum.Enum, **kw):
# if not issubclass(enum_class, enum.Enum):
# raise TypeError('expected enum.Enum subtype')
# super().__init__(*(m.name for m in enum_class), **kw)
# self._enum_class = enum_class
#
# def process_bind_param(self, value, dialect):
# return value.name
#
# def process_result_value(self, value, dialect):
# return self._enum_class[value]
#
# def _set_parent(self, column):
# self.impl._set_parent(column)
#
# @property
# def python_type(self):
# return self._enum_class
#
# class LocaleType(TypeDecorator):
# """Custom locale type to be used as :class:`babel.Locale`."""
#
# impl = String
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if not issubclass(value.__class__, Locale):
# raise TypeError('expected babel.Locale instance')
# return '{}_{}'.format(value.language, value.territory)
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None:
# return value
# return HashableLocale.parse(value)
# return process_result_value
#
# class comparator_factory(TypeDecorator.Comparator):
# @property
# def language(self):
# return func.substr(self.expr, 1, 2)
#
# @property
# def territory(self):
# return func.substr(self.expr, 4, 2)
#
# def prevent_discriminator_from_changing(col):
# def set_discriminator(target, value, oldvalue, initiator):
# oldvalue_is_none = (
# oldvalue.__class__ != _symbol or oldvalue.name != 'NO_VALUE'
# )
# value_is_wrong = (
# value != target.__mapper_args__['polymorphic_identity']
# )
# if oldvalue_is_none or value_is_wrong:
# raise AttributeError('discriminator column cannot be changed')
#
# event.listen(col, 'set', set_discriminator)
#
# def prevent_instantiating(cls):
# def init_non_instantiable_cls(target, args, kwargs):
# raise Exception('{} cannot be instantiated'.format(target.__class__))
#
# event.listen(cls, 'init', init_non_instantiable_cls)
#
# class UuidType(TypeDecorator):
# """Custom UUID type to be used as :class:`uuid.UUID`."""
#
# impl = CHAR
#
# def load_dialect_impl(self, dialect):
# if dialect.name == 'postgresql':
# return dialect.type_descriptor(UUID())
# else:
# return dialect.type_descriptor(CHAR(32))
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if value is None:
# return value
# if not isinstance(value, uuid.UUID):
# raise TypeError('expected uuid.UUID instance')
#
# if dialect.name == 'postgresql':
# return str(value)
# else:
# return '{0:032x}'.format(int(value))
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None or isinstance(value, uuid.UUID):
# return value
# else:
# return uuid.UUID(value)
# return process_result_value
#
# Path: cliche/orm.py
# def make_repr(self):
# def get_alembic_config(engine):
# def initialize_database(engine):
# def get_database_revision(engine):
# def get_revision(rev, context):
# def upgrade_database(engine, revision='head'):
# def upgrade(rev, context):
# def update_current_rev(old, new):
# def downgrade_database(engine, revision):
# def import_all_modules(dry_run=False): # FIXME
. Output only the next line. | pass |
Next line prediction: <|code_start|>
class Color(enum.Enum):
red = 1
green = 2
blue = 3
class ColorTable(Base):
__tablename__ = 'color_table'
id = Column(Integer, primary_key=True)
color = Column(EnumType(Color, name='color'))
class LocaleTable(Base):
__tablename__ = 'locale_table'
<|code_end|>
. Use current file imports:
(import enum
import uuid
from babel import Locale
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String
from cliche.sqltypes import (EnumType, LocaleType,
prevent_discriminator_from_changing,
prevent_instantiating, UuidType)
from cliche.orm import Base)
and context including class names, function names, or small code snippets from other files:
# Path: cliche/sqltypes.py
# class EnumType(TypeDecorator, SchemaType):
# """Custom enum type to be used as :class:`enum.Enum`in Python standard
# library. It inherits :class:`sqlalchemy.types.SchemaType` since it
# requires schema-level DDL. PostgreSQL ENUM type defined in an Alembic
# script must be explicitly created/dropped.
# """
#
# impl = Enum
#
# def __init__(self, enum_class: enum.Enum, **kw):
# if not issubclass(enum_class, enum.Enum):
# raise TypeError('expected enum.Enum subtype')
# super().__init__(*(m.name for m in enum_class), **kw)
# self._enum_class = enum_class
#
# def process_bind_param(self, value, dialect):
# return value.name
#
# def process_result_value(self, value, dialect):
# return self._enum_class[value]
#
# def _set_parent(self, column):
# self.impl._set_parent(column)
#
# @property
# def python_type(self):
# return self._enum_class
#
# class LocaleType(TypeDecorator):
# """Custom locale type to be used as :class:`babel.Locale`."""
#
# impl = String
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if not issubclass(value.__class__, Locale):
# raise TypeError('expected babel.Locale instance')
# return '{}_{}'.format(value.language, value.territory)
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None:
# return value
# return HashableLocale.parse(value)
# return process_result_value
#
# class comparator_factory(TypeDecorator.Comparator):
# @property
# def language(self):
# return func.substr(self.expr, 1, 2)
#
# @property
# def territory(self):
# return func.substr(self.expr, 4, 2)
#
# def prevent_discriminator_from_changing(col):
# def set_discriminator(target, value, oldvalue, initiator):
# oldvalue_is_none = (
# oldvalue.__class__ != _symbol or oldvalue.name != 'NO_VALUE'
# )
# value_is_wrong = (
# value != target.__mapper_args__['polymorphic_identity']
# )
# if oldvalue_is_none or value_is_wrong:
# raise AttributeError('discriminator column cannot be changed')
#
# event.listen(col, 'set', set_discriminator)
#
# def prevent_instantiating(cls):
# def init_non_instantiable_cls(target, args, kwargs):
# raise Exception('{} cannot be instantiated'.format(target.__class__))
#
# event.listen(cls, 'init', init_non_instantiable_cls)
#
# class UuidType(TypeDecorator):
# """Custom UUID type to be used as :class:`uuid.UUID`."""
#
# impl = CHAR
#
# def load_dialect_impl(self, dialect):
# if dialect.name == 'postgresql':
# return dialect.type_descriptor(UUID())
# else:
# return dialect.type_descriptor(CHAR(32))
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if value is None:
# return value
# if not isinstance(value, uuid.UUID):
# raise TypeError('expected uuid.UUID instance')
#
# if dialect.name == 'postgresql':
# return str(value)
# else:
# return '{0:032x}'.format(int(value))
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None or isinstance(value, uuid.UUID):
# return value
# else:
# return uuid.UUID(value)
# return process_result_value
#
# Path: cliche/orm.py
# def make_repr(self):
# def get_alembic_config(engine):
# def initialize_database(engine):
# def get_database_revision(engine):
# def get_revision(rev, context):
# def upgrade_database(engine, revision='head'):
# def upgrade(rev, context):
# def update_current_rev(old, new):
# def downgrade_database(engine, revision):
# def import_all_modules(dry_run=False): # FIXME
. Output only the next line. | id = Column(Integer, primary_key=True) |
Here is a snippet: <|code_start|> result_obj = fx_session.query(ColorTable) \
.filter(ColorTable.color == Color.green) \
.one()
assert green_obj is result_obj
def test_locale_type(fx_session):
en_us = LocaleTable(locale=Locale.parse('en_US'))
de_de = LocaleTable(locale=Locale.parse('de_DE'))
ko_kr = LocaleTable(locale=Locale.parse('ko_KR'))
with fx_session.begin():
fx_session.add_all([en_us, de_de, ko_kr])
result = fx_session.query(LocaleTable) \
.filter(LocaleTable.locale == Locale.parse('de_DE')) \
.one()
assert result.locale == Locale.parse('de_DE')
class UuidTable(Base):
__tablename__ = 'uuid_table'
id = Column(UuidType, primary_key=True, default=uuid.uuid4)
uuid = Column(UuidType, default=uuid.uuid4)
def test_uuid_type(fx_session):
some_entity_1 = UuidTable()
some_entity_2 = UuidTable()
with fx_session.begin():
fx_session.add_all([some_entity_1, some_entity_2])
<|code_end|>
. Write the next line using the current file imports:
import enum
import uuid
from babel import Locale
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String
from cliche.sqltypes import (EnumType, LocaleType,
prevent_discriminator_from_changing,
prevent_instantiating, UuidType)
from cliche.orm import Base
and context from other files:
# Path: cliche/sqltypes.py
# class EnumType(TypeDecorator, SchemaType):
# """Custom enum type to be used as :class:`enum.Enum`in Python standard
# library. It inherits :class:`sqlalchemy.types.SchemaType` since it
# requires schema-level DDL. PostgreSQL ENUM type defined in an Alembic
# script must be explicitly created/dropped.
# """
#
# impl = Enum
#
# def __init__(self, enum_class: enum.Enum, **kw):
# if not issubclass(enum_class, enum.Enum):
# raise TypeError('expected enum.Enum subtype')
# super().__init__(*(m.name for m in enum_class), **kw)
# self._enum_class = enum_class
#
# def process_bind_param(self, value, dialect):
# return value.name
#
# def process_result_value(self, value, dialect):
# return self._enum_class[value]
#
# def _set_parent(self, column):
# self.impl._set_parent(column)
#
# @property
# def python_type(self):
# return self._enum_class
#
# class LocaleType(TypeDecorator):
# """Custom locale type to be used as :class:`babel.Locale`."""
#
# impl = String
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if not issubclass(value.__class__, Locale):
# raise TypeError('expected babel.Locale instance')
# return '{}_{}'.format(value.language, value.territory)
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None:
# return value
# return HashableLocale.parse(value)
# return process_result_value
#
# class comparator_factory(TypeDecorator.Comparator):
# @property
# def language(self):
# return func.substr(self.expr, 1, 2)
#
# @property
# def territory(self):
# return func.substr(self.expr, 4, 2)
#
# def prevent_discriminator_from_changing(col):
# def set_discriminator(target, value, oldvalue, initiator):
# oldvalue_is_none = (
# oldvalue.__class__ != _symbol or oldvalue.name != 'NO_VALUE'
# )
# value_is_wrong = (
# value != target.__mapper_args__['polymorphic_identity']
# )
# if oldvalue_is_none or value_is_wrong:
# raise AttributeError('discriminator column cannot be changed')
#
# event.listen(col, 'set', set_discriminator)
#
# def prevent_instantiating(cls):
# def init_non_instantiable_cls(target, args, kwargs):
# raise Exception('{} cannot be instantiated'.format(target.__class__))
#
# event.listen(cls, 'init', init_non_instantiable_cls)
#
# class UuidType(TypeDecorator):
# """Custom UUID type to be used as :class:`uuid.UUID`."""
#
# impl = CHAR
#
# def load_dialect_impl(self, dialect):
# if dialect.name == 'postgresql':
# return dialect.type_descriptor(UUID())
# else:
# return dialect.type_descriptor(CHAR(32))
#
# def bind_processor(self, dialect):
# def process_bind_param(value):
# if value is None:
# return value
# if not isinstance(value, uuid.UUID):
# raise TypeError('expected uuid.UUID instance')
#
# if dialect.name == 'postgresql':
# return str(value)
# else:
# return '{0:032x}'.format(int(value))
# return process_bind_param
#
# def result_processor(self, dialect, coltype):
# def process_result_value(value):
# if value is None or isinstance(value, uuid.UUID):
# return value
# else:
# return uuid.UUID(value)
# return process_result_value
#
# Path: cliche/orm.py
# def make_repr(self):
# def get_alembic_config(engine):
# def initialize_database(engine):
# def get_database_revision(engine):
# def get_revision(rev, context):
# def upgrade_database(engine, revision='head'):
# def upgrade(rev, context):
# def update_current_rev(old, new):
# def downgrade_database(engine, revision):
# def import_all_modules(dry_run=False): # FIXME
, which may include functions, classes, or code. Output only the next line. | assert some_entity_1.id != some_entity_2.id |
Using the snippet: <|code_start|>
__all__ = 'assert_contain_text', 'get_url'
def assert_contain_text(text, expr, data):
def traverse(elements):
for element in elements:
if text in element.text_content():
return True
else:
<|code_end|>
, determine the next line of code. You have imports:
from cliche.web.app import app
from flask import url_for
from lxml.html import document_fromstring
and context (class names, function names, or code) available:
# Path: cliche/web/app.py
# def setup_sentry():
# def get_sentry() -> Sentry:
# def index():
# def check_login_status():
# def template_processor():
# def add_login_header(response):
. Output only the next line. | return False |
Based on the snippet: <|code_start|> fake_new_res = dict(
user_id='1234567890',
screen_name='cliche.io',
oauth_token='":LKJHGFDSA',
oauth_token_secret='}{POIUYTREWQ',
)
fake_new_vendors = [
Vendor('twitter', TwitterCredential, Version.oauth1,
FakeProvider(fake_new_res), ('screen_name', 'user_id'))
]
monkeypatch.setattr('cliche.web.social.oauth.vendors', fake_new_vendors)
rv = fx_flask_client.get(get_url('oauth.oauth_authorized',
vendor='twitter'))
assert rv.status_code == 302
created_new_user = fx_session.query(User). \
filter(
User.credentials.any(
TwitterCredential.identifier == fake_new_res['user_id']
)
).one()
fx_session.expire(created_new_user)
created_new_twitter_credential = fx_session.query(TwitterCredential). \
filter_by(user=created_new_user).one()
fx_session.expire(created_new_twitter_credential)
assert old_user == created_new_user
<|code_end|>
, predict the immediate next line with the help of imports:
import datetime
from cliche.credentials import TwitterCredential
from cliche.user import User
from cliche.web.social.oauth import Vendor, Version
from ...web_utils import assert_contain_text, get_url
and context (classes, functions, sometimes code) from other files:
# Path: cliche/credentials.py
# class TwitterCredential(Credential):
# """Information about Twitter User"""
#
# #: (:class:`int`) The primary key from :class:`Credential.id`.
# id = Column(Integer, ForeignKey('credential.id'), primary_key=True)
#
# #: (:class:`int`) Twitter user id
# identifier = Column(BigInteger, nullable=False, unique=True)
#
# #: (:class:`str`) The oauth token.
# token = Column(String)
#
# #: (:class:`str`) The oauth secret token.
# token_secret = Column(String)
#
# __tablename__ = 'twitter_credential'
# __mapper_args__ = {'polymorphic_identity': 'twitter'}
# __repr_columns__ = id, identifier
#
# Path: cliche/web/social/oauth.py
# class Version(enum.Enum):
# class OAuthVendorConverter(BaseConverter):
# def login(vendor):
# def oauth_authorized(vendor):
# def make_account(credential_table, name, user_id):
# def __init__(self, url_map):
# def to_python(self, value):
# def to_url(self, value):
#
# Path: tests/web_utils.py
# def assert_contain_text(text, expr, data):
# def traverse(elements):
# for element in elements:
# if text in element.text_content():
# return True
# else:
# return False
#
# tree = document_fromstring(str(data)).cssselect(expr)
# assert tree
# assert traverse(tree)
#
# def get_url(endpoint, **kwargs):
# url = None
# with app.test_request_context():
# url = url_for(endpoint, **kwargs)
# return url
. Output only the next line. | assert old_twitter_credential == created_new_twitter_credential |
Given the following code snippet before the placeholder: <|code_start|> identifier=fake_old_res['user_id'],
token=fake_old_res['oauth_token'],
token_secret=fake_old_res['oauth_token_secret']
)
old_user = User(name=fake_old_res['screen_name'],
credentials={old_twitter_credential})
with fx_session.begin():
fx_session.add(old_user)
fake_new_res = dict(
user_id='1234567890',
screen_name='cliche.io',
oauth_token='":LKJHGFDSA',
oauth_token_secret='}{POIUYTREWQ',
)
fake_new_vendors = [
Vendor('twitter', TwitterCredential, Version.oauth1,
FakeProvider(fake_new_res), ('screen_name', 'user_id'))
]
monkeypatch.setattr('cliche.web.social.oauth.vendors', fake_new_vendors)
rv = fx_flask_client.get(get_url('oauth.oauth_authorized',
vendor='twitter'))
assert rv.status_code == 302
created_new_user = fx_session.query(User). \
filter(
User.credentials.any(
TwitterCredential.identifier == fake_new_res['user_id']
<|code_end|>
, predict the next line using imports from the current file:
import datetime
from cliche.credentials import TwitterCredential
from cliche.user import User
from cliche.web.social.oauth import Vendor, Version
from ...web_utils import assert_contain_text, get_url
and context including class names, function names, and sometimes code from other files:
# Path: cliche/credentials.py
# class TwitterCredential(Credential):
# """Information about Twitter User"""
#
# #: (:class:`int`) The primary key from :class:`Credential.id`.
# id = Column(Integer, ForeignKey('credential.id'), primary_key=True)
#
# #: (:class:`int`) Twitter user id
# identifier = Column(BigInteger, nullable=False, unique=True)
#
# #: (:class:`str`) The oauth token.
# token = Column(String)
#
# #: (:class:`str`) The oauth secret token.
# token_secret = Column(String)
#
# __tablename__ = 'twitter_credential'
# __mapper_args__ = {'polymorphic_identity': 'twitter'}
# __repr_columns__ = id, identifier
#
# Path: cliche/web/social/oauth.py
# class Version(enum.Enum):
# class OAuthVendorConverter(BaseConverter):
# def login(vendor):
# def oauth_authorized(vendor):
# def make_account(credential_table, name, user_id):
# def __init__(self, url_map):
# def to_python(self, value):
# def to_url(self, value):
#
# Path: tests/web_utils.py
# def assert_contain_text(text, expr, data):
# def traverse(elements):
# for element in elements:
# if text in element.text_content():
# return True
# else:
# return False
#
# tree = document_fromstring(str(data)).cssselect(expr)
# assert tree
# assert traverse(tree)
#
# def get_url(endpoint, **kwargs):
# url = None
# with app.test_request_context():
# url = url_for(endpoint, **kwargs)
# return url
. Output only the next line. | ) |
Predict the next line for this snippet: <|code_start|> rv = fx_flask_client.get(get_url('oauth.oauth_authorized',
vendor='twitter'))
assert rv.status_code == 302
rv = fx_flask_client.get(get_url('index'))
assert_contain_text('You denied the request to sign in.', 'ul.flash>li',
rv.data)
def test_twitter_authorize_new_id(fx_session, fx_flask_client,
fx_twitter_config, monkeypatch):
fake_res = dict(
user_id='1234567890',
screen_name='cliche.io',
oauth_token='ASDFGHJKL:"',
oauth_token_secret='QWERTYUIOP{}',
)
fake_vendors = [
Vendor('twitter', TwitterCredential, Version.oauth1,
FakeProvider(fake_res), ('screen_name', 'user_id'))
]
monkeypatch.setattr('cliche.web.social.oauth.vendors', fake_vendors)
rv = fx_flask_client.get(get_url('oauth.oauth_authorized',
vendor='twitter'))
assert rv.status_code == 302
created_user = fx_session.query(User).\
filter(
<|code_end|>
with the help of current file imports:
import datetime
from cliche.credentials import TwitterCredential
from cliche.user import User
from cliche.web.social.oauth import Vendor, Version
from ...web_utils import assert_contain_text, get_url
and context from other files:
# Path: cliche/credentials.py
# class TwitterCredential(Credential):
# """Information about Twitter User"""
#
# #: (:class:`int`) The primary key from :class:`Credential.id`.
# id = Column(Integer, ForeignKey('credential.id'), primary_key=True)
#
# #: (:class:`int`) Twitter user id
# identifier = Column(BigInteger, nullable=False, unique=True)
#
# #: (:class:`str`) The oauth token.
# token = Column(String)
#
# #: (:class:`str`) The oauth secret token.
# token_secret = Column(String)
#
# __tablename__ = 'twitter_credential'
# __mapper_args__ = {'polymorphic_identity': 'twitter'}
# __repr_columns__ = id, identifier
#
# Path: cliche/web/social/oauth.py
# class Version(enum.Enum):
# class OAuthVendorConverter(BaseConverter):
# def login(vendor):
# def oauth_authorized(vendor):
# def make_account(credential_table, name, user_id):
# def __init__(self, url_map):
# def to_python(self, value):
# def to_url(self, value):
#
# Path: tests/web_utils.py
# def assert_contain_text(text, expr, data):
# def traverse(elements):
# for element in elements:
# if text in element.text_content():
# return True
# else:
# return False
#
# tree = document_fromstring(str(data)).cssselect(expr)
# assert tree
# assert traverse(tree)
#
# def get_url(endpoint, **kwargs):
# url = None
# with app.test_request_context():
# url = url_for(endpoint, **kwargs)
# return url
, which may contain function names, class names, or code. Output only the next line. | User.credentials.any( |
Given the code snippet: <|code_start|>
rv = fx_flask_client.get(get_url('oauth.login', vendor='twitter'))
assert rv.status_code == 302
def test_twitter_login(fx_flask_client, fx_twitter_config):
rv = fx_flask_client.get(get_url('oauth.login', vendor='twitter'))
assert rv.status_code == 302
def test_undefined_vendor_authorize(fx_flask_client, fx_twitter_config):
rv = fx_flask_client.get(get_url('oauth.oauth_authorized', vendor='naver'))
assert rv.status_code == 404
def test_authorize_who_logged(fx_session, fx_flask_client, fx_twitter_config):
user = User(name='cliche.io')
with fx_session.begin():
fx_session.add(user)
with fx_flask_client.session_transaction() as sess:
sess['logged_id'] = user.id
sess['logged_time'] = datetime.datetime.utcnow()
rv = fx_flask_client.get(get_url('oauth.oauth_authorized',
vendor='twitter'))
assert rv.status_code == 302
def test_twitter_authorize_failed(fx_flask_client,
<|code_end|>
, generate the next line using the imports in this file:
import datetime
from cliche.credentials import TwitterCredential
from cliche.user import User
from cliche.web.social.oauth import Vendor, Version
from ...web_utils import assert_contain_text, get_url
and context (functions, classes, or occasionally code) from other files:
# Path: cliche/credentials.py
# class TwitterCredential(Credential):
# """Information about Twitter User"""
#
# #: (:class:`int`) The primary key from :class:`Credential.id`.
# id = Column(Integer, ForeignKey('credential.id'), primary_key=True)
#
# #: (:class:`int`) Twitter user id
# identifier = Column(BigInteger, nullable=False, unique=True)
#
# #: (:class:`str`) The oauth token.
# token = Column(String)
#
# #: (:class:`str`) The oauth secret token.
# token_secret = Column(String)
#
# __tablename__ = 'twitter_credential'
# __mapper_args__ = {'polymorphic_identity': 'twitter'}
# __repr_columns__ = id, identifier
#
# Path: cliche/web/social/oauth.py
# class Version(enum.Enum):
# class OAuthVendorConverter(BaseConverter):
# def login(vendor):
# def oauth_authorized(vendor):
# def make_account(credential_table, name, user_id):
# def __init__(self, url_map):
# def to_python(self, value):
# def to_url(self, value):
#
# Path: tests/web_utils.py
# def assert_contain_text(text, expr, data):
# def traverse(elements):
# for element in elements:
# if text in element.text_content():
# return True
# else:
# return False
#
# tree = document_fromstring(str(data)).cssselect(expr)
# assert tree
# assert traverse(tree)
#
# def get_url(endpoint, **kwargs):
# url = None
# with app.test_request_context():
# url = url_for(endpoint, **kwargs)
# return url
. Output only the next line. | fx_twitter_config, monkeypatch): |
Given the code snippet: <|code_start|>
rv = fx_flask_client.get(get_url('oauth.oauth_authorized',
vendor='twitter'))
assert rv.status_code == 302
rv = fx_flask_client.get(get_url('index'))
assert_contain_text('You denied the request to sign in.', 'ul.flash>li',
rv.data)
def test_twitter_authorize_new_id(fx_session, fx_flask_client,
fx_twitter_config, monkeypatch):
fake_res = dict(
user_id='1234567890',
screen_name='cliche.io',
oauth_token='ASDFGHJKL:"',
oauth_token_secret='QWERTYUIOP{}',
)
fake_vendors = [
Vendor('twitter', TwitterCredential, Version.oauth1,
FakeProvider(fake_res), ('screen_name', 'user_id'))
]
monkeypatch.setattr('cliche.web.social.oauth.vendors', fake_vendors)
rv = fx_flask_client.get(get_url('oauth.oauth_authorized',
vendor='twitter'))
assert rv.status_code == 302
created_user = fx_session.query(User).\
<|code_end|>
, generate the next line using the imports in this file:
import datetime
from cliche.credentials import TwitterCredential
from cliche.user import User
from cliche.web.social.oauth import Vendor, Version
from ...web_utils import assert_contain_text, get_url
and context (functions, classes, or occasionally code) from other files:
# Path: cliche/credentials.py
# class TwitterCredential(Credential):
# """Information about Twitter User"""
#
# #: (:class:`int`) The primary key from :class:`Credential.id`.
# id = Column(Integer, ForeignKey('credential.id'), primary_key=True)
#
# #: (:class:`int`) Twitter user id
# identifier = Column(BigInteger, nullable=False, unique=True)
#
# #: (:class:`str`) The oauth token.
# token = Column(String)
#
# #: (:class:`str`) The oauth secret token.
# token_secret = Column(String)
#
# __tablename__ = 'twitter_credential'
# __mapper_args__ = {'polymorphic_identity': 'twitter'}
# __repr_columns__ = id, identifier
#
# Path: cliche/web/social/oauth.py
# class Version(enum.Enum):
# class OAuthVendorConverter(BaseConverter):
# def login(vendor):
# def oauth_authorized(vendor):
# def make_account(credential_table, name, user_id):
# def __init__(self, url_map):
# def to_python(self, value):
# def to_url(self, value):
#
# Path: tests/web_utils.py
# def assert_contain_text(text, expr, data):
# def traverse(elements):
# for element in elements:
# if text in element.text_content():
# return True
# else:
# return False
#
# tree = document_fromstring(str(data)).cssselect(expr)
# assert tree
# assert traverse(tree)
#
# def get_url(endpoint, **kwargs):
# url = None
# with app.test_request_context():
# url = url_for(endpoint, **kwargs)
# return url
. Output only the next line. | filter( |
Given the following code snippet before the placeholder: <|code_start|>
def test_login_expire(fx_session, fx_flask_client):
user = User(name='cliche.io')
with fx_session.begin():
fx_session.add(user)
with fx_flask_client.session_transaction() as sess:
sess['logged_id'] = user.id
sess['logged_time'] = datetime.datetime.utcnow() - \
datetime.timedelta(hours=1)
rv = fx_flask_client.get(get_url('index'))
assert not rv.headers.get('X-Cliche-Login-User-Id', None)
def test_login_renewal(fx_session, fx_flask_client):
user = User(name='cliche.io')
with fx_session.begin():
fx_session.add(user)
with fx_flask_client.session_transaction() as sess:
sess['logged_id'] = user.id
sess['logged_time'] = datetime.datetime.utcnow() - \
datetime.timedelta(minutes=59)
rv = fx_flask_client.get(get_url('index'))
assert rv.headers['X-Cliche-Login-User-Id'] == str(user.id)
<|code_end|>
, predict the next line using imports from the current file:
import datetime
import urllib.parse
from cliche.user import User
from .web_utils import assert_contain_text, get_url
and context including class names, function names, and sometimes code from other files:
# Path: tests/web_utils.py
# def assert_contain_text(text, expr, data):
# def traverse(elements):
# for element in elements:
# if text in element.text_content():
# return True
# else:
# return False
#
# tree = document_fromstring(str(data)).cssselect(expr)
# assert tree
# assert traverse(tree)
#
# def get_url(endpoint, **kwargs):
# url = None
# with app.test_request_context():
# url = url_for(endpoint, **kwargs)
# return url
. Output only the next line. | def test_raven_js_installed(fx_flask_client, fx_sentry_config): |
Next line prediction: <|code_start|>
def test_index(fx_flask_client):
rv = fx_flask_client.get(get_url('index'))
assert_contain_text('Cliche.io', 'h1', rv.data)
def test_login_expire(fx_session, fx_flask_client):
user = User(name='cliche.io')
with fx_session.begin():
fx_session.add(user)
with fx_flask_client.session_transaction() as sess:
sess['logged_id'] = user.id
sess['logged_time'] = datetime.datetime.utcnow() - \
<|code_end|>
. Use current file imports:
(import datetime
import urllib.parse
from cliche.user import User
from .web_utils import assert_contain_text, get_url)
and context including class names, function names, or small code snippets from other files:
# Path: tests/web_utils.py
# def assert_contain_text(text, expr, data):
# def traverse(elements):
# for element in elements:
# if text in element.text_content():
# return True
# else:
# return False
#
# tree = document_fromstring(str(data)).cssselect(expr)
# assert tree
# assert traverse(tree)
#
# def get_url(endpoint, **kwargs):
# url = None
# with app.test_request_context():
# url = url_for(endpoint, **kwargs)
# return url
. Output only the next line. | datetime.timedelta(hours=1) |
Given the code snippet: <|code_start|>
def test_logout_logged_user(fx_session, fx_flask_client):
user = User(name='cliche.io')
with fx_session.begin():
fx_session.add(user)
with fx_flask_client.session_transaction() as sess:
sess['logged_id'] = user.id
sess['logged_time'] = datetime.datetime.utcnow()
rv = fx_flask_client.get(get_url('user.logout'))
assert rv.status_code == 302
rv = fx_flask_client.get(get_url('index'))
assert_contain_text('You were logged out.', 'ul.flash>li', rv.data)
def test_logout_non_logged_user(fx_flask_client):
rv = fx_flask_client.get(get_url('user.logout'))
<|code_end|>
, generate the next line using the imports in this file:
import datetime
from cliche.user import User
from ..web_utils import assert_contain_text, get_url
and context (functions, classes, or occasionally code) from other files:
# Path: tests/web_utils.py
# def assert_contain_text(text, expr, data):
# def traverse(elements):
# for element in elements:
# if text in element.text_content():
# return True
# else:
# return False
#
# tree = document_fromstring(str(data)).cssselect(expr)
# assert tree
# assert traverse(tree)
#
# def get_url(endpoint, **kwargs):
# url = None
# with app.test_request_context():
# url = url_for(endpoint, **kwargs)
# return url
. Output only the next line. | assert rv.status_code == 302 |
Using the snippet: <|code_start|>
def test_logout_logged_user(fx_session, fx_flask_client):
user = User(name='cliche.io')
with fx_session.begin():
fx_session.add(user)
with fx_flask_client.session_transaction() as sess:
sess['logged_id'] = user.id
sess['logged_time'] = datetime.datetime.utcnow()
rv = fx_flask_client.get(get_url('user.logout'))
<|code_end|>
, determine the next line of code. You have imports:
import datetime
from cliche.user import User
from ..web_utils import assert_contain_text, get_url
and context (class names, function names, or code) available:
# Path: tests/web_utils.py
# def assert_contain_text(text, expr, data):
# def traverse(elements):
# for element in elements:
# if text in element.text_content():
# return True
# else:
# return False
#
# tree = document_fromstring(str(data)).cssselect(expr)
# assert tree
# assert traverse(tree)
#
# def get_url(endpoint, **kwargs):
# url = None
# with app.test_request_context():
# url = url_for(endpoint, **kwargs)
# return url
. Output only the next line. | assert rv.status_code == 302 |
Continue the code snippet: <|code_start|>
def test_fetch_link(monkeypatch, fx_session, fx_celery_app):
url = 'http://tvtropes.org/pmwiki/pmwiki.php/Main/GodJob'
text = '<div class="pagetitle"><div class="article_title"><h1>' \
'<span>God Job</span></h1></div></div>'
def mockreturn(path):
req = requests.Request()
req.url = url
req.text = text
return req
monkeypatch.setattr(requests, "get", mockreturn)
result = fetch_link(url, fx_session)
<|code_end|>
. Use current file imports:
import requests
from cliche.services.tvtropes.crawler import fetch_link
and context (classes, functions, or code) from other files:
# Path: cliche/services/tvtropes/crawler.py
# def fetch_link(url, session, *, log_prefix=''):
# '''Returns result, tree, namespace, name, final_url.'''
# logger = get_task_logger(__name__ + '.fetch_link')
# if not is_wiki_page(url):
# return False, None, None, None, url
# r = requests.get(url)
# try:
# final_url = r.url[:r.url.index('?')]
# except ValueError:
# final_url = r.url
# if not is_wiki_page(final_url):
# return False, None, None, None, final_url
# tree = document_fromstring(r.text)
# try:
# name = tree.find_class('article_title')[0].text_content()
# except (AttributeError, AssertionError, IndexError):
# logger.warning('%sWarning on url %s: '
# 'There is no pagetitle on this page. Ignoring.',
# log_prefix, url)
# return False, tree, None, None, final_url
# else:
# *namespace, name = name.split(':')
# name = name.strip()
# namespace = 'Main' if not namespace else namespace[0]
# type = determine_type(namespace)
# if type == 'Administrivia':
# return False, tree, namespace, name, final_url
# upsert_entity(session, namespace, name, type, final_url)
# process_redirections(session, url, final_url, namespace, name)
# return True, tree, namespace, name, final_url
. Output only the next line. | assert result[-3:] == ('Main', 'God Job', url) |
Based on the snippet: <|code_start|>
__all__ = ('DEFAULT_DATABASE_URL', 'get_database_url', 'get_engine',
'get_session')
DEFAULT_DATABASE_URL = 'sqlite://'
def get_database_url(url=None):
return url or os.environ.get(
'CLICHE_TEST_DATABASE_URL',
DEFAULT_DATABASE_URL
)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import contextlib
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool, StaticPool
from cliche.celery import app as celery_app
from cliche.orm import Base, Session
from cliche.web.app import app as flask_app
and context (classes, functions, sometimes code) from other files:
# Path: cliche/celery.py
# class Loader(BaseLoader):
# def read_configuration(self):
# def get_database_engine() -> Engine:
# def get_session() -> Session:
# def close_session(task_id, task, *args, **kwargs):
# def get_raven_client() -> Client:
# def setup_raven_logging(conf=None, **kwargs):
# def report_task_failure(task_id, exception, args, kwargs,
# traceback, einfo, sender):
#
# Path: cliche/orm.py
# def make_repr(self):
# def get_alembic_config(engine):
# def initialize_database(engine):
# def get_database_revision(engine):
# def get_revision(rev, context):
# def upgrade_database(engine, revision='head'):
# def upgrade(rev, context):
# def update_current_rev(old, new):
# def downgrade_database(engine, revision):
# def import_all_modules(dry_run=False): # FIXME
#
# Path: cliche/web/app.py
# def setup_sentry():
# def get_sentry() -> Sentry:
# def index():
# def check_login_status():
# def template_processor():
# def add_login_header(response):
. Output only the next line. | def get_engine(url=None, echo=False): |
Continue the code snippet: <|code_start|>
__all__ = ('DEFAULT_DATABASE_URL', 'get_database_url', 'get_engine',
'get_session')
DEFAULT_DATABASE_URL = 'sqlite://'
<|code_end|>
. Use current file imports:
import os
import contextlib
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool, StaticPool
from cliche.celery import app as celery_app
from cliche.orm import Base, Session
from cliche.web.app import app as flask_app
and context (classes, functions, or code) from other files:
# Path: cliche/celery.py
# class Loader(BaseLoader):
# def read_configuration(self):
# def get_database_engine() -> Engine:
# def get_session() -> Session:
# def close_session(task_id, task, *args, **kwargs):
# def get_raven_client() -> Client:
# def setup_raven_logging(conf=None, **kwargs):
# def report_task_failure(task_id, exception, args, kwargs,
# traceback, einfo, sender):
#
# Path: cliche/orm.py
# def make_repr(self):
# def get_alembic_config(engine):
# def initialize_database(engine):
# def get_database_revision(engine):
# def get_revision(rev, context):
# def upgrade_database(engine, revision='head'):
# def upgrade(rev, context):
# def update_current_rev(old, new):
# def downgrade_database(engine, revision):
# def import_all_modules(dry_run=False): # FIXME
#
# Path: cliche/web/app.py
# def setup_sentry():
# def get_sentry() -> Sentry:
# def index():
# def check_login_status():
# def template_processor():
# def add_login_header(response):
. Output only the next line. | def get_database_url(url=None): |
Given the code snippet: <|code_start|>
__all__ = ('DEFAULT_DATABASE_URL', 'get_database_url', 'get_engine',
'get_session')
DEFAULT_DATABASE_URL = 'sqlite://'
def get_database_url(url=None):
return url or os.environ.get(
'CLICHE_TEST_DATABASE_URL',
<|code_end|>
, generate the next line using the imports in this file:
import os
import contextlib
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool, StaticPool
from cliche.celery import app as celery_app
from cliche.orm import Base, Session
from cliche.web.app import app as flask_app
and context (functions, classes, or occasionally code) from other files:
# Path: cliche/celery.py
# class Loader(BaseLoader):
# def read_configuration(self):
# def get_database_engine() -> Engine:
# def get_session() -> Session:
# def close_session(task_id, task, *args, **kwargs):
# def get_raven_client() -> Client:
# def setup_raven_logging(conf=None, **kwargs):
# def report_task_failure(task_id, exception, args, kwargs,
# traceback, einfo, sender):
#
# Path: cliche/orm.py
# def make_repr(self):
# def get_alembic_config(engine):
# def initialize_database(engine):
# def get_database_revision(engine):
# def get_revision(rev, context):
# def upgrade_database(engine, revision='head'):
# def upgrade(rev, context):
# def update_current_rev(old, new):
# def downgrade_database(engine, revision):
# def import_all_modules(dry_run=False): # FIXME
#
# Path: cliche/web/app.py
# def setup_sentry():
# def get_sentry() -> Sentry:
# def index():
# def check_login_status():
# def template_processor():
# def add_login_header(response):
. Output only the next line. | DEFAULT_DATABASE_URL |
Based on the snippet: <|code_start|> DEFAULT_DATABASE_URL
)
def get_engine(url=None, echo=False):
url = get_database_url(url)
flask_app.config['DATABASE_URL'] = url
connect_args = {}
options = {'connect_args': connect_args, 'poolclass': NullPool}
if url == DEFAULT_DATABASE_URL:
# We have to use SQLite :memory: database across multiple threads
# for testing.
# http://bit.ly/1dF3SL3#using-a-memory-database-in-multiple-threads
connect_args['check_same_thread'] = False
options['poolclass'] = StaticPool
engine = create_engine(url, echo=echo, **options)
flask_app.config['DATABASE_ENGINE'] = engine
celery_app.conf['DATABASE_ENGINE'] = engine
return engine
@contextlib.contextmanager
def get_session(database_url=None, echo_sql=False):
engine = get_engine(database_url, echo=echo_sql)
try:
metadata = Base.metadata
metadata.drop_all(bind=engine)
metadata.create_all(bind=engine)
session = Session(bind=engine)
yield session
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import contextlib
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool, StaticPool
from cliche.celery import app as celery_app
from cliche.orm import Base, Session
from cliche.web.app import app as flask_app
and context (classes, functions, sometimes code) from other files:
# Path: cliche/celery.py
# class Loader(BaseLoader):
# def read_configuration(self):
# def get_database_engine() -> Engine:
# def get_session() -> Session:
# def close_session(task_id, task, *args, **kwargs):
# def get_raven_client() -> Client:
# def setup_raven_logging(conf=None, **kwargs):
# def report_task_failure(task_id, exception, args, kwargs,
# traceback, einfo, sender):
#
# Path: cliche/orm.py
# def make_repr(self):
# def get_alembic_config(engine):
# def initialize_database(engine):
# def get_database_revision(engine):
# def get_revision(rev, context):
# def upgrade_database(engine, revision='head'):
# def upgrade(rev, context):
# def update_current_rev(old, new):
# def downgrade_database(engine, revision):
# def import_all_modules(dry_run=False): # FIXME
#
# Path: cliche/web/app.py
# def setup_sentry():
# def get_sentry() -> Sentry:
# def index():
# def check_login_status():
# def template_processor():
# def add_login_header(response):
. Output only the next line. | session.rollback() |
Given the code snippet: <|code_start|>
def test_config_dict():
c = ConfigDict(A=1)
assert c['A'] == 1
with raises(ConfigKeyError):
c['B']
def test_config_key_error():
e = ConfigKeyError('key')
assert isinstance(e, KeyError)
assert str(e) == "missing configuration: 'key'"
config_yaml = '''
<|code_end|>
, generate the next line using the imports in this file:
from pytest import mark, raises
from cliche.config import (ConfigDict, ConfigKeyError,
read_config, read_config_from_python,
read_config_from_yaml)
and context (functions, classes, or occasionally code) from other files:
# Path: cliche/config.py
# class ConfigDict(dict):
# """Almost the same to the built-in :class:`dict` except it raises
# :exc:`ConfigKeyError` instead of :exc:`KeyError` with finer error
# message.
#
# """
#
# def __getitem__(self, key):
# try:
# return super().__getitem__(key)
# except KeyError:
# raise ConfigKeyError(key)
#
# class ConfigKeyError(KeyError):
# """The exception raised when there's no such configured key, that
# is a subtype of built-in :exc:`KeyError`.
#
# """
#
# def __str__(self):
# return 'missing configuration: ' + super().__str__()
#
# def read_config(filename):
# """Read Cliche app configuration from the given filename. ::
#
# config = read_config(filename='dev.cfg.yml')
#
# Note that it takes only one keyword argument at a time. All parameters
# are mutually exclusive for each other.
#
# :param filename: read config from a *filename* of yaml or
# python source code
# :type filename: :class:`pathlib.Path`
# :returns: the parsed dictionary with uppercase keys
# :rtype: :class:`ConfigDict`
#
# """
# if not isinstance(filename, pathlib.Path):
# raise TypeError(
# 'expected an instance of {0.__module__}.{0.__qualname__}'
# ', not {1!r}'.format(pathlib.Path, filename)
# )
# if filename.suffix in ('.yml', '.yaml'):
# return read_config_from_yaml(filename=filename)
# if filename.suffix != '.py':
# warnings.warn(
# 'the suffix of {0} represents niether Python (.py) nor '
# 'YAML (.yml/.yaml); treat it as Python',
# RuntimeWarning, stacklevel=2
# )
# return read_config_from_python(filename=filename)
#
# def read_config_from_python(*, string=None, file=None, filename=None):
# """Read Cliche app configuration from Python code i.e. Flask-style
# configuration::
#
# config = read_config_from_python(filename='dev.cfg.py')
#
# Note that it takes only one keyword argument at a time. All parameters
# are mutually exclusive for each other.
#
# :param string: read config from a python source code string
# :type string: :class:`str`
# :param file: read config from a *file object* of python source code
# :param filename: read config from a *filename* of python source code
# :type filename: :class:`pathlib.Path`
# :returns: the parsed dictionary with uppercase keys
# :rtype: :class:`collections.abc.Mapping`
#
# """
# args_number = sum(a is not None for a in {string, file, filename})
# if args_number > 1:
# raise TypeError('it takes a keyword at a time; keywords are '
# 'exclusive to each other')
# elif not args_number:
# raise TypeError('missing keyword')
# elif string is not None:
# if not isinstance(string, str):
# raise TypeError('expected a string, not ' + repr(string))
# filename = '<string>'
# elif file is not None:
# filename = getattr(file, 'name', '<file>')
# string = file.read()
# else:
# if not isinstance(filename, pathlib.Path):
# raise TypeError(
# 'expected an instance of {0.__module__}.{0.__qualname__}'
# ', not {1!r}'.format(pathlib.Path, filename)
# )
# try:
# with filename.open() as f:
# string = f.read()
# except IOError as e:
# if e.errno in (errno.ENOENT, errno.EISDIR):
# e.strerror = 'unable to load configuration file ({})'.format(
# e.strerror
# )
# raise
# config = {}
# exec(compile(string, str(filename), 'exec'), config)
# return ConfigDict((k, v) for k, v in config.items() if k.isupper())
#
# def read_config_from_yaml(*, string=None, file=None, filename=None):
# """Read Cliche app configuration from YAML. ::
#
# config = read_config_from_yaml(filename='dev.cfg.yml')
#
# Note that it takes only one keyword argument at a time. All parameters
# are mutually exclusive for each other.
#
# :param string: read config from a yaml string
# :type string: :class:`str`
# :param file: read config from a *file object* of yaml
# :param filename: read config from a *filename* of yaml
# :type filename: :class:`pathlib.Path`
# :returns: the parsed dictionary with uppercase keys
# :rtype: :class:`ConfigDict`
#
# """
# args_number = sum(a is not None for a in {string, file, filename})
# if args_number > 1:
# raise TypeError('it takes a keyword at a time; keywords are '
# 'exclusive to each other')
# elif not args_number:
# raise TypeError('missing keyword')
# elif string is not None:
# if not isinstance(string, str):
# raise TypeError('expected a string, not ' + repr(string))
# dictionary = load(string)
# elif file is not None:
# if not callable(getattr(file, 'read', None)):
# raise TypeError('expected a file-like object, not ' + repr(file))
# dictionary = load(file)
# else:
# if not isinstance(filename, pathlib.Path):
# raise TypeError(
# 'expected an instance of {0.__module__}.{0.__qualname__}'
# ', not {1!r}'.format(pathlib.Path, filename)
# )
# with filename.open() as f:
# dictionary = load(f)
# return ConfigDict((k.upper(), v) for k, v in dictionary.items())
. Output only the next line. | debug: true |
Using the snippet: <|code_start|>
def test_config_dict():
c = ConfigDict(A=1)
assert c['A'] == 1
with raises(ConfigKeyError):
c['B']
def test_config_key_error():
e = ConfigKeyError('key')
<|code_end|>
, determine the next line of code. You have imports:
from pytest import mark, raises
from cliche.config import (ConfigDict, ConfigKeyError,
read_config, read_config_from_python,
read_config_from_yaml)
and context (class names, function names, or code) available:
# Path: cliche/config.py
# class ConfigDict(dict):
# """Almost the same to the built-in :class:`dict` except it raises
# :exc:`ConfigKeyError` instead of :exc:`KeyError` with finer error
# message.
#
# """
#
# def __getitem__(self, key):
# try:
# return super().__getitem__(key)
# except KeyError:
# raise ConfigKeyError(key)
#
# class ConfigKeyError(KeyError):
# """The exception raised when there's no such configured key, that
# is a subtype of built-in :exc:`KeyError`.
#
# """
#
# def __str__(self):
# return 'missing configuration: ' + super().__str__()
#
# def read_config(filename):
# """Read Cliche app configuration from the given filename. ::
#
# config = read_config(filename='dev.cfg.yml')
#
# Note that it takes only one keyword argument at a time. All parameters
# are mutually exclusive for each other.
#
# :param filename: read config from a *filename* of yaml or
# python source code
# :type filename: :class:`pathlib.Path`
# :returns: the parsed dictionary with uppercase keys
# :rtype: :class:`ConfigDict`
#
# """
# if not isinstance(filename, pathlib.Path):
# raise TypeError(
# 'expected an instance of {0.__module__}.{0.__qualname__}'
# ', not {1!r}'.format(pathlib.Path, filename)
# )
# if filename.suffix in ('.yml', '.yaml'):
# return read_config_from_yaml(filename=filename)
# if filename.suffix != '.py':
# warnings.warn(
# 'the suffix of {0} represents niether Python (.py) nor '
# 'YAML (.yml/.yaml); treat it as Python',
# RuntimeWarning, stacklevel=2
# )
# return read_config_from_python(filename=filename)
#
# def read_config_from_python(*, string=None, file=None, filename=None):
# """Read Cliche app configuration from Python code i.e. Flask-style
# configuration::
#
# config = read_config_from_python(filename='dev.cfg.py')
#
# Note that it takes only one keyword argument at a time. All parameters
# are mutually exclusive for each other.
#
# :param string: read config from a python source code string
# :type string: :class:`str`
# :param file: read config from a *file object* of python source code
# :param filename: read config from a *filename* of python source code
# :type filename: :class:`pathlib.Path`
# :returns: the parsed dictionary with uppercase keys
# :rtype: :class:`collections.abc.Mapping`
#
# """
# args_number = sum(a is not None for a in {string, file, filename})
# if args_number > 1:
# raise TypeError('it takes a keyword at a time; keywords are '
# 'exclusive to each other')
# elif not args_number:
# raise TypeError('missing keyword')
# elif string is not None:
# if not isinstance(string, str):
# raise TypeError('expected a string, not ' + repr(string))
# filename = '<string>'
# elif file is not None:
# filename = getattr(file, 'name', '<file>')
# string = file.read()
# else:
# if not isinstance(filename, pathlib.Path):
# raise TypeError(
# 'expected an instance of {0.__module__}.{0.__qualname__}'
# ', not {1!r}'.format(pathlib.Path, filename)
# )
# try:
# with filename.open() as f:
# string = f.read()
# except IOError as e:
# if e.errno in (errno.ENOENT, errno.EISDIR):
# e.strerror = 'unable to load configuration file ({})'.format(
# e.strerror
# )
# raise
# config = {}
# exec(compile(string, str(filename), 'exec'), config)
# return ConfigDict((k, v) for k, v in config.items() if k.isupper())
#
# def read_config_from_yaml(*, string=None, file=None, filename=None):
# """Read Cliche app configuration from YAML. ::
#
# config = read_config_from_yaml(filename='dev.cfg.yml')
#
# Note that it takes only one keyword argument at a time. All parameters
# are mutually exclusive for each other.
#
# :param string: read config from a yaml string
# :type string: :class:`str`
# :param file: read config from a *file object* of yaml
# :param filename: read config from a *filename* of yaml
# :type filename: :class:`pathlib.Path`
# :returns: the parsed dictionary with uppercase keys
# :rtype: :class:`ConfigDict`
#
# """
# args_number = sum(a is not None for a in {string, file, filename})
# if args_number > 1:
# raise TypeError('it takes a keyword at a time; keywords are '
# 'exclusive to each other')
# elif not args_number:
# raise TypeError('missing keyword')
# elif string is not None:
# if not isinstance(string, str):
# raise TypeError('expected a string, not ' + repr(string))
# dictionary = load(string)
# elif file is not None:
# if not callable(getattr(file, 'read', None)):
# raise TypeError('expected a file-like object, not ' + repr(file))
# dictionary = load(file)
# else:
# if not isinstance(filename, pathlib.Path):
# raise TypeError(
# 'expected an instance of {0.__module__}.{0.__qualname__}'
# ', not {1!r}'.format(pathlib.Path, filename)
# )
# with filename.open() as f:
# dictionary = load(f)
# return ConfigDict((k.upper(), v) for k, v in dictionary.items())
. Output only the next line. | assert isinstance(e, KeyError) |
Given snippet: <|code_start|>
def test_config_dict():
c = ConfigDict(A=1)
assert c['A'] == 1
with raises(ConfigKeyError):
c['B']
def test_config_key_error():
e = ConfigKeyError('key')
assert isinstance(e, KeyError)
assert str(e) == "missing configuration: 'key'"
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from pytest import mark, raises
from cliche.config import (ConfigDict, ConfigKeyError,
read_config, read_config_from_python,
read_config_from_yaml)
and context:
# Path: cliche/config.py
# class ConfigDict(dict):
# """Almost the same to the built-in :class:`dict` except it raises
# :exc:`ConfigKeyError` instead of :exc:`KeyError` with finer error
# message.
#
# """
#
# def __getitem__(self, key):
# try:
# return super().__getitem__(key)
# except KeyError:
# raise ConfigKeyError(key)
#
# class ConfigKeyError(KeyError):
# """The exception raised when there's no such configured key, that
# is a subtype of built-in :exc:`KeyError`.
#
# """
#
# def __str__(self):
# return 'missing configuration: ' + super().__str__()
#
# def read_config(filename):
# """Read Cliche app configuration from the given filename. ::
#
# config = read_config(filename='dev.cfg.yml')
#
# Note that it takes only one keyword argument at a time. All parameters
# are mutually exclusive for each other.
#
# :param filename: read config from a *filename* of yaml or
# python source code
# :type filename: :class:`pathlib.Path`
# :returns: the parsed dictionary with uppercase keys
# :rtype: :class:`ConfigDict`
#
# """
# if not isinstance(filename, pathlib.Path):
# raise TypeError(
# 'expected an instance of {0.__module__}.{0.__qualname__}'
# ', not {1!r}'.format(pathlib.Path, filename)
# )
# if filename.suffix in ('.yml', '.yaml'):
# return read_config_from_yaml(filename=filename)
# if filename.suffix != '.py':
# warnings.warn(
# 'the suffix of {0} represents niether Python (.py) nor '
# 'YAML (.yml/.yaml); treat it as Python',
# RuntimeWarning, stacklevel=2
# )
# return read_config_from_python(filename=filename)
#
# def read_config_from_python(*, string=None, file=None, filename=None):
# """Read Cliche app configuration from Python code i.e. Flask-style
# configuration::
#
# config = read_config_from_python(filename='dev.cfg.py')
#
# Note that it takes only one keyword argument at a time. All parameters
# are mutually exclusive for each other.
#
# :param string: read config from a python source code string
# :type string: :class:`str`
# :param file: read config from a *file object* of python source code
# :param filename: read config from a *filename* of python source code
# :type filename: :class:`pathlib.Path`
# :returns: the parsed dictionary with uppercase keys
# :rtype: :class:`collections.abc.Mapping`
#
# """
# args_number = sum(a is not None for a in {string, file, filename})
# if args_number > 1:
# raise TypeError('it takes a keyword at a time; keywords are '
# 'exclusive to each other')
# elif not args_number:
# raise TypeError('missing keyword')
# elif string is not None:
# if not isinstance(string, str):
# raise TypeError('expected a string, not ' + repr(string))
# filename = '<string>'
# elif file is not None:
# filename = getattr(file, 'name', '<file>')
# string = file.read()
# else:
# if not isinstance(filename, pathlib.Path):
# raise TypeError(
# 'expected an instance of {0.__module__}.{0.__qualname__}'
# ', not {1!r}'.format(pathlib.Path, filename)
# )
# try:
# with filename.open() as f:
# string = f.read()
# except IOError as e:
# if e.errno in (errno.ENOENT, errno.EISDIR):
# e.strerror = 'unable to load configuration file ({})'.format(
# e.strerror
# )
# raise
# config = {}
# exec(compile(string, str(filename), 'exec'), config)
# return ConfigDict((k, v) for k, v in config.items() if k.isupper())
#
# def read_config_from_yaml(*, string=None, file=None, filename=None):
# """Read Cliche app configuration from YAML. ::
#
# config = read_config_from_yaml(filename='dev.cfg.yml')
#
# Note that it takes only one keyword argument at a time. All parameters
# are mutually exclusive for each other.
#
# :param string: read config from a yaml string
# :type string: :class:`str`
# :param file: read config from a *file object* of yaml
# :param filename: read config from a *filename* of yaml
# :type filename: :class:`pathlib.Path`
# :returns: the parsed dictionary with uppercase keys
# :rtype: :class:`ConfigDict`
#
# """
# args_number = sum(a is not None for a in {string, file, filename})
# if args_number > 1:
# raise TypeError('it takes a keyword at a time; keywords are '
# 'exclusive to each other')
# elif not args_number:
# raise TypeError('missing keyword')
# elif string is not None:
# if not isinstance(string, str):
# raise TypeError('expected a string, not ' + repr(string))
# dictionary = load(string)
# elif file is not None:
# if not callable(getattr(file, 'read', None)):
# raise TypeError('expected a file-like object, not ' + repr(file))
# dictionary = load(file)
# else:
# if not isinstance(filename, pathlib.Path):
# raise TypeError(
# 'expected an instance of {0.__module__}.{0.__qualname__}'
# ', not {1!r}'.format(pathlib.Path, filename)
# )
# with filename.open() as f:
# dictionary = load(f)
# return ConfigDict((k.upper(), v) for k, v in dictionary.items())
which might include code, classes, or functions. Output only the next line. | config_yaml = ''' |
Given snippet: <|code_start|>
urlpatterns = [
url(r'^member/(?P<member_id>[0-9]+)/membership/new/$', NewMembershipView.as_view(), name='new_membership'),
url(r'^$', DashboardView.as_view(), name='home')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.conf.urls import url
from .views import DashboardView, NewMembershipView
and context:
# Path: core/views.py
# class DashboardView(View):
# def get(self, request):
# return TemplateResponse(request, 'dashboard.html', context={'DEBUG': settings.DEBUG})
#
# class NewMembershipView(TemplateView):
# template_name = 'membership_form.html'
#
# def get(self, request, member_id, **kwargs):
# membership_form = MembershipForm(initial=dict(member=member_id))
# payment_form = PaymentForm()
# return self.render_to_response(dict(membership_form=membership_form, payment_form=payment_form))
#
# def post(self, request, member_id):
# membership_form = MembershipForm(request.POST, initial=dict(member=member_id))
# payment_form = PaymentForm(request.POST)
# member = Member.objects.get(id=member_id)
#
# if membership_form.is_valid() and payment_form.is_valid():
# new_payment = payment_form.save()
# new_membership = membership_form.save()
# new_membership.payment = new_payment
# new_membership.save()
# messages.add_message(request, messages.SUCCESS, 'Successfully created our newest member, {first} {last}'
# .format(first=member.first_name, last=member.last_name))
# return HttpResponseRedirect(reverse('member_edit', kwargs=dict(member_id=member_id)))
# return self.render_to_response(dict(membership_form=membership_form, payment_form=payment_form))
which might include code, classes, or functions. Output only the next line. | ] |
Here is a snippet: <|code_start|>
apiRoutes = (
(r'members', MemberViewSet),
)
urlpatterns = [
url(r'^new/$', MemberFormView.as_view(), name='member_new'),
url(r'^search/(?P<query>[\w|\W]+)/$', MemberSearchView.as_view(), name='member_search'),
url(r'^edit/(?P<member_id>[0-9]+)/$', MemberFormView.as_view(), name='member_edit'),
url(r'^signin/$', MemberSignIn.as_view(), name='member_signin'),
url(r'^$', Members.as_view(), name='members'),
<|code_end|>
. Write the next line using the current file imports:
from django.conf.urls import url
from .views import MemberFormView, MemberSearchView, MemberSignIn, Members, MemberViewSet
and context from other files:
# Path: registration/views.py
# class MemberFormView(View):
# def get(self, request, member_id=None):
# try:
# member = Member.objects.get(id=member_id)
# form = MemberForm(instance=member)
# except Member.DoesNotExist:
# form = MemberForm()
# member = None
#
# context = dict(form=form)
# if member:
# context['member'] = member
# return TemplateResponse(request, 'edit_member_form.html', context=context)
#
# return TemplateResponse(request, 'member_form.html', context=context)
#
# def post(self, request, member_id=None):
# try:
# member = Member.objects.get(id=member_id)
# form = MemberForm(request.POST, instance=member)
# except Member.DoesNotExist:
# member = None
# form = MemberForm(request.POST)
#
# if form.is_valid():
# member_instance = form.save()
# return HttpResponseRedirect(reverse('member_edit', kwargs=dict(member_id=member_instance.id)))
#
# context = {'form': form}
# if member:
# context['member'] = member
# return TemplateResponse(request, 'member_form.html', context=context)
#
# class MemberSearchView(View):
# def get(self, request, query):
# sqs = SearchQuerySet().models(Member).autocomplete(text=query)[:5]
# results = [dict(name=result.object.get_full_name(), email=result.object.email, id=result.object.id)
# for result in sqs]
#
# data = json.dumps(dict(results=results))
#
# return HttpResponse(data, content_type='application/json')
#
# class MemberSignIn(View):
# @method_decorator(csrf_exempt)
# def dispatch(self, request, *args, **kwargs):
# return super(MemberSignIn, self).dispatch(request, *args, **kwargs)
#
# def post(self, request):
# member = get_object_or_404(Member, id=request.POST.get('id'))
# try:
# visit = signin_member(member, request.POST.get('purpose'))
# membership = Membership.objects.select_related('payment').filter(member=member).last()
# except ObjectDoesNotExist:
# membership = None
# except ValidationError:
# return JsonResponse(data=dict(), status=status.HTTP_400_BAD_REQUEST)
#
# membership_dict = dict(renewed_at=membership.renewed_at, payment=membership.payment.type,
# expires_at=membership.expires_at) if membership else None
# data = dict(results=dict(id=member.id, first_name=member.first_name, last_name=member.last_name,
# suspended=member.suspended, banned=member.banned,
# created_at=visit.created_at.isoformat(), notes=member.notes,
# membership=membership_dict))
#
# return JsonResponse(data=data, safe=False, status=201)
#
# def get(self, request):
# visits = get_signed_in_members().prefetch_related()
# serializer = VisitSerializer(visits, many=True)
#
# return JsonResponse(data=serializer.data, safe=False, status=200)
#
# class Members(TemplateView):
# template_name = 'members.html'
#
# def get(self, request):
# members = Member.objects.all()
# return self.render_to_response(dict(members=members))
#
# class MemberViewSet(viewsets.ModelViewSet):
# queryset = Member.objects.all()
# serializer_class = MemberSerializer
, which may include functions, classes, or code. Output only the next line. | ] |
Using the snippet: <|code_start|>
@receiver(post_save, sender=Member, dispatch_uid='member.save_member')
def update_mailchimp(sender, instance, **kwargs):
if instance.email and settings.MAILCHIMP_API_KEY:
involvement = {id: True for id in instance.involvement}
client = MailChimp(settings.MAILCHIMP_USERNAME, settings.MAILCHIMP_API_KEY)
try:
response = client.lists.members.create_or_update('1c664549e2',
hashlib.md5(bytes(instance.email, 'utf-8')).hexdigest(), {
'email_address': instance.email,
<|code_end|>
, determine the next line of code. You have imports:
import hashlib
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from mailchimp3 import MailChimp
from requests import HTTPError
from registration.models import Member
and context (class names, function names, or code) available:
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
. Output only the next line. | 'status': 'subscribed' if instance.email_consent else 'unsubscribed', |
Given the following code snippet before the placeholder: <|code_start|>
logger = logging.getLogger('bikeshop')
class TestMemberFormView(TestCase):
def setUp(self):
self.user = mommy.make(CustomUser)
self.member = mommy.make(Member)
def test_get_member_new(self):
url = reverse('member_new')
c = Client()
c.force_login(self.user)
response = c.get(url)
self.assertEqual(response.status_code, 200)
def test_post_member_new(self):
url = reverse('member_new')
c = Client()
c.force_login(self.user)
member_data = {
'first_name': 'First',
'last_name': 'Last',
'post_code': 'H0H0H0',
<|code_end|>
, predict the next line using imports from the current file:
import json
import logging
from datetime import datetime, timedelta
from django.urls import reverse
from django.http import JsonResponse
from django.test import Client, TestCase
from rest_framework.exceptions import ValidationError
from core.models import Visit
from model_mommy import mommy
from django.utils import timezone
from ..models import CustomUser, Member
and context including class names, function names, and sometimes code from other files:
# Path: core/models.py
# class Visit(models.Model):
# VOLUNTEER = 'VOLUNTEER'
# FIX = 'FIX'
# BUILD = 'BUILD'
# WORKSHOP = 'WORKSHOP'
# VISIT = 'VISIT'
# DONATE = 'DONATE'
# STAFF = 'STAFF'
# PARTS = 'PARTS'
# BUY_BIKE = 'BUY_BIKE'
# TOUR = 'TOUR'
#
# visit_choices = (
# (VOLUNTEER, 'volunteer'),
# (FIX, 'fix bike'),
# (BUILD, 'build bike'),
# (WORKSHOP, 'workshop'),
# (VISIT, 'visit'),
# (DONATE, 'donate'),
# (STAFF, 'staff'),
# (PARTS, 'parts'),
# (BUY_BIKE, 'buy bike'),
# (TOUR, 'tour / visit')
# )
#
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE
# )
# created_at = models.DateTimeField(default=timezone.now)
# purpose = models.CharField(max_length=50, choices=visit_choices)
#
# def __str__(self):
# return '<Visit purpose: {purpose} created_at: {created_at}>'.format(purpose=self.purpose,
# created_at=self.created_at.isoformat())
#
# Path: registration/models.py
# class CustomUser(AbstractBaseUser, PermissionsMixin):
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=True,
# )
# is_admin = models.BooleanField(default=False)
# is_active = models.BooleanField(default=True)
#
# objects = CustomUserManager()
#
# USERNAME_FIELD = 'email'
#
# @property
# def is_staff(self):
# # Simplest possible answer: All admins are staff
# return self.is_admin
#
# def get_short_name(self):
# return self.email
#
# def get_full_name(self):
# return self.email
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# class Meta:
# verbose_name = 'User'
# verbose_name_plural = 'Users'
#
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
. Output only the next line. | } |
Next line prediction: <|code_start|>
class MembershipForm(ModelForm):
member = CharField(required=True, widget=HiddenInput())
self_ident_other = CharField(required=False, label='Self identification',
widget=TextInput(attrs={'class': 'mdl-textfield__input'}))
gender_other = CharField(required=False, label='Other', widget=TextInput(attrs={'class': 'mdl-textfield__input'}))
safe_space = BooleanField(required=True, widget=CheckboxInput(
attrs={'class': 'mdl-checkbox__input'}
))
respect_community = BooleanField(required=True, widget=CheckboxInput(
attrs={'class': 'mdl-checkbox__input'}
))
give_back = BooleanField(required=True, widget=CheckboxInput(
attrs={'class': 'mdl-checkbox__input'}
))
respect_shop = BooleanField(required=True, widget=CheckboxInput(
attrs={'class': 'mdl-checkbox__input'}
))
class Meta:
model = Membership
fields = ['renewed_at', 'self_identification', 'gender']
self_ident_choices = (
('First Nations; Métis; or Inuit', 'First Nations; Métis; or Inuit'),
('visible minority', 'Visible Minority'),
('caucasian', 'Caucasian'),
('other', 'Other')
)
<|code_end|>
. Use current file imports:
(import logging
from django.forms import BooleanField, CharField, CheckboxInput, RadioSelect, ModelForm, TextInput, HiddenInput
from registration.models import Member
from .models import Membership, Payment)
and context including class names, function names, or small code snippets from other files:
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# Path: core/models.py
# class Membership(models.Model):
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# renewed_at = models.DateTimeField(default=timezone.now)
# self_identification = models.CharField(max_length=255, null=True, blank=True)
# gender = models.CharField(max_length=255, null=True, blank=True)
# involvement = models.CharField(max_length=255, null=True, blank=True)
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE,
# related_name='memberships',
# blank=True,
# null=True
# )
# payment = models.OneToOneField(
# 'Payment',
# on_delete=models.CASCADE,
# related_name='membership',
# blank=False,
# null=True
# )
#
# @cached_property
# def expires_at(self):
# return self.renewed_at + relativedelta(years=1)
#
# class Payment(models.Model):
# payment_choices = (
# ('NONE', 'None'),
# ('CASH', 'Cash'),
# ('CHEQUE', 'Cheque'),
# ('VOLUNTEERING', 'Volunteering'),
# ('SQUARE', 'Square'),
# ('PAYPAL', 'PayPal'),
# ('YOUTH', 'Youth'),
# ('UNKNOWN', 'Unknown')
# )
# type = models.CharField(max_length=12, choices=payment_choices, default='NONE')
# created_at = models.DateTimeField(auto_now_add=True)
. Output only the next line. | gender_choices = ( |
Continue the code snippet: <|code_start|> )
gender_choices = (
('male', 'Male'),
('female', 'Female'),
('other', 'Other')
)
widgets = {
'self_identification': RadioSelect(choices=self_ident_choices, attrs={'class': 'mdl-radio__button'}),
'gender': RadioSelect(choices=gender_choices, attrs={'class': 'mdl-radio__button'}),
'renewed_at': TextInput(attrs={'class': 'mdl-textfield__input'}),
}
def save(self, commit=True):
instance = super(MembershipForm, self).save(commit=False)
member = Member.objects.get(id=self.cleaned_data['member'])
instance.member = member
logger.debug(self.cleaned_data['self_identification'])
logger.debug(self.cleaned_data['gender'])
if self.cleaned_data['gender_other']:
instance.gender = self.cleaned_data['gender_other']
if self.cleaned_data['self_ident_other']:
instance.self_identification = self.cleaned_data['self_ident_other']
if commit:
instance.save()
<|code_end|>
. Use current file imports:
import logging
from django.forms import BooleanField, CharField, CheckboxInput, RadioSelect, ModelForm, TextInput, HiddenInput
from registration.models import Member
from .models import Membership, Payment
and context (classes, functions, or code) from other files:
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# Path: core/models.py
# class Membership(models.Model):
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# renewed_at = models.DateTimeField(default=timezone.now)
# self_identification = models.CharField(max_length=255, null=True, blank=True)
# gender = models.CharField(max_length=255, null=True, blank=True)
# involvement = models.CharField(max_length=255, null=True, blank=True)
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE,
# related_name='memberships',
# blank=True,
# null=True
# )
# payment = models.OneToOneField(
# 'Payment',
# on_delete=models.CASCADE,
# related_name='membership',
# blank=False,
# null=True
# )
#
# @cached_property
# def expires_at(self):
# return self.renewed_at + relativedelta(years=1)
#
# class Payment(models.Model):
# payment_choices = (
# ('NONE', 'None'),
# ('CASH', 'Cash'),
# ('CHEQUE', 'Cheque'),
# ('VOLUNTEERING', 'Volunteering'),
# ('SQUARE', 'Square'),
# ('PAYPAL', 'PayPal'),
# ('YOUTH', 'Youth'),
# ('UNKNOWN', 'Unknown')
# )
# type = models.CharField(max_length=12, choices=payment_choices, default='NONE')
# created_at = models.DateTimeField(auto_now_add=True)
. Output only the next line. | return instance |
Next line prediction: <|code_start|> attrs={'class': 'mdl-checkbox__input'}
))
give_back = BooleanField(required=True, widget=CheckboxInput(
attrs={'class': 'mdl-checkbox__input'}
))
respect_shop = BooleanField(required=True, widget=CheckboxInput(
attrs={'class': 'mdl-checkbox__input'}
))
class Meta:
model = Membership
fields = ['renewed_at', 'self_identification', 'gender']
self_ident_choices = (
('First Nations; Métis; or Inuit', 'First Nations; Métis; or Inuit'),
('visible minority', 'Visible Minority'),
('caucasian', 'Caucasian'),
('other', 'Other')
)
gender_choices = (
('male', 'Male'),
('female', 'Female'),
('other', 'Other')
)
widgets = {
'self_identification': RadioSelect(choices=self_ident_choices, attrs={'class': 'mdl-radio__button'}),
'gender': RadioSelect(choices=gender_choices, attrs={'class': 'mdl-radio__button'}),
'renewed_at': TextInput(attrs={'class': 'mdl-textfield__input'}),
<|code_end|>
. Use current file imports:
(import logging
from django.forms import BooleanField, CharField, CheckboxInput, RadioSelect, ModelForm, TextInput, HiddenInput
from registration.models import Member
from .models import Membership, Payment)
and context including class names, function names, or small code snippets from other files:
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# Path: core/models.py
# class Membership(models.Model):
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# renewed_at = models.DateTimeField(default=timezone.now)
# self_identification = models.CharField(max_length=255, null=True, blank=True)
# gender = models.CharField(max_length=255, null=True, blank=True)
# involvement = models.CharField(max_length=255, null=True, blank=True)
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE,
# related_name='memberships',
# blank=True,
# null=True
# )
# payment = models.OneToOneField(
# 'Payment',
# on_delete=models.CASCADE,
# related_name='membership',
# blank=False,
# null=True
# )
#
# @cached_property
# def expires_at(self):
# return self.renewed_at + relativedelta(years=1)
#
# class Payment(models.Model):
# payment_choices = (
# ('NONE', 'None'),
# ('CASH', 'Cash'),
# ('CHEQUE', 'Cheque'),
# ('VOLUNTEERING', 'Volunteering'),
# ('SQUARE', 'Square'),
# ('PAYPAL', 'PayPal'),
# ('YOUTH', 'Youth'),
# ('UNKNOWN', 'Unknown')
# )
# type = models.CharField(max_length=12, choices=payment_choices, default='NONE')
# created_at = models.DateTimeField(auto_now_add=True)
. Output only the next line. | } |
Given the code snippet: <|code_start|>
logger = logging.getLogger(__name__)
@method_decorator(login_required, name='dispatch')
class DashboardView(View):
def get(self, request):
<|code_end|>
, generate the next line using the imports in this file:
import logging
from django.conf import settings
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.views.generic import TemplateView, View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from registration.models import Member
from .forms import MembershipForm, PaymentForm
and context (functions, classes, or occasionally code) from other files:
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# Path: core/forms.py
# class MembershipForm(ModelForm):
# member = CharField(required=True, widget=HiddenInput())
# self_ident_other = CharField(required=False, label='Self identification',
# widget=TextInput(attrs={'class': 'mdl-textfield__input'}))
# gender_other = CharField(required=False, label='Other', widget=TextInput(attrs={'class': 'mdl-textfield__input'}))
# safe_space = BooleanField(required=True, widget=CheckboxInput(
# attrs={'class': 'mdl-checkbox__input'}
# ))
# respect_community = BooleanField(required=True, widget=CheckboxInput(
# attrs={'class': 'mdl-checkbox__input'}
# ))
# give_back = BooleanField(required=True, widget=CheckboxInput(
# attrs={'class': 'mdl-checkbox__input'}
# ))
# respect_shop = BooleanField(required=True, widget=CheckboxInput(
# attrs={'class': 'mdl-checkbox__input'}
# ))
#
# class Meta:
# model = Membership
# fields = ['renewed_at', 'self_identification', 'gender']
#
# self_ident_choices = (
# ('First Nations; Métis; or Inuit', 'First Nations; Métis; or Inuit'),
# ('visible minority', 'Visible Minority'),
# ('caucasian', 'Caucasian'),
# ('other', 'Other')
# )
#
# gender_choices = (
# ('male', 'Male'),
# ('female', 'Female'),
# ('other', 'Other')
# )
#
# widgets = {
# 'self_identification': RadioSelect(choices=self_ident_choices, attrs={'class': 'mdl-radio__button'}),
# 'gender': RadioSelect(choices=gender_choices, attrs={'class': 'mdl-radio__button'}),
# 'renewed_at': TextInput(attrs={'class': 'mdl-textfield__input'}),
# }
#
# def save(self, commit=True):
# instance = super(MembershipForm, self).save(commit=False)
# member = Member.objects.get(id=self.cleaned_data['member'])
# instance.member = member
# logger.debug(self.cleaned_data['self_identification'])
# logger.debug(self.cleaned_data['gender'])
#
# if self.cleaned_data['gender_other']:
# instance.gender = self.cleaned_data['gender_other']
#
# if self.cleaned_data['self_ident_other']:
# instance.self_identification = self.cleaned_data['self_ident_other']
#
# if commit:
# instance.save()
#
# return instance
#
# class PaymentForm(ModelForm):
# class Meta:
# model = Payment
# fields = ['type']
# widgets = {
# 'type': RadioSelect(attrs={'class': 'mdl-radio__button'})
# }
. Output only the next line. | return TemplateResponse(request, 'dashboard.html', context={'DEBUG': settings.DEBUG}) |
Given snippet: <|code_start|>
# Register your models here.
admin.site.register([Membership, Payment])
@admin.register(Visit)
class VisitAdmin(admin.ModelAdmin):
list_select_related = ('member',)
fields = ('member', 'purpose', 'created_at')
ordering = ('created_at',)
list_display = ('full_name', 'purpose', 'created_at')
list_filter = (
('purpose', admin.ChoicesFieldListFilter),
)
search_fields = ['member__email', 'member__last_name', 'member__first_name']
def full_name(self, obj):
return obj.member.full_name
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.contrib import admin
from .models import Membership, Payment, Visit
and context:
# Path: core/models.py
# class Membership(models.Model):
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# renewed_at = models.DateTimeField(default=timezone.now)
# self_identification = models.CharField(max_length=255, null=True, blank=True)
# gender = models.CharField(max_length=255, null=True, blank=True)
# involvement = models.CharField(max_length=255, null=True, blank=True)
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE,
# related_name='memberships',
# blank=True,
# null=True
# )
# payment = models.OneToOneField(
# 'Payment',
# on_delete=models.CASCADE,
# related_name='membership',
# blank=False,
# null=True
# )
#
# @cached_property
# def expires_at(self):
# return self.renewed_at + relativedelta(years=1)
#
# class Payment(models.Model):
# payment_choices = (
# ('NONE', 'None'),
# ('CASH', 'Cash'),
# ('CHEQUE', 'Cheque'),
# ('VOLUNTEERING', 'Volunteering'),
# ('SQUARE', 'Square'),
# ('PAYPAL', 'PayPal'),
# ('YOUTH', 'Youth'),
# ('UNKNOWN', 'Unknown')
# )
# type = models.CharField(max_length=12, choices=payment_choices, default='NONE')
# created_at = models.DateTimeField(auto_now_add=True)
#
# class Visit(models.Model):
# VOLUNTEER = 'VOLUNTEER'
# FIX = 'FIX'
# BUILD = 'BUILD'
# WORKSHOP = 'WORKSHOP'
# VISIT = 'VISIT'
# DONATE = 'DONATE'
# STAFF = 'STAFF'
# PARTS = 'PARTS'
# BUY_BIKE = 'BUY_BIKE'
# TOUR = 'TOUR'
#
# visit_choices = (
# (VOLUNTEER, 'volunteer'),
# (FIX, 'fix bike'),
# (BUILD, 'build bike'),
# (WORKSHOP, 'workshop'),
# (VISIT, 'visit'),
# (DONATE, 'donate'),
# (STAFF, 'staff'),
# (PARTS, 'parts'),
# (BUY_BIKE, 'buy bike'),
# (TOUR, 'tour / visit')
# )
#
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE
# )
# created_at = models.DateTimeField(default=timezone.now)
# purpose = models.CharField(max_length=50, choices=visit_choices)
#
# def __str__(self):
# return '<Visit purpose: {purpose} created_at: {created_at}>'.format(purpose=self.purpose,
# created_at=self.created_at.isoformat())
which might include code, classes, or functions. Output only the next line. | full_name.admin_order_field = 'member__last_name' |
Given the following code snippet before the placeholder: <|code_start|>
# Register your models here.
admin.site.register([Membership, Payment])
@admin.register(Visit)
class VisitAdmin(admin.ModelAdmin):
list_select_related = ('member',)
fields = ('member', 'purpose', 'created_at')
<|code_end|>
, predict the next line using imports from the current file:
from django.contrib import admin
from .models import Membership, Payment, Visit
and context including class names, function names, and sometimes code from other files:
# Path: core/models.py
# class Membership(models.Model):
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# renewed_at = models.DateTimeField(default=timezone.now)
# self_identification = models.CharField(max_length=255, null=True, blank=True)
# gender = models.CharField(max_length=255, null=True, blank=True)
# involvement = models.CharField(max_length=255, null=True, blank=True)
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE,
# related_name='memberships',
# blank=True,
# null=True
# )
# payment = models.OneToOneField(
# 'Payment',
# on_delete=models.CASCADE,
# related_name='membership',
# blank=False,
# null=True
# )
#
# @cached_property
# def expires_at(self):
# return self.renewed_at + relativedelta(years=1)
#
# class Payment(models.Model):
# payment_choices = (
# ('NONE', 'None'),
# ('CASH', 'Cash'),
# ('CHEQUE', 'Cheque'),
# ('VOLUNTEERING', 'Volunteering'),
# ('SQUARE', 'Square'),
# ('PAYPAL', 'PayPal'),
# ('YOUTH', 'Youth'),
# ('UNKNOWN', 'Unknown')
# )
# type = models.CharField(max_length=12, choices=payment_choices, default='NONE')
# created_at = models.DateTimeField(auto_now_add=True)
#
# class Visit(models.Model):
# VOLUNTEER = 'VOLUNTEER'
# FIX = 'FIX'
# BUILD = 'BUILD'
# WORKSHOP = 'WORKSHOP'
# VISIT = 'VISIT'
# DONATE = 'DONATE'
# STAFF = 'STAFF'
# PARTS = 'PARTS'
# BUY_BIKE = 'BUY_BIKE'
# TOUR = 'TOUR'
#
# visit_choices = (
# (VOLUNTEER, 'volunteer'),
# (FIX, 'fix bike'),
# (BUILD, 'build bike'),
# (WORKSHOP, 'workshop'),
# (VISIT, 'visit'),
# (DONATE, 'donate'),
# (STAFF, 'staff'),
# (PARTS, 'parts'),
# (BUY_BIKE, 'buy bike'),
# (TOUR, 'tour / visit')
# )
#
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE
# )
# created_at = models.DateTimeField(default=timezone.now)
# purpose = models.CharField(max_length=50, choices=visit_choices)
#
# def __str__(self):
# return '<Visit purpose: {purpose} created_at: {created_at}>'.format(purpose=self.purpose,
# created_at=self.created_at.isoformat())
. Output only the next line. | ordering = ('created_at',) |
Using the snippet: <|code_start|>
class TestCustomUserManager(TestCase):
def test_create_user(self):
new_user = CustomUser.objects.create_user('test@example.com')
self.assertTrue(new_user.pk)
<|code_end|>
, determine the next line of code. You have imports:
from django.test import TestCase
from ..models import CustomUser, Member
and context (class names, function names, or code) available:
# Path: registration/models.py
# class CustomUser(AbstractBaseUser, PermissionsMixin):
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=True,
# )
# is_admin = models.BooleanField(default=False)
# is_active = models.BooleanField(default=True)
#
# objects = CustomUserManager()
#
# USERNAME_FIELD = 'email'
#
# @property
# def is_staff(self):
# # Simplest possible answer: All admins are staff
# return self.is_admin
#
# def get_short_name(self):
# return self.email
#
# def get_full_name(self):
# return self.email
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# class Meta:
# verbose_name = 'User'
# verbose_name_plural = 'Users'
#
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
. Output only the next line. | def test_create_user_no_email(self): |
Next line prediction: <|code_start|>
def test_create_superuser(self):
new_user = CustomUser.objects\
.create_superuser(email='super@example.com', password='password')
self.assertTrue(new_user.is_admin)
self.assertTrue(new_user.is_staff)
self.assertTrue(new_user.check_password('password'))
self.assertTrue(new_user.pk)
class TestCustomUser(TestCase):
def setUp(self):
self.new_user = CustomUser.objects.create_user('test@example.com')
def test_get_short_name(self):
self.assertEqual(self.new_user.get_short_name(), 'test@example.com')
def test_get_full_name(self):
self.assertEqual(self.new_user.get_full_name(), 'test@example.com')
class TestMember(TestCase):
def setUp(self):
self.new_member = Member.objects.create(
first_name='First',
last_name='Last',
post_code='H0H0H0'
)
def test_get_full_name(self):
<|code_end|>
. Use current file imports:
(from django.test import TestCase
from ..models import CustomUser, Member)
and context including class names, function names, or small code snippets from other files:
# Path: registration/models.py
# class CustomUser(AbstractBaseUser, PermissionsMixin):
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=True,
# )
# is_admin = models.BooleanField(default=False)
# is_active = models.BooleanField(default=True)
#
# objects = CustomUserManager()
#
# USERNAME_FIELD = 'email'
#
# @property
# def is_staff(self):
# # Simplest possible answer: All admins are staff
# return self.is_admin
#
# def get_short_name(self):
# return self.email
#
# def get_full_name(self):
# return self.email
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# class Meta:
# verbose_name = 'User'
# verbose_name_plural = 'Users'
#
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
. Output only the next line. | self.assertEqual(self.new_member.get_full_name(), 'First Last') |
Continue the code snippet: <|code_start|>
class MemberSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Member
fields = ('id', 'email', 'email_consent', 'email_consent', 'first_name', 'last_name', 'preferred_name',
'date_of_birth', 'guardian_name', 'phone', 'street', 'city', 'province', 'country', 'post_code',
<|code_end|>
. Use current file imports:
from rest_framework import serializers
from .models import Member
and context (classes, functions, or code) from other files:
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
. Output only the next line. | 'waiver', 'is_active', 'banned', 'suspended') |
Continue the code snippet: <|code_start|> )
field_errors_required = {
'colour': 'Required.',
'make': 'Required.',
'size': 'Required.',
'source': 'Required',
'price': 'Required',
'stripped': 'Bike must be stripped first.',
'cpic_searched_at': 'This bike has not been checked in CPIC. Please dispatch a check now.'
}
@property
def CLAIMED_ERROR(self):
return f'This bike is already claimed by ${self.claimed_by.full_name} and was last worked on less than four weeks ago.'
colour = models.TextField(blank=False, null=False)
make = models.TextField(blank=False, null=False)
size = models.TextField(choices=size_choices, blank=True, null=True, max_length=2)
serial_number = models.TextField(blank=False, null=False)
source = models.TextField(blank=False, null=False, choices=source_choices)
stripped = models.NullBooleanField()
price = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)
state = FSMField(default=BikeState.RECEIVED, choices=BikeState.CHOICES, protected=True)
claimed_by = models.ForeignKey(Member, on_delete=models.SET_NULL, null=True, related_name='claimed_bike')
stolen = models.NullBooleanField()
purchased_by = models.ForeignKey(Member, on_delete=models.SET_NULL, null=True, related_name='purchased_bike')
donated_by = models.TextField(blank=True, null=True)
donated_at = models.DateTimeField(blank=False, null=False, default=timezone.now)
<|code_end|>
. Use current file imports:
from collections import Set
from datetime import timedelta
from django.db import models
from django.utils import timezone
from django_fsm import FSMField, transition
from rest_framework.exceptions import ValidationError
from registration.models import Member
and context (classes, functions, or code) from other files:
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
. Output only the next line. | created_at = models.DateTimeField(default=timezone.now) |
Here is a snippet: <|code_start|>
class GetSignedInMembersTests(TestCase):
def setUp(self):
self.now = timezone.now()
self.member1 = mommy.make(Member)
self.member2 = mommy.make(Member)
self.member3 = mommy.make(Member)
three_hours_ago = self.now - timedelta(hours=3)
<|code_end|>
. Write the next line using the current file imports:
from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from model_mommy import mommy
from core.models import Visit
from registration.models import Member
from registration.utils import signin_member, AlreadySignedInError, member_signed_in, get_signed_in_members
and context from other files:
# Path: core/models.py
# class Visit(models.Model):
# VOLUNTEER = 'VOLUNTEER'
# FIX = 'FIX'
# BUILD = 'BUILD'
# WORKSHOP = 'WORKSHOP'
# VISIT = 'VISIT'
# DONATE = 'DONATE'
# STAFF = 'STAFF'
# PARTS = 'PARTS'
# BUY_BIKE = 'BUY_BIKE'
# TOUR = 'TOUR'
#
# visit_choices = (
# (VOLUNTEER, 'volunteer'),
# (FIX, 'fix bike'),
# (BUILD, 'build bike'),
# (WORKSHOP, 'workshop'),
# (VISIT, 'visit'),
# (DONATE, 'donate'),
# (STAFF, 'staff'),
# (PARTS, 'parts'),
# (BUY_BIKE, 'buy bike'),
# (TOUR, 'tour / visit')
# )
#
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE
# )
# created_at = models.DateTimeField(default=timezone.now)
# purpose = models.CharField(max_length=50, choices=visit_choices)
#
# def __str__(self):
# return '<Visit purpose: {purpose} created_at: {created_at}>'.format(purpose=self.purpose,
# created_at=self.created_at.isoformat())
#
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# Path: registration/utils.py
# def signin_member(member: Member, purpose: str) -> Visit:
# """
# Signs in a member, creating a new `Visit`
# :param member: the member to be signed in
# :param purpose: The reason for visit. E.g. Fix a bike or volunteer
# :return: a new `Visit`
# :raise: `AlreadySignedInError` or `ValidationError`
# """
# if not member_signed_in(member):
# instance = Visit.objects.create(member=member, purpose=purpose)
# form = VisitForm(instance=instance, data={'member': member, 'purpose': purpose})
# if form.is_valid():
# return instance
# raise ValidationError(instance)
#
# raise AlreadySignedInError
#
# class AlreadySignedInError(ValueError):
# pass
#
# def member_signed_in(member: Member, window: int = 4) -> bool:
# return get_signed_in_members(window=window).filter(member_id__in=[member.id]).exists()
#
# def get_signed_in_members(window: int = 4, end: Optional[datetime] = None) -> QuerySet:
# new_end = end if end else timezone.now()
# start = new_end - timedelta(hours=window)
# visits = Visit.objects.filter(created_at__lte=new_end, created_at__gte=start)
# return visits
, which may include functions, classes, or code. Output only the next line. | five_hours_ago = self.now - timedelta(hours=5) |
Next line prediction: <|code_start|>
class GetSignedInMembersTests(TestCase):
def setUp(self):
self.now = timezone.now()
self.member1 = mommy.make(Member)
self.member2 = mommy.make(Member)
self.member3 = mommy.make(Member)
three_hours_ago = self.now - timedelta(hours=3)
<|code_end|>
. Use current file imports:
(from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from model_mommy import mommy
from core.models import Visit
from registration.models import Member
from registration.utils import signin_member, AlreadySignedInError, member_signed_in, get_signed_in_members)
and context including class names, function names, or small code snippets from other files:
# Path: core/models.py
# class Visit(models.Model):
# VOLUNTEER = 'VOLUNTEER'
# FIX = 'FIX'
# BUILD = 'BUILD'
# WORKSHOP = 'WORKSHOP'
# VISIT = 'VISIT'
# DONATE = 'DONATE'
# STAFF = 'STAFF'
# PARTS = 'PARTS'
# BUY_BIKE = 'BUY_BIKE'
# TOUR = 'TOUR'
#
# visit_choices = (
# (VOLUNTEER, 'volunteer'),
# (FIX, 'fix bike'),
# (BUILD, 'build bike'),
# (WORKSHOP, 'workshop'),
# (VISIT, 'visit'),
# (DONATE, 'donate'),
# (STAFF, 'staff'),
# (PARTS, 'parts'),
# (BUY_BIKE, 'buy bike'),
# (TOUR, 'tour / visit')
# )
#
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE
# )
# created_at = models.DateTimeField(default=timezone.now)
# purpose = models.CharField(max_length=50, choices=visit_choices)
#
# def __str__(self):
# return '<Visit purpose: {purpose} created_at: {created_at}>'.format(purpose=self.purpose,
# created_at=self.created_at.isoformat())
#
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# Path: registration/utils.py
# def signin_member(member: Member, purpose: str) -> Visit:
# """
# Signs in a member, creating a new `Visit`
# :param member: the member to be signed in
# :param purpose: The reason for visit. E.g. Fix a bike or volunteer
# :return: a new `Visit`
# :raise: `AlreadySignedInError` or `ValidationError`
# """
# if not member_signed_in(member):
# instance = Visit.objects.create(member=member, purpose=purpose)
# form = VisitForm(instance=instance, data={'member': member, 'purpose': purpose})
# if form.is_valid():
# return instance
# raise ValidationError(instance)
#
# raise AlreadySignedInError
#
# class AlreadySignedInError(ValueError):
# pass
#
# def member_signed_in(member: Member, window: int = 4) -> bool:
# return get_signed_in_members(window=window).filter(member_id__in=[member.id]).exists()
#
# def get_signed_in_members(window: int = 4, end: Optional[datetime] = None) -> QuerySet:
# new_end = end if end else timezone.now()
# start = new_end - timedelta(hours=window)
# visits = Visit.objects.filter(created_at__lte=new_end, created_at__gte=start)
# return visits
. Output only the next line. | five_hours_ago = self.now - timedelta(hours=5) |
Next line prediction: <|code_start|>
class GetSignedInMembersTests(TestCase):
def setUp(self):
self.now = timezone.now()
self.member1 = mommy.make(Member)
self.member2 = mommy.make(Member)
self.member3 = mommy.make(Member)
three_hours_ago = self.now - timedelta(hours=3)
<|code_end|>
. Use current file imports:
(from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from model_mommy import mommy
from core.models import Visit
from registration.models import Member
from registration.utils import signin_member, AlreadySignedInError, member_signed_in, get_signed_in_members)
and context including class names, function names, or small code snippets from other files:
# Path: core/models.py
# class Visit(models.Model):
# VOLUNTEER = 'VOLUNTEER'
# FIX = 'FIX'
# BUILD = 'BUILD'
# WORKSHOP = 'WORKSHOP'
# VISIT = 'VISIT'
# DONATE = 'DONATE'
# STAFF = 'STAFF'
# PARTS = 'PARTS'
# BUY_BIKE = 'BUY_BIKE'
# TOUR = 'TOUR'
#
# visit_choices = (
# (VOLUNTEER, 'volunteer'),
# (FIX, 'fix bike'),
# (BUILD, 'build bike'),
# (WORKSHOP, 'workshop'),
# (VISIT, 'visit'),
# (DONATE, 'donate'),
# (STAFF, 'staff'),
# (PARTS, 'parts'),
# (BUY_BIKE, 'buy bike'),
# (TOUR, 'tour / visit')
# )
#
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE
# )
# created_at = models.DateTimeField(default=timezone.now)
# purpose = models.CharField(max_length=50, choices=visit_choices)
#
# def __str__(self):
# return '<Visit purpose: {purpose} created_at: {created_at}>'.format(purpose=self.purpose,
# created_at=self.created_at.isoformat())
#
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# Path: registration/utils.py
# def signin_member(member: Member, purpose: str) -> Visit:
# """
# Signs in a member, creating a new `Visit`
# :param member: the member to be signed in
# :param purpose: The reason for visit. E.g. Fix a bike or volunteer
# :return: a new `Visit`
# :raise: `AlreadySignedInError` or `ValidationError`
# """
# if not member_signed_in(member):
# instance = Visit.objects.create(member=member, purpose=purpose)
# form = VisitForm(instance=instance, data={'member': member, 'purpose': purpose})
# if form.is_valid():
# return instance
# raise ValidationError(instance)
#
# raise AlreadySignedInError
#
# class AlreadySignedInError(ValueError):
# pass
#
# def member_signed_in(member: Member, window: int = 4) -> bool:
# return get_signed_in_members(window=window).filter(member_id__in=[member.id]).exists()
#
# def get_signed_in_members(window: int = 4, end: Optional[datetime] = None) -> QuerySet:
# new_end = end if end else timezone.now()
# start = new_end - timedelta(hours=window)
# visits = Visit.objects.filter(created_at__lte=new_end, created_at__gte=start)
# return visits
. Output only the next line. | five_hours_ago = self.now - timedelta(hours=5) |
Continue the code snippet: <|code_start|>
class GetSignedInMembersTests(TestCase):
def setUp(self):
self.now = timezone.now()
self.member1 = mommy.make(Member)
self.member2 = mommy.make(Member)
self.member3 = mommy.make(Member)
<|code_end|>
. Use current file imports:
from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from model_mommy import mommy
from core.models import Visit
from registration.models import Member
from registration.utils import signin_member, AlreadySignedInError, member_signed_in, get_signed_in_members
and context (classes, functions, or code) from other files:
# Path: core/models.py
# class Visit(models.Model):
# VOLUNTEER = 'VOLUNTEER'
# FIX = 'FIX'
# BUILD = 'BUILD'
# WORKSHOP = 'WORKSHOP'
# VISIT = 'VISIT'
# DONATE = 'DONATE'
# STAFF = 'STAFF'
# PARTS = 'PARTS'
# BUY_BIKE = 'BUY_BIKE'
# TOUR = 'TOUR'
#
# visit_choices = (
# (VOLUNTEER, 'volunteer'),
# (FIX, 'fix bike'),
# (BUILD, 'build bike'),
# (WORKSHOP, 'workshop'),
# (VISIT, 'visit'),
# (DONATE, 'donate'),
# (STAFF, 'staff'),
# (PARTS, 'parts'),
# (BUY_BIKE, 'buy bike'),
# (TOUR, 'tour / visit')
# )
#
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE
# )
# created_at = models.DateTimeField(default=timezone.now)
# purpose = models.CharField(max_length=50, choices=visit_choices)
#
# def __str__(self):
# return '<Visit purpose: {purpose} created_at: {created_at}>'.format(purpose=self.purpose,
# created_at=self.created_at.isoformat())
#
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
#
# Path: registration/utils.py
# def signin_member(member: Member, purpose: str) -> Visit:
# """
# Signs in a member, creating a new `Visit`
# :param member: the member to be signed in
# :param purpose: The reason for visit. E.g. Fix a bike or volunteer
# :return: a new `Visit`
# :raise: `AlreadySignedInError` or `ValidationError`
# """
# if not member_signed_in(member):
# instance = Visit.objects.create(member=member, purpose=purpose)
# form = VisitForm(instance=instance, data={'member': member, 'purpose': purpose})
# if form.is_valid():
# return instance
# raise ValidationError(instance)
#
# raise AlreadySignedInError
#
# class AlreadySignedInError(ValueError):
# pass
#
# def member_signed_in(member: Member, window: int = 4) -> bool:
# return get_signed_in_members(window=window).filter(member_id__in=[member.id]).exists()
#
# def get_signed_in_members(window: int = 4, end: Optional[datetime] = None) -> QuerySet:
# new_end = end if end else timezone.now()
# start = new_end - timedelta(hours=window)
# visits = Visit.objects.filter(created_at__lte=new_end, created_at__gte=start)
# return visits
. Output only the next line. | three_hours_ago = self.now - timedelta(hours=3) |
Using the snippet: <|code_start|>
def email_generator():
url = 'http://randomword.setgetgo.com/get.php'
local = []
for idx in range(2):
r = requests.get(url)
local.append(r.text)
return '{0}.{1}@example.com'.format(*local)
def get_payment_type(pt):
payment_types = Payment.payment_choices
try:
return [payment_type for payment_type in payment_types if payment_type[1].lower() == pt.lower()][0]
except IndexError:
return 'UNKNOWN', 'Unknown'
def member_import():
filename = os.path.join(os.getcwd(), '2016 BCBC Membership Agreement (Responses) - Form Responses 1.csv')
<|code_end|>
, determine the next line of code. You have imports:
import csv
import os
import requests
import dateutil.parser
from django.utils import timezone
from django.db import IntegrityError
from core.models import Membership, Payment
from registration.models import Member
and context (class names, function names, or code) available:
# Path: core/models.py
# class Membership(models.Model):
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# renewed_at = models.DateTimeField(default=timezone.now)
# self_identification = models.CharField(max_length=255, null=True, blank=True)
# gender = models.CharField(max_length=255, null=True, blank=True)
# involvement = models.CharField(max_length=255, null=True, blank=True)
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE,
# related_name='memberships',
# blank=True,
# null=True
# )
# payment = models.OneToOneField(
# 'Payment',
# on_delete=models.CASCADE,
# related_name='membership',
# blank=False,
# null=True
# )
#
# @cached_property
# def expires_at(self):
# return self.renewed_at + relativedelta(years=1)
#
# class Payment(models.Model):
# payment_choices = (
# ('NONE', 'None'),
# ('CASH', 'Cash'),
# ('CHEQUE', 'Cheque'),
# ('VOLUNTEERING', 'Volunteering'),
# ('SQUARE', 'Square'),
# ('PAYPAL', 'PayPal'),
# ('YOUTH', 'Youth'),
# ('UNKNOWN', 'Unknown')
# )
# type = models.CharField(max_length=12, choices=payment_choices, default='NONE')
# created_at = models.DateTimeField(auto_now_add=True)
#
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
. Output only the next line. | with open(filename, newline='') as csvfile: |
Using the snippet: <|code_start|>
try:
renewed_at = dateutil.parser.parse(row.get('timestamp', None))
except ValueError:
renewed_at = timezone.now()
try:
new_member = Member.objects.create(
email=row.get('email', None),
email_consent=row.get('email_consent', False),
first_name=row.get('first_name'),
last_name=row.get('last_name'),
preferred_name=row.get('handle', None),
date_of_birth=dob,
guardian_name=row.get('guardian', None),
phone=row.get('phone', None),
street=row.get('address', None),
city=row.get('city', None),
province=row.get('province', None),
country=row.get('country', None),
post_code=row.get('postal', None),
waiver=waiver
)
payment = Payment.objects.create(
type=get_payment_type(row.get('payment'))[0],
)
Membership.objects.create(
renewed_at=renewed_at,
<|code_end|>
, determine the next line of code. You have imports:
import csv
import os
import requests
import dateutil.parser
from django.utils import timezone
from django.db import IntegrityError
from core.models import Membership, Payment
from registration.models import Member
and context (class names, function names, or code) available:
# Path: core/models.py
# class Membership(models.Model):
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# renewed_at = models.DateTimeField(default=timezone.now)
# self_identification = models.CharField(max_length=255, null=True, blank=True)
# gender = models.CharField(max_length=255, null=True, blank=True)
# involvement = models.CharField(max_length=255, null=True, blank=True)
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE,
# related_name='memberships',
# blank=True,
# null=True
# )
# payment = models.OneToOneField(
# 'Payment',
# on_delete=models.CASCADE,
# related_name='membership',
# blank=False,
# null=True
# )
#
# @cached_property
# def expires_at(self):
# return self.renewed_at + relativedelta(years=1)
#
# class Payment(models.Model):
# payment_choices = (
# ('NONE', 'None'),
# ('CASH', 'Cash'),
# ('CHEQUE', 'Cheque'),
# ('VOLUNTEERING', 'Volunteering'),
# ('SQUARE', 'Square'),
# ('PAYPAL', 'PayPal'),
# ('YOUTH', 'Youth'),
# ('UNKNOWN', 'Unknown')
# )
# type = models.CharField(max_length=12, choices=payment_choices, default='NONE')
# created_at = models.DateTimeField(auto_now_add=True)
#
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
. Output only the next line. | self_identification=row.get('self_identification', None), |
Predict the next line for this snippet: <|code_start|>
def email_generator():
url = 'http://randomword.setgetgo.com/get.php'
local = []
for idx in range(2):
r = requests.get(url)
local.append(r.text)
return '{0}.{1}@example.com'.format(*local)
def get_payment_type(pt):
payment_types = Payment.payment_choices
try:
return [payment_type for payment_type in payment_types if payment_type[1].lower() == pt.lower()][0]
<|code_end|>
with the help of current file imports:
import csv
import os
import requests
import dateutil.parser
from django.utils import timezone
from django.db import IntegrityError
from core.models import Membership, Payment
from registration.models import Member
and context from other files:
# Path: core/models.py
# class Membership(models.Model):
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# renewed_at = models.DateTimeField(default=timezone.now)
# self_identification = models.CharField(max_length=255, null=True, blank=True)
# gender = models.CharField(max_length=255, null=True, blank=True)
# involvement = models.CharField(max_length=255, null=True, blank=True)
# member = models.ForeignKey(
# 'registration.Member',
# on_delete=models.CASCADE,
# related_name='memberships',
# blank=True,
# null=True
# )
# payment = models.OneToOneField(
# 'Payment',
# on_delete=models.CASCADE,
# related_name='membership',
# blank=False,
# null=True
# )
#
# @cached_property
# def expires_at(self):
# return self.renewed_at + relativedelta(years=1)
#
# class Payment(models.Model):
# payment_choices = (
# ('NONE', 'None'),
# ('CASH', 'Cash'),
# ('CHEQUE', 'Cheque'),
# ('VOLUNTEERING', 'Volunteering'),
# ('SQUARE', 'Square'),
# ('PAYPAL', 'PayPal'),
# ('YOUTH', 'Youth'),
# ('UNKNOWN', 'Unknown')
# )
# type = models.CharField(max_length=12, choices=payment_choices, default='NONE')
# created_at = models.DateTimeField(auto_now_add=True)
#
# Path: registration/models.py
# class Member(models.Model):
# involvement_choices = (
# ('21cd9799b6', 'General (receive email)'),
# ('3a5a719017', 'Volunteering'),
# ('0ebb0b5468', 'Events'),
# ('84309225e7', 'Workshops'),
# ('c96d389517', 'Shop'),
# )
#
# user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,
# null=True, blank=True)
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=False,
# null=True,
# blank=True,
# )
# email_consent = models.BooleanField(default=False, blank=False)
# first_name = models.CharField(max_length=255, null=False, blank=False)
# last_name = models.CharField(max_length=255, null=False, blank=False)
# preferred_name = models.CharField(max_length=255, null=True, blank=True)
# date_of_birth = models.DateField(null=True, blank=True)
# guardian_name = models.CharField(max_length=255, null=True, blank=True)
# phone = models.CharField(max_length=20, null=True, blank=True)
# street = models.CharField(max_length=255, null=True, blank=True)
# city = models.CharField(max_length=255, null=True, blank=True)
# province = models.CharField(max_length=255, null=True, blank=True)
# country = models.CharField(max_length=255, null=True, blank=True)
# post_code = models.CharField(max_length=20, null=True, blank=False)
# waiver = models.DateTimeField(null=True, blank=True)
# is_active = models.BooleanField(default=True)
# notes = models.TextField(null=True, blank=True)
# suspended = models.BooleanField(default=False)
# banned = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# modified_at = models.DateTimeField(auto_now=True)
# involvement = MultiSelectField(choices=involvement_choices, null=True, blank=True)
#
# @property
# def full_name(self):
# return self.get_full_name()
#
# def get_full_name(self):
# # The user is identified by their email address
# return '{0} {1}'.format(self.first_name, self.last_name)
#
# def get_short_name(self):
# # The user is identified by their email address
# if self.email:
# return self.email
# else:
# return self.last_name
#
# def __str__(self): # __unicode__ on Python 2
# return self.email
, which may contain function names, class names, or code. Output only the next line. | except IndexError: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.