| Instruction | output_code |
|---|---|
Given the code snippet: <|code_start|>
class TestServiceSSH():
def setup(self):
self._patcher_tcp_communication = patch(
'zabby.items.net.tcp.tcp_communication')
self.mock_tcp_communication = self._patcher_tcp_communication.start()
self.service_name = 'ssh'
def teardown(self):
self._patcher_tcp_communication.stop()
def test_running_if_server_message_matches_expectations(self):
self.mock_tcp_communication.return_value = [
b('SSH-2.0-OpenSSH_6.0p1 Debian-4\n')]
<|code_end|>
, generate the next line using the imports in this file:
from mock import patch
from nose.tools import assert_true, assert_false, assert_raises
from zabby.core.exceptions import WrongArgumentError
from zabby.core.six import b
from zabby.items.net import tcp
and context (functions, classes, or occasionally code) from other files:
# Path: zabby/core/exceptions.py
# class WrongArgumentError(Exception):
# """ Wrong argument was passed to a function"""
#
# Path: zabby/core/six.py
# def b(s):
# return s.encode('utf-8')
#
# Path: zabby/items/net/tcp.py
# LOG = logging.getLogger(__name__)
# SERVICES = {
# 'ssh': 22,
# }
# def service(service_name, ip='127.0.0.1', port=None, timeout=1.0):
# def _check_ssh(ip, port, timeout):
. Output only the next line. | running = bool(tcp.service(self.service_name)) |
Given the code snippet: <|code_start|>
class TestSize():
def setup(self):
self.host_os = Mock()
self.host_os.AVAILABLE_MEMORY_TYPES = set(['total', 'free', 'pfree'])
def test_size_raises_exception_if_wrong_mode_is_supplied(self):
<|code_end|>
, generate the next line using the imports in this file:
from mock import Mock
from nose.tools import assert_raises
from zabby.core.exceptions import WrongArgumentError
from zabby.items.vm import memory
and context (functions, classes, or occasionally code) from other files:
# Path: zabby/core/exceptions.py
# class WrongArgumentError(Exception):
# """ Wrong argument was passed to a function"""
#
# Path: zabby/items/vm/memory.py
# def size(mode='total', host_os=detect_host_os()):
. Output only the next line. | assert_raises(WrongArgumentError, memory.size, 'wrong', self.host_os) |
Continue the code snippet: <|code_start|>
class TestSize():
def setup(self):
self.host_os = Mock()
self.host_os.AVAILABLE_MEMORY_TYPES = set(['total', 'free', 'pfree'])
def test_size_raises_exception_if_wrong_mode_is_supplied(self):
<|code_end|>
. Use current file imports:
from mock import Mock
from nose.tools import assert_raises
from zabby.core.exceptions import WrongArgumentError
from zabby.items.vm import memory
and context (classes, functions, or code) from other files:
# Path: zabby/core/exceptions.py
# class WrongArgumentError(Exception):
# """ Wrong argument was passed to a function"""
#
# Path: zabby/items/vm/memory.py
# def size(mode='total', host_os=detect_host_os()):
. Output only the next line. | assert_raises(WrongArgumentError, memory.size, 'wrong', self.host_os) |
Using the snippet: <|code_start|>
__all__ = ['service', ]
LOG = logging.getLogger(__name__)
def service(service_name, ip='127.0.0.1', port=None, timeout=1.0):
"""
Returns 1 if service running on port accepts connections and behaves as
expected, 0 otherwise
:param service_name: specifies expected behaviour and port
ssh:
behavior: should respond with a greeting message upon connection
port: 22
:param port: overrides port specified by service_name
:raises: WrongArgumentError if unsupported service_name is supplied,
port is not an integer in range [0,65535] or
timeout is not a positive float
"""
validate_mode(service_name, SERVICES.keys())
if port:
try:
port = int(port)
if port < 0 or 65535 < port:
raise ValueError()
except ValueError:
<|code_end|>
, determine the next line of code. You have imports:
import logging
import re
from zabby.core.exceptions import WrongArgumentError
from zabby.core.utils import validate_mode, tcp_communication
and context (class names, function names, or code) available:
# Path: zabby/core/exceptions.py
# class WrongArgumentError(Exception):
# """ Wrong argument was passed to a function"""
#
# Path: zabby/core/utils.py
# def validate_mode(mode, available_modes):
# """
# Checks if mode is one of available_modes
#
# :raises: WrongArgumentError if mode is not one of available_modes
# """
# if mode not in available_modes:
# raise WrongArgumentError(
# "Unknown mode '{mode}' should be one of {modes}".format(
# mode=mode, modes=available_modes))
#
# def tcp_communication(port, host='localhost', requests=list(),
# receive_first=False, timeout=1.0):
# """
# Connects to port, optionally sending requests and returns any responses
#
# :param requests: list of binary objects that will be sent in order
# it is expected that there will be a response for every request
# :param receive_first: if true will try to receive data before sending any
# requests
#
# :raises: IOError, no exception handling is done in this function, most
# exceptions will be socket exceptions
# """
# if any([not isinstance(request, binary_type) for request in requests]):
# raise WrongArgumentError("Every request should be in binary. "
# "Requests: '{0}'".format(requests))
#
# conn = None
# responses = list()
# try:
# conn = socket.create_connection((host, port), timeout=timeout)
# if receive_first:
# responses.append(conn.recv(4096))
#
# for request in requests:
# conn.sendall(request)
# responses.append(conn.recv(4096))
#
# finally:
# if conn is not None:
# conn.close()
#
# return responses
. Output only the next line. | raise WrongArgumentError( |
Given snippet: <|code_start|>
__all__ = ['service', ]
LOG = logging.getLogger(__name__)
def service(service_name, ip='127.0.0.1', port=None, timeout=1.0):
"""
Returns 1 if service running on port accepts connections and behaves as
expected, 0 otherwise
:param service_name: specifies expected behaviour and port
ssh:
behavior: should respond with a greeting message upon connection
port: 22
:param port: overrides port specified by service_name
:raises: WrongArgumentError if unsupported service_name is supplied,
port is not an integer in range [0,65535] or
timeout is not a positive float
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
import re
from zabby.core.exceptions import WrongArgumentError
from zabby.core.utils import validate_mode, tcp_communication
and context:
# Path: zabby/core/exceptions.py
# class WrongArgumentError(Exception):
# """ Wrong argument was passed to a function"""
#
# Path: zabby/core/utils.py
# def validate_mode(mode, available_modes):
# """
# Checks if mode is one of available_modes
#
# :raises: WrongArgumentError if mode is not one of available_modes
# """
# if mode not in available_modes:
# raise WrongArgumentError(
# "Unknown mode '{mode}' should be one of {modes}".format(
# mode=mode, modes=available_modes))
#
# def tcp_communication(port, host='localhost', requests=list(),
# receive_first=False, timeout=1.0):
# """
# Connects to port, optionally sending requests and returns any responses
#
# :param requests: list of binary objects that will be sent in order
# it is expected that there will be a response for every request
# :param receive_first: if true will try to receive data before sending any
# requests
#
# :raises: IOError, no exception handling is done in this function, most
# exceptions will be socket exceptions
# """
# if any([not isinstance(request, binary_type) for request in requests]):
# raise WrongArgumentError("Every request should be in binary. "
# "Requests: '{0}'".format(requests))
#
# conn = None
# responses = list()
# try:
# conn = socket.create_connection((host, port), timeout=timeout)
# if receive_first:
# responses.append(conn.recv(4096))
#
# for request in requests:
# conn.sendall(request)
# responses.append(conn.recv(4096))
#
# finally:
# if conn is not None:
# conn.close()
#
# return responses
which might include code, classes, or functions. Output only the next line. | validate_mode(service_name, SERVICES.keys()) |
Here is a snippet: <|code_start|> except ValueError:
raise WrongArgumentError(
"Port must be an integer in range [0,65535], got '{0}'".format(
port))
else:
port = SERVICES[service_name]
try:
timeout = float(timeout)
if timeout < 0.0:
raise ValueError()
except:
raise WrongArgumentError(
"Timeout must be float greater than 0, got '{0}'".format(timeout))
if service_name == 'ssh':
running = _check_ssh(ip, port, timeout)
else:
running = False
return int(running)
SERVICES = {
'ssh': 22,
}
def _check_ssh(ip, port, timeout):
running = False
try:
<|code_end|>
. Write the next line using the current file imports:
import logging
import re
from zabby.core.exceptions import WrongArgumentError
from zabby.core.utils import validate_mode, tcp_communication
and context from other files:
# Path: zabby/core/exceptions.py
# class WrongArgumentError(Exception):
# """ Wrong argument was passed to a function"""
#
# Path: zabby/core/utils.py
# def validate_mode(mode, available_modes):
# """
# Checks if mode is one of available_modes
#
# :raises: WrongArgumentError if mode is not one of available_modes
# """
# if mode not in available_modes:
# raise WrongArgumentError(
# "Unknown mode '{mode}' should be one of {modes}".format(
# mode=mode, modes=available_modes))
#
# def tcp_communication(port, host='localhost', requests=list(),
# receive_first=False, timeout=1.0):
# """
# Connects to port, optionally sending requests and returns any responses
#
# :param requests: list of binary objects that will be sent in order
# it is expected that there will be a response for every request
# :param receive_first: if true will try to receive data before sending any
# requests
#
# :raises: IOError, no exception handling is done in this function, most
# exceptions will be socket exceptions
# """
# if any([not isinstance(request, binary_type) for request in requests]):
# raise WrongArgumentError("Every request should be in binary. "
# "Requests: '{0}'".format(requests))
#
# conn = None
# responses = list()
# try:
# conn = socket.create_connection((host, port), timeout=timeout)
# if receive_first:
# responses.append(conn.recv(4096))
#
# for request in requests:
# conn.sendall(request)
# responses.append(conn.recv(4096))
#
# finally:
# if conn is not None:
# conn.close()
#
# return responses
, which may include functions, classes, or code. Output only the next line. | responses = tcp_communication(port, ip, receive_first=True, |
Using the snippet: <|code_start|>
@attr(os='linux')
class TestMd5Sum():
FILE_PATH = '/tmp/zabby_md5_test_file'
FILE_CONTENT = '''\
line 1
'''
def setup(self):
with open(self.FILE_PATH, 'w') as f:
f.write(self.FILE_CONTENT)
def teardown(self):
os.remove(self.FILE_PATH)
def test_result_is_equal_to_coreutils_md5sum(self):
<|code_end|>
, determine the next line of code. You have imports:
import os
from nose.plugins.attrib import attr
from nose.tools import assert_equal
from zabby.core.utils import sh
from zabby.items.vfs import file
and context (class names, function names, or code) available:
# Path: zabby/core/utils.py
# def sh(command, timeout=1.0, wait_step=0.01, raise_on_empty_out=True,
# raise_on_nonempty_err=False):
# """
# Creates and returns a function that when called will run command with shell
# and return it's output.
#
# Command can contain replacement fields as described in python documentation
# http://docs.python.org/library/string.html?highlight=formatter#format-string-syntax
#
# sh('command {0}')('argument') will call 'command argument'
#
# :param timeout: if command does not terminate in it will be killed and
# OperatingSystemError will be raised
# :param wait_step: poll interval for process running command
# :param raise_on_empty_out: whether exception should be raised if command
# does not write anything to stdout
# :param raise_on_nonempty_err: whether exception should be raised if command
# writes to stderr
#
# :raises: WrongArgumentError if command contains replacement fields and
# resulting function is called without arguments
# :raises: OperatingSystemError if command does not terminate until timeout
# """
#
# def call_command(*args):
# try:
# formatted_command = command.format(*args)
# except IndexError:
# raise WrongArgumentError(
# "'{0}' not enough arguments. Called with {1}".format(command,
# args))
# process = Popen(formatted_command, stdout=PIPE, stderr=PIPE,
# shell=True, close_fds=True, universal_newlines=True)
# try:
# if timeout:
# wait_time_remaining = timeout
# while process.poll() is None and wait_time_remaining > 0:
# time.sleep(wait_step)
# wait_time_remaining -= wait_step
#
# if wait_time_remaining <= 0:
# process.kill()
# raise OperatingSystemError(
# "{0} have not completed in {1} seconds".format(
# formatted_command, timeout))
# finally:
# (out, err) = process.communicate()
#
# (out, err) = (out.rstrip(), err.rstrip())
#
# if out == '' and raise_on_empty_out:
# raise OperatingSystemError(
# "'{0}' has not written to stdout".format(formatted_command))
#
# if err != '':
# message = "'{0}' has written to stderr: {1}".format(
# formatted_command, err)
#
# if raise_on_nonempty_err:
# raise OperatingSystemError(message)
# else:
# log = logging.getLogger('sh')
# log.warn(message)
#
# return out
#
# return call_command
#
# Path: zabby/items/vfs/file.py
# def md5sum(file_path, block_size=8192):
. Output only the next line. | coreutils_md5sum = sh('md5sum {0}')(self.FILE_PATH).split()[0] |
Based on the snippet: <|code_start|>
@attr(os='linux')
class TestMd5Sum():
FILE_PATH = '/tmp/zabby_md5_test_file'
FILE_CONTENT = '''\
line 1
'''
def setup(self):
with open(self.FILE_PATH, 'w') as f:
f.write(self.FILE_CONTENT)
def teardown(self):
os.remove(self.FILE_PATH)
def test_result_is_equal_to_coreutils_md5sum(self):
coreutils_md5sum = sh('md5sum {0}')(self.FILE_PATH).split()[0]
<|code_end|>
, predict the immediate next line with the help of imports:
import os
from nose.plugins.attrib import attr
from nose.tools import assert_equal
from zabby.core.utils import sh
from zabby.items.vfs import file
and context (classes, functions, sometimes code) from other files:
# Path: zabby/core/utils.py
# def sh(command, timeout=1.0, wait_step=0.01, raise_on_empty_out=True,
# raise_on_nonempty_err=False):
# """
# Creates and returns a function that when called will run command with shell
# and return it's output.
#
# Command can contain replacement fields as described in python documentation
# http://docs.python.org/library/string.html?highlight=formatter#format-string-syntax
#
# sh('command {0}')('argument') will call 'command argument'
#
# :param timeout: if command does not terminate in it will be killed and
# OperatingSystemError will be raised
# :param wait_step: poll interval for process running command
# :param raise_on_empty_out: whether exception should be raised if command
# does not write anything to stdout
# :param raise_on_nonempty_err: whether exception should be raised if command
# writes to stderr
#
# :raises: WrongArgumentError if command contains replacement fields and
# resulting function is called without arguments
# :raises: OperatingSystemError if command does not terminate until timeout
# """
#
# def call_command(*args):
# try:
# formatted_command = command.format(*args)
# except IndexError:
# raise WrongArgumentError(
# "'{0}' not enough arguments. Called with {1}".format(command,
# args))
# process = Popen(formatted_command, stdout=PIPE, stderr=PIPE,
# shell=True, close_fds=True, universal_newlines=True)
# try:
# if timeout:
# wait_time_remaining = timeout
# while process.poll() is None and wait_time_remaining > 0:
# time.sleep(wait_step)
# wait_time_remaining -= wait_step
#
# if wait_time_remaining <= 0:
# process.kill()
# raise OperatingSystemError(
# "{0} have not completed in {1} seconds".format(
# formatted_command, timeout))
# finally:
# (out, err) = process.communicate()
#
# (out, err) = (out.rstrip(), err.rstrip())
#
# if out == '' and raise_on_empty_out:
# raise OperatingSystemError(
# "'{0}' has not written to stdout".format(formatted_command))
#
# if err != '':
# message = "'{0}' has written to stderr: {1}".format(
# formatted_command, err)
#
# if raise_on_nonempty_err:
# raise OperatingSystemError(message)
# else:
# log = logging.getLogger('sh')
# log.warn(message)
#
# return out
#
# return call_command
#
# Path: zabby/items/vfs/file.py
# def md5sum(file_path, block_size=8192):
. Output only the next line. | zabby_md5sum = file.md5sum(self.FILE_PATH) |
Continue the code snippet: <|code_start|>
urlpatterns = patterns(
'data_drivers.views',
url(r'^$', 'api_root', name='api_root'),
<|code_end|>
. Use current file imports:
from django.conf.urls.defaults import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (
LibreMetadataList, SourceDataVersionList, SourceDataVersionDetail,
SourceDetail, SourceList, SourceGetAll, SourceGetOne)
and context (classes, functions, or code) from other files:
# Path: libre/apps/data_drivers/views.py
# class LibreMetadataList(generics.GenericAPIView):
# def get(self, request, *args, **kwargs):
# return Response(dict([(i, getattr(main, i)) for i in ['__author__', '__copyright__', '__credits__', '__email__', '__license__', '__maintainer__', '__status__', '__version__', '__version_info__']]))
#
# class SourceDataVersionList(CustomListAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDataVersionDetail(CustomRetrieveAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDetail(CustomRetrieveAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceList(CustomListAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceGetAll(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_all(parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
#
# class SourceGetOne(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_one(int(kwargs['id']), parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
. Output only the next line. | url(r'^libre/$', LibreMetadataList.as_view(), name='libremetadata-list'), |
Next line prediction: <|code_start|>
urlpatterns = patterns(
'data_drivers.views',
url(r'^$', 'api_root', name='api_root'),
url(r'^libre/$', LibreMetadataList.as_view(), name='libremetadata-list'),
url(r'^sources/$', SourceList.as_view(), name='source-list'),
url(r'^sources/(?P<slug>[-\w]+)/$', SourceDetail.as_view(), name='source-detail'),
url(r'^sources/(?P<slug>[-\w]+)/data/$', SourceGetAll.as_view(), name='source-get_all'),
url(r'^sources/(?P<slug>[-\w]+)/data/(?P<id>[0-9]+)/$', SourceGetOne.as_view(), name='source-get_one'),
<|code_end|>
. Use current file imports:
from django.conf.urls.defaults import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (
    LibreMetadataList, SourceDataVersionList, SourceDataVersionDetail,
    SourceDetail, SourceList, SourceGetAll, SourceGetOne)
and context including class names, function names, or small code snippets from other files:
# Path: libre/apps/data_drivers/views.py
# class LibreMetadataList(generics.GenericAPIView):
# def get(self, request, *args, **kwargs):
# return Response(dict([(i, getattr(main, i)) for i in ['__author__', '__copyright__', '__credits__', '__email__', '__license__', '__maintainer__', '__status__', '__version__', '__version_info__']]))
#
# class SourceDataVersionList(CustomListAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDataVersionDetail(CustomRetrieveAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDetail(CustomRetrieveAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceList(CustomListAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceGetAll(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_all(parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
#
# class SourceGetOne(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_one(int(kwargs['id']), parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
. Output only the next line. | url(r'^sources/data_version/$', SourceDataVersionList.as_view(), name='sourcedataversion-list'), |
Next line prediction: <|code_start|>
urlpatterns = patterns(
'data_drivers.views',
url(r'^$', 'api_root', name='api_root'),
url(r'^libre/$', LibreMetadataList.as_view(), name='libremetadata-list'),
url(r'^sources/$', SourceList.as_view(), name='source-list'),
url(r'^sources/(?P<slug>[-\w]+)/$', SourceDetail.as_view(), name='source-detail'),
url(r'^sources/(?P<slug>[-\w]+)/data/$', SourceGetAll.as_view(), name='source-get_all'),
url(r'^sources/(?P<slug>[-\w]+)/data/(?P<id>[0-9]+)/$', SourceGetOne.as_view(), name='source-get_one'),
url(r'^sources/data_version/$', SourceDataVersionList.as_view(), name='sourcedataversion-list'),
<|code_end|>
. Use current file imports:
from django.conf.urls.defaults import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (
    LibreMetadataList, SourceDataVersionList, SourceDataVersionDetail,
    SourceDetail, SourceList, SourceGetAll, SourceGetOne)
and context including class names, function names, or small code snippets from other files:
# Path: libre/apps/data_drivers/views.py
# class LibreMetadataList(generics.GenericAPIView):
# def get(self, request, *args, **kwargs):
# return Response(dict([(i, getattr(main, i)) for i in ['__author__', '__copyright__', '__credits__', '__email__', '__license__', '__maintainer__', '__status__', '__version__', '__version_info__']]))
#
# class SourceDataVersionList(CustomListAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDataVersionDetail(CustomRetrieveAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDetail(CustomRetrieveAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceList(CustomListAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceGetAll(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_all(parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
#
# class SourceGetOne(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_one(int(kwargs['id']), parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
. Output only the next line. | url(r'^sources/data_versions/(?P<pk>[0-9]+)/$', SourceDataVersionDetail.as_view(), name='sourcedataversion-detail'), |
Based on the snippet: <|code_start|>
urlpatterns = patterns(
'data_drivers.views',
url(r'^$', 'api_root', name='api_root'),
url(r'^libre/$', LibreMetadataList.as_view(), name='libremetadata-list'),
url(r'^sources/$', SourceList.as_view(), name='source-list'),
<|code_end|>
, predict the immediate next line with the help of imports:
from django.conf.urls.defaults import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (
LibreMetadataList, SourceDataVersionList, SourceDataVersionDetail,
SourceDetail, SourceList, SourceGetAll, SourceGetOne)
and context (classes, functions, sometimes code) from other files:
# Path: libre/apps/data_drivers/views.py
# class LibreMetadataList(generics.GenericAPIView):
# def get(self, request, *args, **kwargs):
# return Response(dict([(i, getattr(main, i)) for i in ['__author__', '__copyright__', '__credits__', '__email__', '__license__', '__maintainer__', '__status__', '__version__', '__version_info__']]))
#
# class SourceDataVersionList(CustomListAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDataVersionDetail(CustomRetrieveAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDetail(CustomRetrieveAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceList(CustomListAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceGetAll(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_all(parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
#
# class SourceGetOne(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_one(int(kwargs['id']), parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
. Output only the next line. | url(r'^sources/(?P<slug>[-\w]+)/$', SourceDetail.as_view(), name='source-detail'), |
Based on the snippet: <|code_start|>
urlpatterns = patterns(
'data_drivers.views',
url(r'^$', 'api_root', name='api_root'),
url(r'^libre/$', LibreMetadataList.as_view(), name='libremetadata-list'),
<|code_end|>
, predict the immediate next line with the help of imports:
from django.conf.urls.defaults import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (
LibreMetadataList, SourceDataVersionList, SourceDataVersionDetail,
SourceDetail, SourceList, SourceGetAll, SourceGetOne)
and context (classes, functions, sometimes code) from other files:
# Path: libre/apps/data_drivers/views.py
# class LibreMetadataList(generics.GenericAPIView):
# def get(self, request, *args, **kwargs):
# return Response(dict([(i, getattr(main, i)) for i in ['__author__', '__copyright__', '__credits__', '__email__', '__license__', '__maintainer__', '__status__', '__version__', '__version_info__']]))
#
# class SourceDataVersionList(CustomListAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDataVersionDetail(CustomRetrieveAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDetail(CustomRetrieveAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceList(CustomListAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceGetAll(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_all(parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
#
# class SourceGetOne(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_one(int(kwargs['id']), parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
. Output only the next line. | url(r'^sources/$', SourceList.as_view(), name='source-list'), |
Given the code snippet: <|code_start|>
urlpatterns = patterns(
'data_drivers.views',
url(r'^$', 'api_root', name='api_root'),
url(r'^libre/$', LibreMetadataList.as_view(), name='libremetadata-list'),
url(r'^sources/$', SourceList.as_view(), name='source-list'),
url(r'^sources/(?P<slug>[-\w]+)/$', SourceDetail.as_view(), name='source-detail'),
<|code_end|>
, generate the next line using the imports in this file:
from django.conf.urls.defaults import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (
LibreMetadataList, SourceDataVersionList, SourceDataVersionDetail,
SourceDetail, SourceList, SourceGetAll, SourceGetOne)
and context (functions, classes, or occasionally code) from other files:
# Path: libre/apps/data_drivers/views.py
# class LibreMetadataList(generics.GenericAPIView):
# def get(self, request, *args, **kwargs):
# return Response(dict([(i, getattr(main, i)) for i in ['__author__', '__copyright__', '__credits__', '__email__', '__license__', '__maintainer__', '__status__', '__version__', '__version_info__']]))
#
# class SourceDataVersionList(CustomListAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDataVersionDetail(CustomRetrieveAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDetail(CustomRetrieveAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceList(CustomListAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceGetAll(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_all(parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
#
# class SourceGetOne(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_one(int(kwargs['id']), parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
. Output only the next line. | url(r'^sources/(?P<slug>[-\w]+)/data/$', SourceGetAll.as_view(), name='source-get_all'), |
Predict the next line for this snippet: <|code_start|>
urlpatterns = patterns(
'data_drivers.views',
url(r'^$', 'api_root', name='api_root'),
url(r'^libre/$', LibreMetadataList.as_view(), name='libremetadata-list'),
url(r'^sources/$', SourceList.as_view(), name='source-list'),
url(r'^sources/(?P<slug>[-\w]+)/$', SourceDetail.as_view(), name='source-detail'),
url(r'^sources/(?P<slug>[-\w]+)/data/$', SourceGetAll.as_view(), name='source-get_all'),
<|code_end|>
with the help of current file imports:
from django.conf.urls.defaults import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (
LibreMetadataList, SourceDataVersionList, SourceDataVersionDetail,
SourceDetail, SourceList, SourceGetAll, SourceGetOne)
and context from other files:
# Path: libre/apps/data_drivers/views.py
# class LibreMetadataList(generics.GenericAPIView):
# def get(self, request, *args, **kwargs):
# return Response(dict([(i, getattr(main, i)) for i in ['__author__', '__copyright__', '__credits__', '__email__', '__license__', '__maintainer__', '__status__', '__version__', '__version_info__']]))
#
# class SourceDataVersionList(CustomListAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDataVersionDetail(CustomRetrieveAPIView):
# serializer_class = SourceDataVersionSerializer
#
# def get_queryset(self):
# return SourceDataVersion.objects.filter(ready=True)
#
# class SourceDetail(CustomRetrieveAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceList(CustomListAPIView):
# serializer_class = SourceSerializer
#
# def get_queryset(self):
# return Source.allowed.for_user(self.request.user)
#
# class SourceGetAll(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_all(parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
#
# class SourceGetOne(LIBREView):
# def get(self, request, *args, **kwargs):
# initial_datetime = datetime.datetime.now()
#
# source = self.get_object()
# self.get_renderer_extra_variables(request)
# result = source.get_one(int(kwargs['id']), parameters=parse_request(request))
# logger.debug('Total view elapsed time: %s' % (datetime.datetime.now() - initial_datetime))
#
# return CustomResponse(result)
, which may contain function names, class names, or code. Output only the next line. | url(r'^sources/(?P<slug>[-\w]+)/data/(?P<id>[0-9]+)/$', SourceGetOne.as_view(), name='source-get_one'), |
Continue the code snippet: <|code_start|>
class Aggregate(object):
def __init__(self, argument):
self.argument = argument
self.field = argument
self.properties = None
if '.' in self.argument:
# Aggregate by an element property
self.field, self.properties = self.argument.split('.', 1)
def execute(self, elements):
try:
return self._execute(elements)
except KeyError:
raise LIBREFieldError('Unknown field: %s' % self.argument)
except AttributeError as exception:
raise LIBREFieldError('Field property error; %s' % exception)
except TypeError as exception:
raise LIBREFieldError('Field aggregation error; %s' % exception)
class Count(Aggregate):
def _execute(self, elements):
if self.argument == '*':
return len(list(elements))
else:
if self.properties:
<|code_end|>
. Use current file imports:
from operator import itemgetter
from .exceptions import LIBREFieldError
from .utils import return_attrib
and context (classes, functions, or code) from other files:
# Path: libre/apps/data_drivers/utils.py
# def return_attrib(obj, attrib):
# return reduce(get_value, attrib.split(u'.'), obj)
. Output only the next line. | return len(set([return_attrib(itemgetter(self.field)(element), self.properties) for element in elements])) |
Given snippet: <|code_start|>from __future__ import absolute_import
logger = logging.getLogger(__name__)
enclosed_parser = pyparsing.Forward()
nestedBrackets = pyparsing.nestedExpr('[', ']', content=enclosed_parser)
enclosed_parser << (pyparsing.Word(pyparsing.alphanums + '-' + '.' + '(' + ')') | ',' | nestedBrackets)
DATA_TYPE_FUNCTIONS = {
DATA_TYPE_STRING: lambda x: unicode(x).strip(),
DATA_TYPE_NUMBER: lambda x: convert_to_number(x),
DATA_TYPE_DATETIME: lambda x: parse(x),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from HTMLParser import HTMLParser
from operator import itemgetter
from urllib import unquote_plus
from dateutil.parser import parse
from shapely import geometry
from .literals import (
DATA_TYPE_DATE, DATA_TYPE_DATETIME, DATA_TYPE_NUMBER, DATA_TYPE_STRING,
DATA_TYPE_TIME, THOUSAND_SYMBOL, DECIMAL_SYMBOL)
from .exceptions import LIBREValueError
from .models import Source
import logging
import types
import pyparsing
and context:
# Path: libre/apps/data_drivers/literals.py
# DATA_TYPE_DATE = 4
#
# DATA_TYPE_DATETIME = 3
#
# DATA_TYPE_NUMBER = 2
#
# DATA_TYPE_STRING = 1
#
# DATA_TYPE_TIME = 5
#
# THOUSAND_SYMBOL = ','
#
# DECIMAL_SYMBOL = '.'
which might include code, classes, or functions. Output only the next line. | DATA_TYPE_DATE: lambda x: parse(x).date(), |
Continue the code snippet: <|code_start|>from __future__ import absolute_import
logger = logging.getLogger(__name__)
enclosed_parser = pyparsing.Forward()
nestedBrackets = pyparsing.nestedExpr('[', ']', content=enclosed_parser)
enclosed_parser << (pyparsing.Word(pyparsing.alphanums + '-' + '.' + '(' + ')') | ',' | nestedBrackets)
DATA_TYPE_FUNCTIONS = {
DATA_TYPE_STRING: lambda x: unicode(x).strip(),
DATA_TYPE_NUMBER: lambda x: convert_to_number(x),
<|code_end|>
. Use current file imports:
from HTMLParser import HTMLParser
from operator import itemgetter
from urllib import unquote_plus
from dateutil.parser import parse
from shapely import geometry
from .literals import (
DATA_TYPE_DATE, DATA_TYPE_DATETIME, DATA_TYPE_NUMBER, DATA_TYPE_STRING,
DATA_TYPE_TIME, THOUSAND_SYMBOL, DECIMAL_SYMBOL)
from .exceptions import LIBREValueError
from .models import Source
import logging
import types
import pyparsing
and context (classes, functions, or code) from other files:
# Path: libre/apps/data_drivers/literals.py
# DATA_TYPE_DATE = 4
#
# DATA_TYPE_DATETIME = 3
#
# DATA_TYPE_NUMBER = 2
#
# DATA_TYPE_STRING = 1
#
# DATA_TYPE_TIME = 5
#
# THOUSAND_SYMBOL = ','
#
# DECIMAL_SYMBOL = '.'
. Output only the next line. | DATA_TYPE_DATETIME: lambda x: parse(x), |
Based on the snippet: <|code_start|>from __future__ import absolute_import
logger = logging.getLogger(__name__)
enclosed_parser = pyparsing.Forward()
nestedBrackets = pyparsing.nestedExpr('[', ']', content=enclosed_parser)
enclosed_parser << (pyparsing.Word(pyparsing.alphanums + '-' + '.' + '(' + ')') | ',' | nestedBrackets)
DATA_TYPE_FUNCTIONS = {
DATA_TYPE_STRING: lambda x: unicode(x).strip(),
<|code_end|>
, predict the immediate next line with the help of imports:
from HTMLParser import HTMLParser
from operator import itemgetter
from urllib import unquote_plus
from dateutil.parser import parse
from shapely import geometry
from .literals import (
DATA_TYPE_DATE, DATA_TYPE_DATETIME, DATA_TYPE_NUMBER, DATA_TYPE_STRING,
DATA_TYPE_TIME, THOUSAND_SYMBOL, DECIMAL_SYMBOL)
from .exceptions import LIBREValueError
from .models import Source
import logging
import types
import pyparsing
and context (classes, functions, sometimes code) from other files:
# Path: libre/apps/data_drivers/literals.py
# DATA_TYPE_DATE = 4
#
# DATA_TYPE_DATETIME = 3
#
# DATA_TYPE_NUMBER = 2
#
# DATA_TYPE_STRING = 1
#
# DATA_TYPE_TIME = 5
#
# THOUSAND_SYMBOL = ','
#
# DECIMAL_SYMBOL = '.'
. Output only the next line. | DATA_TYPE_NUMBER: lambda x: convert_to_number(x), |
Next line prediction: <|code_start|>from __future__ import absolute_import
logger = logging.getLogger(__name__)
enclosed_parser = pyparsing.Forward()
nestedBrackets = pyparsing.nestedExpr('[', ']', content=enclosed_parser)
enclosed_parser << (pyparsing.Word(pyparsing.alphanums + '-' + '.' + '(' + ')') | ',' | nestedBrackets)
DATA_TYPE_FUNCTIONS = {
<|code_end|>
. Use current file imports:
(from HTMLParser import HTMLParser
from operator import itemgetter
from urllib import unquote_plus
from dateutil.parser import parse
from shapely import geometry
from .literals import (
DATA_TYPE_DATE, DATA_TYPE_DATETIME, DATA_TYPE_NUMBER, DATA_TYPE_STRING,
DATA_TYPE_TIME, THOUSAND_SYMBOL, DECIMAL_SYMBOL)
from .exceptions import LIBREValueError
from .models import Source
import logging
import types
import pyparsing)
and context including class names, function names, or small code snippets from other files:
# Path: libre/apps/data_drivers/literals.py
# DATA_TYPE_DATE = 4
#
# DATA_TYPE_DATETIME = 3
#
# DATA_TYPE_NUMBER = 2
#
# DATA_TYPE_STRING = 1
#
# DATA_TYPE_TIME = 5
#
# THOUSAND_SYMBOL = ','
#
# DECIMAL_SYMBOL = '.'
. Output only the next line. | DATA_TYPE_STRING: lambda x: unicode(x).strip(), |
Given snippet: <|code_start|>from __future__ import absolute_import
logger = logging.getLogger(__name__)
enclosed_parser = pyparsing.Forward()
nestedBrackets = pyparsing.nestedExpr('[', ']', content=enclosed_parser)
enclosed_parser << (pyparsing.Word(pyparsing.alphanums + '-' + '.' + '(' + ')') | ',' | nestedBrackets)
DATA_TYPE_FUNCTIONS = {
DATA_TYPE_STRING: lambda x: unicode(x).strip(),
DATA_TYPE_NUMBER: lambda x: convert_to_number(x),
DATA_TYPE_DATETIME: lambda x: parse(x),
DATA_TYPE_DATE: lambda x: parse(x).date(),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from HTMLParser import HTMLParser
from operator import itemgetter
from urllib import unquote_plus
from dateutil.parser import parse
from shapely import geometry
from .literals import (
DATA_TYPE_DATE, DATA_TYPE_DATETIME, DATA_TYPE_NUMBER, DATA_TYPE_STRING,
DATA_TYPE_TIME, THOUSAND_SYMBOL, DECIMAL_SYMBOL)
from .exceptions import LIBREValueError
from .models import Source
import logging
import types
import pyparsing
and context:
# Path: libre/apps/data_drivers/literals.py
# DATA_TYPE_DATE = 4
#
# DATA_TYPE_DATETIME = 3
#
# DATA_TYPE_NUMBER = 2
#
# DATA_TYPE_STRING = 1
#
# DATA_TYPE_TIME = 5
#
# THOUSAND_SYMBOL = ','
#
# DECIMAL_SYMBOL = '.'
which might include code, classes, or functions. Output only the next line. | DATA_TYPE_TIME: lambda x: parse(x).time(), |
Predict the next line for this snippet: <|code_start|>
logger = logging.getLogger(__name__)
enclosed_parser = pyparsing.Forward()
nestedBrackets = pyparsing.nestedExpr('[', ']', content=enclosed_parser)
enclosed_parser << (pyparsing.Word(pyparsing.alphanums + '-' + '.' + '(' + ')') | ',' | nestedBrackets)
DATA_TYPE_FUNCTIONS = {
DATA_TYPE_STRING: lambda x: unicode(x).strip(),
DATA_TYPE_NUMBER: lambda x: convert_to_number(x),
DATA_TYPE_DATETIME: lambda x: parse(x),
DATA_TYPE_DATE: lambda x: parse(x).date(),
DATA_TYPE_TIME: lambda x: parse(x).time(),
}
html_parser = HTMLParser()
def parse_enclosed(string):
return enclosed_parser.parseString(string).asList()
def convert_to_number(data):
if isinstance(data, (types.IntType, types.FloatType, types.LongType)):
# Is already a number
return data
else:
# Must be a string or unicode
# Get rid of dollar signs and thousand separators
<|code_end|>
with the help of current file imports:
from HTMLParser import HTMLParser
from operator import itemgetter
from urllib import unquote_plus
from dateutil.parser import parse
from shapely import geometry
from .literals import (
DATA_TYPE_DATE, DATA_TYPE_DATETIME, DATA_TYPE_NUMBER, DATA_TYPE_STRING,
DATA_TYPE_TIME, THOUSAND_SYMBOL, DECIMAL_SYMBOL)
from .exceptions import LIBREValueError
from .models import Source
import logging
import types
import pyparsing
and context from other files:
# Path: libre/apps/data_drivers/literals.py
# DATA_TYPE_DATE = 4
#
# DATA_TYPE_DATETIME = 3
#
# DATA_TYPE_NUMBER = 2
#
# DATA_TYPE_STRING = 1
#
# DATA_TYPE_TIME = 5
#
# THOUSAND_SYMBOL = ','
#
# DECIMAL_SYMBOL = '.'
, which may contain function names, class names, or code. Output only the next line. | data = data.replace(THOUSAND_SYMBOL, '').replace('$', '') |
Next line prediction: <|code_start|>nestedBrackets = pyparsing.nestedExpr('[', ']', content=enclosed_parser)
enclosed_parser << (pyparsing.Word(pyparsing.alphanums + '-' + '.' + '(' + ')') | ',' | nestedBrackets)
DATA_TYPE_FUNCTIONS = {
DATA_TYPE_STRING: lambda x: unicode(x).strip(),
DATA_TYPE_NUMBER: lambda x: convert_to_number(x),
DATA_TYPE_DATETIME: lambda x: parse(x),
DATA_TYPE_DATE: lambda x: parse(x).date(),
DATA_TYPE_TIME: lambda x: parse(x).time(),
}
html_parser = HTMLParser()
def parse_enclosed(string):
return enclosed_parser.parseString(string).asList()
def convert_to_number(data):
if isinstance(data, (types.IntType, types.FloatType, types.LongType)):
# Is already a number
return data
else:
# Must be a string or unicode
# Get rid of dollar signs and thousand separators
data = data.replace(THOUSAND_SYMBOL, '').replace('$', '')
if '(' and ')' in data:
# Is a negative number
return -convert_to_number(data.replace(')', '').replace('(', ''))
else:
<|code_end|>
. Use current file imports:
(from HTMLParser import HTMLParser
from operator import itemgetter
from urllib import unquote_plus
from dateutil.parser import parse
from shapely import geometry
from .literals import (
DATA_TYPE_DATE, DATA_TYPE_DATETIME, DATA_TYPE_NUMBER, DATA_TYPE_STRING,
DATA_TYPE_TIME, THOUSAND_SYMBOL, DECIMAL_SYMBOL)
from .exceptions import LIBREValueError
from .models import Source
import logging
import types
import pyparsing)
and context including class names, function names, or small code snippets from other files:
# Path: libre/apps/data_drivers/literals.py
# DATA_TYPE_DATE = 4
#
# DATA_TYPE_DATETIME = 3
#
# DATA_TYPE_NUMBER = 2
#
# DATA_TYPE_STRING = 1
#
# DATA_TYPE_TIME = 5
#
# THOUSAND_SYMBOL = ','
#
# DECIMAL_SYMBOL = '.'
. Output only the next line. | if DECIMAL_SYMBOL in data: |
Continue the code snippet: <|code_start|> def running(self):
if self._scheduler:
return self._scheduler.running
else:
return False
def clear(self):
for job in self.scheduled_jobs.values():
self.stop_job(job)
def stop_job(self, job):
if self.running:
self._scheduler.unschedule_job(job._job)
del(self.scheduled_jobs[job.name])
job.scheduler = None
def _schedule_job(self, job):
if isinstance(job, IntervalJob):
job._job = self._scheduler.add_interval_job(job.function, *job.args, **job.kwargs)
elif isinstance(job, DateJob):
job._job = self._scheduler.add_date_job(job.function, *job.args, **job.kwargs)
elif isinstance(job, CronJob):
job._job = self._scheduler.add_cron_job(job.function, *job.args, **job.kwargs)
else:
raise UnknownJobClass
def add_job(self, job):
logger.debug('adding job')
if job.scheduler or job.name in self.scheduled_jobs.keys():
<|code_end|>
. Use current file imports:
import logging
from apscheduler.scheduler import Scheduler as OriginalScheduler
from django.utils.translation import ugettext_lazy as _
from .exceptions import AlreadyScheduled, UnknownJob, UnknownJobClass
and context (classes, functions, or code) from other files:
# Path: libre/apps/scheduler/exceptions.py
# class AlreadyScheduled(Exception):
# """
# Raised when trying to schedule a Job instance of anything after it was
# already scheduled in any other scheduler
# """
# pass
#
# class UnknownJob(Exception):
# pass
#
# class UnknownJobClass(Exception):
# """
# Raised when trying to schedule a Job that is not of a a type:
# IntervalJob or DateJob
# """
# pass
. Output only the next line. | raise AlreadyScheduled |
Here is a snippet: <|code_start|> raise AlreadyScheduled
if self._scheduler:
self._schedule_job(job)
job.scheduler = self
self.scheduled_jobs[job.name] = job
def add_interval_job(self, name, label, function, *args, **kwargs):
job = IntervalJob(name=name, label=label, function=function, *args, **kwargs)
self.add_job(job)
return job
def add_date_job(self, name, label, function, *args, **kwargs):
job = DateJob(name=name, label=label, function=function, *args, **kwargs)
self.add_job(job)
return job
def add_cron_job(self, name, label, function, *args, **kwargs):
job = CronJob(name=name, label=label, function=function, *args, **kwargs)
self.add_job(job)
return job
def get_job_list(self):
return self.scheduled_jobs.values()
def get_job_by_name(self, name):
try:
return self.scheduled_jobs[name]
except KeyError:
<|code_end|>
. Write the next line using the current file imports:
import logging
from apscheduler.scheduler import Scheduler as OriginalScheduler
from django.utils.translation import ugettext_lazy as _
from .exceptions import AlreadyScheduled, UnknownJob, UnknownJobClass
and context from other files:
# Path: libre/apps/scheduler/exceptions.py
# class AlreadyScheduled(Exception):
# """
# Raised when trying to schedule a Job instance of anything after it was
# already scheduled in any other scheduler
# """
# pass
#
# class UnknownJob(Exception):
# pass
#
# class UnknownJobClass(Exception):
# """
# Raised when trying to schedule a Job that is not of a a type:
# IntervalJob or DateJob
# """
# pass
, which may include functions, classes, or code. Output only the next line. | raise UnknownJob |
Predict the next line for this snippet: <|code_start|> self._scheduler.shutdown()
del self._scheduler
self._scheduler = None
@property
def running(self):
if self._scheduler:
return self._scheduler.running
else:
return False
def clear(self):
for job in self.scheduled_jobs.values():
self.stop_job(job)
def stop_job(self, job):
if self.running:
self._scheduler.unschedule_job(job._job)
del(self.scheduled_jobs[job.name])
job.scheduler = None
def _schedule_job(self, job):
if isinstance(job, IntervalJob):
job._job = self._scheduler.add_interval_job(job.function, *job.args, **job.kwargs)
elif isinstance(job, DateJob):
job._job = self._scheduler.add_date_job(job.function, *job.args, **job.kwargs)
elif isinstance(job, CronJob):
job._job = self._scheduler.add_cron_job(job.function, *job.args, **job.kwargs)
else:
<|code_end|>
with the help of current file imports:
import logging
from apscheduler.scheduler import Scheduler as OriginalScheduler
from django.utils.translation import ugettext_lazy as _
from .exceptions import AlreadyScheduled, UnknownJob, UnknownJobClass
and context from other files:
# Path: libre/apps/scheduler/exceptions.py
# class AlreadyScheduled(Exception):
# """
# Raised when trying to schedule a Job instance of anything after it was
# already scheduled in any other scheduler
# """
# pass
#
# class UnknownJob(Exception):
# pass
#
# class UnknownJobClass(Exception):
# """
# Raised when trying to schedule a Job that is not of a a type:
# IntervalJob or DateJob
# """
# pass
, which may contain function names, class names, or code. Output only the next line. | raise UnknownJobClass |
Here is a snippet: <|code_start|> return 'File exists'
class URLFileTestCase(TestCase):
def setUp(self):
self.origin_url = OriginURLFile.objects.create(label='test origin', url='http://www.census.gov/population/estimates/puerto-rico/prmunnet.txt')
self.origin_url.copy_data()
def tearDown(self):
self.origin_url.discard_copy()
def test_data_iterator_creation(self):
self.assertEqual(temp_path_status(self.origin_url.data_iterator.name), 'File exists')
def test_hash(self):
self.assertEqual(self.origin_url.new_hash, '81f81877c664b9863628e253ebfdff2cc53b05cbf8020735cb37eef46901ebe8')
def test_iterator(self):
self.assertEqual(self.origin_url.data_iterator.next(), 'PR-99-1 Estimates of the Population of Puerto Rico Municipios, July 1, 1999, and\r\n')
self.assertEqual(self.origin_url.data_iterator.next(), 'Demographic Components of Population Change: April 1, 1990 to July 1, 1999\r\n')
self.origin_url.data_iterator.seek(0)
self.assertEqual(self.origin_url.data_iterator.next(), 'PR-99-1 Estimates of the Population of Puerto Rico Municipios, July 1, 1999, and\r\n')
def test_data_iterator(self):
self.assertEqual(len(self.origin_url.data_iterator.read()), 10713)
self.assertEqual(len(self.origin_url.data_iterator.read()), 0)
class OriginPathTestCase(TestCase):
def setUp(self):
<|code_end|>
. Write the next line using the current file imports:
import os
from django.conf import settings
from django.test import TestCase
from .models import OriginPath, OriginURLFile
and context from other files:
# Path: libre/apps/origins/models.py
# class OriginPath(Origin, ContainerOrigin):
# origin_type = _('disk path')
#
# path = models.TextField(blank=True, null=True, verbose_name=_('path to file'), help_text=_('Location to a file in the filesystem.'))
#
# def get_binary_iterator(self):
# """
# Generator to read a file piece by piece.
# """
# CHUNK_SIZE = 1024
# file_object = open(self.path)
#
# while True:
# data = file_object.read(CHUNK_SIZE)
# if not data:
# break
# yield data
#
# file_object.close()
#
# @property
# def identifier(self):
# return self.path
#
# class Meta:
# verbose_name = _('disk path origin')
# verbose_name_plural = _('disk path origins')
#
# class OriginURLFile(OriginURL, ContainerOrigin):
# origin_type = _('URL file')
#
# class Meta:
# verbose_name = _('URL file origin')
# verbose_name_plural = _('URL file origins')
, which may include functions, classes, or code. Output only the next line. | self.origin_url = OriginPath.objects.create(label='test origin', path=os.path.join(settings.PROJECT_ROOT, 'contrib', 'sample_data', TEST_FIXED_WIDTH_FILE)) |
Predict the next line for this snippet: <|code_start|>from __future__ import absolute_import
TEST_FIXED_WIDTH_FILE = 'prmunnet.txt'
def temp_path_status(path):
if not os.path.exists(path):
return 'File does not exist'
if not os.path.isfile(path):
return 'Not a file'
if not os.access(path, os.R_OK):
return 'File is not readable'
return 'File exists'
class URLFileTestCase(TestCase):
def setUp(self):
<|code_end|>
with the help of current file imports:
import os
from django.conf import settings
from django.test import TestCase
from .models import OriginPath, OriginURLFile
and context from other files:
# Path: libre/apps/origins/models.py
# class OriginPath(Origin, ContainerOrigin):
# origin_type = _('disk path')
#
# path = models.TextField(blank=True, null=True, verbose_name=_('path to file'), help_text=_('Location to a file in the filesystem.'))
#
# def get_binary_iterator(self):
# """
# Generator to read a file piece by piece.
# """
# CHUNK_SIZE = 1024
# file_object = open(self.path)
#
# while True:
# data = file_object.read(CHUNK_SIZE)
# if not data:
# break
# yield data
#
# file_object.close()
#
# @property
# def identifier(self):
# return self.path
#
# class Meta:
# verbose_name = _('disk path origin')
# verbose_name_plural = _('disk path origins')
#
# class OriginURLFile(OriginURL, ContainerOrigin):
# origin_type = _('URL file')
#
# class Meta:
# verbose_name = _('URL file origin')
# verbose_name_plural = _('URL file origins')
, which may contain function names, class names, or code. Output only the next line. | self.origin_url = OriginURLFile.objects.create(label='test origin', url='http://www.census.gov/population/estimates/puerto-rico/prmunnet.txt') |
Continue the code snippet: <|code_start|>from __future__ import absolute_import
class IconAdmin(admin.ModelAdmin):
list_display = ('name', 'label', 'icon_file')
<|code_end|>
. Use current file imports:
from django.contrib import admin
from .models import Icon
and context (classes, functions, or code) from other files:
# Path: libre/apps/icons/models.py
# class Icon(models.Model):
# _cache = {}
#
# name = models.CharField(max_length=48, verbose_name=_(u'name'), unique=True)
# label = models.CharField(max_length=48, verbose_name=_(u'label'), blank=True)
# icon_file = models.FileField(upload_to='icons', verbose_name='file')
#
# def __unicode__(self):
# return self.label or self.name
#
# def compose(self, as_base64=False):
# try:
# self.__class__._cache.setdefault(self.pk, {})
# return self.__class__._cache[self.pk][as_base64]
# except KeyError:
# image = PIL.Image.open(self.icon_file.file)
# output = StringIO()
# image.save(output, 'PNG')
# contents = output.getvalue()
# output.close()
# if as_base64:
# contents = 'data:image/png;base64,%s' % base64.b64encode(contents)
# self.__class__._cache.setdefault(self.pk, {})
# self.__class__._cache[self.pk][as_base64] = contents
# return contents
#
# def compose_base64(self):
# return self.compose(as_base64=True)
#
# def get_absolute_url(self):
# return reverse('display', args=[self.name])
#
# class Meta:
# verbose_name = _(u'icon')
# verbose_name_plural = _(u'icons')
# ordering = ['label', 'name']
. Output only the next line. | admin.site.register(Icon, IconAdmin) |
Here is a snippet: <|code_start|>from __future__ import absolute_import
logger = logging.getLogger(__name__)
class Lock(models.Model):
creation_datetime = models.DateTimeField(verbose_name=_(u'creation datetime'))
<|code_end|>
. Write the next line using the current file imports:
import logging
from django.db import (models, transaction, DatabaseError)
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from .managers import LockManager
from .literals import DEFAULT_LOCK_TIMEOUT_VALUE
and context from other files:
# Path: libre/apps/lock_manager/literals.py
# DEFAULT_LOCK_TIMEOUT_VALUE = 30
, which may include functions, classes, or code. Output only the next line. | timeout = models.IntegerField(default=DEFAULT_LOCK_TIMEOUT_VALUE, verbose_name=_(u'timeout')) |
Based on the snippet: <|code_start|> logger.debug('evaluated parameters: %s' % parameters)
response = requests.get(self.url, params=parameters)
logger.debug('response: %s' % response)
return (item for item in response.json())
class Meta:
verbose_name = _('REST API origin')
verbose_name_plural = _('REST API origins')
class OriginSOAPWebService(OriginURL):
origin_type = _('SOAP webservice')
endpoint = models.CharField(max_length=64, verbose_name=_('endpoint'), help_text=_('Endpoint, function or method to call.'))
parameters = models.TextField(blank=True, verbose_name=_('parameters'))
def get_binary_iterator(self):
# We are a subclass of OriginURL which has a get_binary_iterator method, invalidate it
raise AttributeError
def get_non_binary_iterator(self):
client = Client(self.url)
if self.parameters:
parameters = literal_eval(self.parameters)
else:
parameters = {}
<|code_end|>
, predict the immediate next line with the help of imports:
from ast import literal_eval
from itertools import izip
from django.conf import settings
from django.db import models
from django.db import load_backend as django_load_backend
from django.utils.translation import ugettext_lazy as _
from model_utils.managers import InheritanceManager
from picklefield.fields import dbsafe_encode, dbsafe_decode
from suds.client import Client
from .literals import BACKEND_CHOICES, BACKEND_CLASSES
from .utils import recursive_asdict
import hashlib
import logging
import tempfile
import requests
and context (classes, functions, sometimes code) from other files:
# Path: libre/apps/origins/utils.py
# def recursive_asdict(d):
# """Convert Suds object into serializable format."""
# out = {}
# for k, v in asdict(d).iteritems():
# if hasattr(v, '__keylist__'):
# out[k] = recursive_asdict(v)
# elif isinstance(v, list):
# out[k] = []
# for item in v:
# if hasattr(item, '__keylist__'):
# out[k].append(recursive_asdict(item))
# else:
# out[k].append(item)
# else:
# out[k] = v
# return out
. Output only the next line. | return (recursive_asdict(item) for item in getattr(client.service, self.endpoint)(**parameters)) |
Based on the snippet: <|code_start|>"""Tests for creating and manipulating agents."""
class TestAgents(object):
"""The agent test class."""
def setup(self):
"""Set up the environment by resetting the tables."""
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def add(self, *args):
self.db.add_all(args)
self.db.commit()
def test_create_agent_generic(self):
net = models.Network()
self.db.add(net)
<|code_end|>
, predict the immediate next line with the help of imports:
from wallace import nodes, information, db, models
from wallace.information import Meme, Gene
from nose.tools import raises
and context (classes, functions, sometimes code) from other files:
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/information.py
# class Gene(Info):
# class Meme(Info):
# class State(Info):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
#
# Path: wallace/information.py
# class Meme(Info):
# """A meme."""
#
# __mapper_args__ = {
# "polymorphic_identity": "meme"
# }
#
# class Gene(Info):
# """A gene."""
#
# __mapper_args__ = {
# "polymorphic_identity": "gene"
# }
. Output only the next line. | agent = nodes.Agent(network=net) |
Given the code snippet: <|code_start|> net = models.Network()
self.db.add(net)
agent1 = nodes.Agent(network=net)
agent2 = nodes.Agent(network=net)
agent3 = nodes.Agent(network=net)
agent1.connect(direction="to", whom=agent2)
agent1.connect(direction="to", whom=agent3)
agent1.transmit(to_whom=models.Node)
def test_fail_agent(self):
net = models.Network()
self.db.add(net)
agent = nodes.Agent(network=net)
self.db.commit()
assert agent.failed is False
assert agent.time_of_death is None
agent.fail()
assert agent.failed is True
assert agent.time_of_death is not None
def test_create_replicator_agent(self):
net = models.Network()
self.db.add(net)
agent = nodes.ReplicatorAgent(network=net)
assert len(agent.infos()) is 0
<|code_end|>
, generate the next line using the imports in this file:
from wallace import nodes, information, db, models
from wallace.information import Meme, Gene
from nose.tools import raises
and context (functions, classes, or occasionally code) from other files:
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/information.py
# class Gene(Info):
# class Meme(Info):
# class State(Info):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
#
# Path: wallace/information.py
# class Meme(Info):
# """A meme."""
#
# __mapper_args__ = {
# "polymorphic_identity": "meme"
# }
#
# class Gene(Info):
# """A gene."""
#
# __mapper_args__ = {
# "polymorphic_identity": "gene"
# }
. Output only the next line. | info = information.Info(origin=agent, contents="foo") |
Next line prediction: <|code_start|>"""Tests for creating and manipulating agents."""
class TestAgents(object):
"""The agent test class."""
def setup(self):
"""Set up the environment by resetting the tables."""
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def add(self, *args):
self.db.add_all(args)
self.db.commit()
def test_create_agent_generic(self):
<|code_end|>
. Use current file imports:
(from wallace import nodes, information, db, models
from wallace.information import Meme, Gene
from nose.tools import raises)
and context including class names, function names, or small code snippets from other files:
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/information.py
# class Gene(Info):
# class Meme(Info):
# class State(Info):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
#
# Path: wallace/information.py
# class Meme(Info):
# """A meme."""
#
# __mapper_args__ = {
# "polymorphic_identity": "meme"
# }
#
# class Gene(Info):
# """A gene."""
#
# __mapper_args__ = {
# "polymorphic_identity": "gene"
# }
. Output only the next line. | net = models.Network() |
Here is a snippet: <|code_start|>
agent1 = nodes.ReplicatorAgent(network=net)
agent2 = nodes.ReplicatorAgent(network=net)
agent3 = nodes.ReplicatorAgent(network=net)
agent1.connect(direction="to", whom=agent2)
agent1.connect(direction="to", whom=agent3)
info = models.Info(origin=agent1, contents="foo")
agent1.transmit(what=models.Info, to_whom=nodes.Agent)
agent2.receive()
agent3.receive()
assert agent1.infos()[0].contents == agent2.infos()[0].contents
assert agent1.infos()[0].contents == agent3.infos()[0].contents
assert agent1.infos()[0].id != agent2.infos()[0].id != agent3.infos()[0].id
transmissions = info.transmissions()
assert len(transmissions) == 2
def test_transmit_selector_default(self):
net = models.Network()
self.db.add(net)
# Create a network of two biological nodes.
agent1 = nodes.ReplicatorAgent(network=net)
agent2 = nodes.ReplicatorAgent(network=net)
agent1.connect(direction="to", whom=agent2)
<|code_end|>
. Write the next line using the current file imports:
from wallace import nodes, information, db, models
from wallace.information import Meme, Gene
from nose.tools import raises
and context from other files:
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/information.py
# class Gene(Info):
# class Meme(Info):
# class State(Info):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
#
# Path: wallace/information.py
# class Meme(Info):
# """A meme."""
#
# __mapper_args__ = {
# "polymorphic_identity": "meme"
# }
#
# class Gene(Info):
# """A gene."""
#
# __mapper_args__ = {
# "polymorphic_identity": "gene"
# }
, which may include functions, classes, or code. Output only the next line. | information.Meme(origin=agent1, contents="foo") |
Continue the code snippet: <|code_start|> agent1 = nodes.ReplicatorAgent(network=net)
agent2 = nodes.ReplicatorAgent(network=net)
agent3 = nodes.ReplicatorAgent(network=net)
agent1.connect(direction="to", whom=agent2)
agent1.connect(direction="to", whom=agent3)
info = models.Info(origin=agent1, contents="foo")
agent1.transmit(what=models.Info, to_whom=nodes.Agent)
agent2.receive()
agent3.receive()
assert agent1.infos()[0].contents == agent2.infos()[0].contents
assert agent1.infos()[0].contents == agent3.infos()[0].contents
assert agent1.infos()[0].id != agent2.infos()[0].id != agent3.infos()[0].id
transmissions = info.transmissions()
assert len(transmissions) == 2
def test_transmit_selector_default(self):
net = models.Network()
self.db.add(net)
# Create a network of two biological nodes.
agent1 = nodes.ReplicatorAgent(network=net)
agent2 = nodes.ReplicatorAgent(network=net)
agent1.connect(direction="to", whom=agent2)
information.Meme(origin=agent1, contents="foo")
<|code_end|>
. Use current file imports:
from wallace import nodes, information, db, models
from wallace.information import Meme, Gene
from nose.tools import raises
and context (classes, functions, or code) from other files:
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/information.py
# class Gene(Info):
# class Meme(Info):
# class State(Info):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
#
# Path: wallace/information.py
# class Meme(Info):
# """A meme."""
#
# __mapper_args__ = {
# "polymorphic_identity": "meme"
# }
#
# class Gene(Info):
# """A gene."""
#
# __mapper_args__ = {
# "polymorphic_identity": "gene"
# }
. Output only the next line. | information.Gene(origin=agent1, contents="bar") |
Predict the next line for this snippet: <|code_start|>
class TestTransformations(object):
def setup(self):
"""Set up the environment by resetting the tables."""
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def add(self, *args):
self.db.add_all(args)
self.db.commit()
def test_identity_transformation(self):
<|code_end|>
with the help of current file imports:
from wallace import db, models
and context from other files:
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
, which may contain function names, class names, or code. Output only the next line. | net = models.Network() |
Next line prediction: <|code_start|>"""Import custom routes into the experiment server."""
# Load the configuration options.
config = PsiturkConfig()
config.load_config()
myauth = PsiTurkAuthorization(config)
# Set logging options.
LOG_LEVELS = [
logging.DEBUG,
logging.INFO,
logging.WARNING,
logging.ERROR,
logging.CRITICAL
]
LOG_LEVEL = LOG_LEVELS[config.getint('Server Parameters', 'loglevel')]
<|code_end|>
. Use current file imports:
(from flask import (
Blueprint,
request,
Response,
send_from_directory,
render_template
)
from psiturk.psiturk_config import PsiturkConfig
from psiturk.user_utils import PsiTurkAuthorization
from psiturk.db import init_db
from psiturk.db import db_session as session_psiturk
from wallace import db, models
from operator import attrgetter
from json import dumps
from datetime import datetime
from rq import Queue, get_current_job
from worker import conn
from sqlalchemy.orm.exc import NoResultFound
from psiturk.models import Participant as PsiturkParticipant
import imp
import inspect
import logging
import os
import requests
import traceback)
and context including class names, function names, or small code snippets from other files:
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
. Output only the next line. | db.logger.setLevel(LOG_LEVEL) |
Here is a snippet: <|code_start|> .format(request.url, request.method, parameter_type, parameter)
return error_response(error_type=msg)
def assign_properties(thing):
"""Assign properties to an object.
When creating something via a post request (e.g. a node), you can pass the
properties of the object in the request. This function gets those values
from the request and fills in the relevant columns of the table.
"""
for p in range(5):
property_name = "property" + str(p + 1)
property = request_parameter(parameter=property_name, optional=True)
if property:
setattr(thing, property_name, property)
session.commit()
@custom_code.route("/participant/<worker_id>/<hit_id>/<assignment_id>/<mode>",
methods=["POST"])
def create_participant(worker_id, hit_id, assignment_id, mode):
"""Create a participant.
This route will be hit very early on as any nodes the participant creates
will be defined in reference to the participant object.
You must specify the worker_id, hit_id, assignment_id and mode in the url.
"""
# check this worker hasn't already taken part
<|code_end|>
. Write the next line using the current file imports:
from flask import (
Blueprint,
request,
Response,
send_from_directory,
render_template
)
from psiturk.psiturk_config import PsiturkConfig
from psiturk.user_utils import PsiTurkAuthorization
from psiturk.db import init_db
from psiturk.db import db_session as session_psiturk
from wallace import db, models
from operator import attrgetter
from json import dumps
from datetime import datetime
from rq import Queue, get_current_job
from worker import conn
from sqlalchemy.orm.exc import NoResultFound
from psiturk.models import Participant as PsiturkParticipant
import imp
import inspect
import logging
import os
import requests
import traceback
and context from other files:
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
, which may include functions, classes, or code. Output only the next line. | parts = models.Participant.query.filter_by(worker_id=worker_id).all() |
Given the code snippet: <|code_start|>
#: a generic column that can be used to store experiment-specific details in
#: String form.
property1 = Column(String(256), nullable=True, default=None)
#: a generic column that can be used to store experiment-specific details in
#: String form.
property2 = Column(String(256), nullable=True, default=None)
#: a generic column that can be used to store experiment-specific details in
#: String form.
property3 = Column(String(256), nullable=True, default=None)
#: a generic column that can be used to store experiment-specific details in
#: String form.
property4 = Column(String(256), nullable=True, default=None)
#: a generic column that can be used to store experiment-specific details in
#: String form.
property5 = Column(String(256), nullable=True, default=None)
#: boolean indicating whether the Network has failed which
#: prompts Wallace to ignore it unless specified otherwise. Objects are
#: usually failed to indicate something has gone wrong.
failed = Column(Boolean, nullable=False, default=False, index=True)
#: the time at which failing occurred
time_of_death = Column(DateTime, default=None)
<|code_end|>
, generate the next line using the imports in this file:
from datetime import datetime
from .db import Base
from sqlalchemy import ForeignKey, or_, and_
from sqlalchemy import (Column, String, Text, Enum, Integer, Boolean, DateTime,
Float)
from sqlalchemy.orm import relationship, validates
from operator import attrgetter
from transformations import Replication
from transformations import Mutation
from wallace.nodes import Source
import inspect
and context (functions, classes, or occasionally code) from other files:
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
. Output only the next line. | class Participant(Base, SharedMixin): |
Given snippet: <|code_start|>
class TestEnvironments(object):
def setup(self):
"""Set up the environment by resetting the tables."""
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def add(self, *args):
self.db.add_all(args)
self.db.commit()
def test_create_environment(self):
"""Create an environment"""
net = models.Network()
self.db.add(net)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from wallace import nodes, db, information, models
and context:
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/information.py
# class Gene(Info):
# class Meme(Info):
# class State(Info):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
which might include code, classes, or functions. Output only the next line. | environment = nodes.Environment(network=net) |
Continue the code snippet: <|code_start|>
class TestEnvironments(object):
def setup(self):
"""Set up the environment by resetting the tables."""
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def add(self, *args):
self.db.add_all(args)
self.db.commit()
def test_create_environment(self):
"""Create an environment"""
net = models.Network()
self.db.add(net)
environment = nodes.Environment(network=net)
<|code_end|>
. Use current file imports:
from wallace import nodes, db, information, models
and context (classes, functions, or code) from other files:
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/information.py
# class Gene(Info):
# class Meme(Info):
# class State(Info):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
. Output only the next line. | information.State(origin=environment, contents="foo") |
Given the following code snippet before the placeholder: <|code_start|>
class TestEnvironments(object):
def setup(self):
"""Set up the environment by resetting the tables."""
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def add(self, *args):
self.db.add_all(args)
self.db.commit()
def test_create_environment(self):
"""Create an environment"""
<|code_end|>
, predict the next line using imports from the current file:
from wallace import nodes, db, information, models
and context including class names, function names, and sometimes code from other files:
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/information.py
# class Gene(Info):
# class Meme(Info):
# class State(Info):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
. Output only the next line. | net = models.Network() |
Given snippet: <|code_start|>
class TestInformation(object):
def setup(self):
"""Set up the environment by resetting the tables."""
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def add(self, *args):
self.db.add_all(args)
self.db.commit()
def test_create_genome(self):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from wallace import models, information, db
and context:
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
#
# Path: wallace/information.py
# class Gene(Info):
# class Meme(Info):
# class State(Info):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
which might include code, classes, or functions. Output only the next line. | net = models.Network() |
Given the code snippet: <|code_start|>
class TestInformation(object):
def setup(self):
"""Set up the environment by resetting the tables."""
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def add(self, *args):
self.db.add_all(args)
self.db.commit()
def test_create_genome(self):
net = models.Network()
self.db.add(net)
node = models.Node(network=net)
<|code_end|>
, generate the next line using the imports in this file:
from wallace import models, information, db
and context (functions, classes, or occasionally code) from other files:
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
#
# Path: wallace/information.py
# class Gene(Info):
# class Meme(Info):
# class State(Info):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
. Output only the next line. | info = information.Gene(origin=node) |
Predict the next line for this snippet: <|code_start|>
class TestNetworks(object):
def setup(self):
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def test_create_network(self):
net = models.Network()
assert isinstance(net, models.Network)
def test_node_failure(self):
<|code_end|>
with the help of current file imports:
from wallace import networks, nodes, db, models
from nose.tools import assert_raises, raises
import random
and context from other files:
# Path: wallace/networks.py
# class Chain(Network):
# class FullyConnected(Network):
# class Empty(Network):
# class Star(Network):
# class Burst(Network):
# class DiscreteGenerational(Network):
# class ScaleFree(Network):
# class SequentialMicrosociety(Network):
# def add_node(self, node):
# def add_node(self, node):
# def add_node(self, node):
# def add_source(self, source):
# def add_node(self, node):
# def add_node(self, node):
# def __init__(self, generations, generation_size, initial_source):
# def generations(self):
# def generation_size(self):
# def initial_source(self):
# def add_node(self, node):
# def __init__(self, m0, m):
# def m0(self):
# def m(self):
# def add_node(self, node):
# def __init__(self, n):
# def n(self):
# def add_node(self, node):
#
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
, which may contain function names, class names, or code. Output only the next line. | net = networks.Network() |
Here is a snippet: <|code_start|>
class TestNetworks(object):
def setup(self):
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def test_create_network(self):
net = models.Network()
assert isinstance(net, models.Network)
def test_node_failure(self):
net = networks.Network()
self.db.add(net)
self.db.commit()
for _ in range(5):
<|code_end|>
. Write the next line using the current file imports:
from wallace import networks, nodes, db, models
from nose.tools import assert_raises, raises
import random
and context from other files:
# Path: wallace/networks.py
# class Chain(Network):
# class FullyConnected(Network):
# class Empty(Network):
# class Star(Network):
# class Burst(Network):
# class DiscreteGenerational(Network):
# class ScaleFree(Network):
# class SequentialMicrosociety(Network):
# def add_node(self, node):
# def add_node(self, node):
# def add_node(self, node):
# def add_source(self, source):
# def add_node(self, node):
# def add_node(self, node):
# def __init__(self, generations, generation_size, initial_source):
# def generations(self):
# def generation_size(self):
# def initial_source(self):
# def add_node(self, node):
# def __init__(self, m0, m):
# def m0(self):
# def m(self):
# def add_node(self, node):
# def __init__(self, n):
# def n(self):
# def add_node(self, node):
#
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
, which may include functions, classes, or code. Output only the next line. | nodes.Agent(network=net) |
Given snippet: <|code_start|>
class TestNetworks(object):
def setup(self):
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def test_create_network(self):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from wallace import networks, nodes, db, models
from nose.tools import assert_raises, raises
import random
and context:
# Path: wallace/networks.py
# class Chain(Network):
# class FullyConnected(Network):
# class Empty(Network):
# class Star(Network):
# class Burst(Network):
# class DiscreteGenerational(Network):
# class ScaleFree(Network):
# class SequentialMicrosociety(Network):
# def add_node(self, node):
# def add_node(self, node):
# def add_node(self, node):
# def add_source(self, source):
# def add_node(self, node):
# def add_node(self, node):
# def __init__(self, generations, generation_size, initial_source):
# def generations(self):
# def generation_size(self):
# def initial_source(self):
# def add_node(self, node):
# def __init__(self, m0, m):
# def m0(self):
# def m(self):
# def add_node(self, node):
# def __init__(self, n):
# def n(self):
# def add_node(self, node):
#
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
which might include code, classes, or functions. Output only the next line. | net = models.Network() |
Given the code snippet: <|code_start|>
class TestSources(object):
def setup(self):
"""Set up the environment by resetting the tables."""
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def add(self, *args):
self.db.add_all(args)
self.db.commit()
def test_create_random_binary_string_source(self):
net = models.Network()
self.add(net)
<|code_end|>
, generate the next line using the imports in this file:
from wallace import nodes, db, models
and context (functions, classes, or occasionally code) from other files:
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
. Output only the next line. | source = nodes.RandomBinaryStringSource(network=net) |
Given the code snippet: <|code_start|>
class TestSources(object):
def setup(self):
"""Set up the environment by resetting the tables."""
self.db = db.init_db(drop_all=True)
def teardown(self):
self.db.rollback()
self.db.close()
def add(self, *args):
self.db.add_all(args)
self.db.commit()
def test_create_random_binary_string_source(self):
<|code_end|>
, generate the next line using the imports in this file:
from wallace import nodes, db, models
and context (functions, classes, or occasionally code) from other files:
# Path: wallace/nodes.py
# class Agent(Node):
# class ReplicatorAgent(Agent):
# class Source(Node):
# class RandomBinaryStringSource(Source):
# class Environment(Node):
# def fitness(self):
# def fitness(self, fitness):
# def fitness(self):
# def update(self, infos):
# def _what(self):
# def create_information(self):
# def _info_type(self):
# def _contents(self):
# def receive(self, what):
# def _contents(self):
# def state(self, time=None):
# def _what(self):
#
# Path: wallace/db.py
# def sessions_scope(local_session, commit=False):
# def scoped_session_decorator(func):
# def wrapper(*args, **kwargs):
# def init_db(drop_all=False):
#
# Path: wallace/models.py
# DATETIME_FMT = "%Y-%m-%dT%H:%M:%S.%f"
# def timenow():
# def __init__(self, worker_id, assignment_id, hit_id, mode):
# def __json__(self):
# def nodes(self, type=None, failed=False):
# def questions(self, type=None):
# def infos(self, type=None, failed=False):
# def fail(self):
# def __init__(self, participant, question, response, number):
# def fail(self):
# def __json__(self):
# def __repr__(self):
# def __json__(self):
# def nodes(self, type=None, failed=False, participant_id=None):
# def size(self, type=None, failed=False):
# def infos(self, type=None, failed=False):
# def transmissions(self, status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def latest_transmission_recipient(self):
# def vectors(self, failed=False):
# def add_node(self, node):
# def fail(self):
# def calculate_full(self):
# def print_verbose(self):
# def __init__(self, network, participant=None):
# def __repr__(self):
# def __json__(self):
# def vectors(self, direction="all", failed=False):
# def neighbors(self, type=None, direction="to", failed=None):
# def is_connected(self, whom, direction="to", failed=None):
# def infos(self, type=None, failed=False):
# def received_infos(self, type=None, failed=None):
# def transmissions(self, direction="outgoing", status="all", failed=False):
# def transformations(self, type=None, failed=False):
# def fail(self):
# def connect(self, whom, direction="to"):
# def flatten(self, l):
# def transmit(self, what=None, to_whom=None):
# def _what(self):
# def _to_whom(self):
# def receive(self, what=None):
# def update(self, infos):
# def replicate(self, info_in):
# def mutate(self, info_in):
# def __init__(self, origin, destination):
# def __repr__(self):
# def __json__(self):
# def transmissions(self, status="all"):
# def fail(self):
# def __init__(self, origin, contents=None):
# def _write_once(self, key, value):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def transmissions(self, status="all"):
# def transformations(self, relationship="all"):
# def _mutated_contents(self):
# def __init__(self, vector, info):
# def mark_received(self):
# def __repr__(self):
# def __json__(self):
# def fail(self):
# def __repr__(self):
# def __init__(self, info_in, info_out):
# def __json__(self):
# def fail(self):
# class SharedMixin(object):
# class Participant(Base, SharedMixin):
# class Question(Base, SharedMixin):
# class Network(Base, SharedMixin):
# class Node(Base, SharedMixin):
# class Vector(Base, SharedMixin):
# class Info(Base, SharedMixin):
# class Transmission(Base, SharedMixin):
# class Transformation(Base, SharedMixin):
# class Notification(Base, SharedMixin):
. Output only the next line. | net = models.Network() |
Given snippet: <|code_start|> def transform(self, obj, value):
default_vals = {tr.Undefined, None}
if any((value is x for x in default_vals)):
transformed = []
else:
transformed = [self.flag, value]
return transformed
def validate(self, obj, value):
value = super(Option, self).validate(obj, value)
return value
class IdentityList(TransformedTrait, tr.List):
def transform(self, obj, value):
return value
class TraitMenu(_BaseTraits):
_base_command = ('',)
_traits_ignore = ('_menu',)
base_command = IdentityList(
trait=tr.CUnicode(),
)
@tr.default('base_command')
def _default_username(self):
return self._base_command
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging as _logging
import traitlets as tr
from .menu import Menu, _BaseTraits
from collections import namedtuple as _namedtuple
from itertools import chain as _chain
from copy import copy
and context:
# Path: src/dynmen/menu.py
# class Menu(_BaseTraits):
# process_mode = tr.CaselessStrEnum(
# ('blocking', 'async', 'futures'),
# default_value='blocking',
# )
# command = tr.List(trait=CUnicodeNull())
# entry_sep = tr.CUnicode('\n')
#
# def __init__(self, command=(), entry_sep='\n', process_mode='blocking'):
# """Create a python wrapper for command
#
# Menu.__call__ sends entries to the stdin of the process given by command
# the behavior of __call__ changes depending on process_mode
#
# process_mode is either
# - blocking -> subprocess and blocks until process finishes
# - futures -> run the process in a thread pool and immediately return a future
# - async -> do not start process, but return a coroutine that can be scheduled
# """
# self.command = command
# self.entry_sep = entry_sep
# self.process_mode = process_mode
#
# def __call__(self, entries=(), entry_sep=None, **kw):
# """Send entries to menu, return selected entry
#
# entries is an iterable where each element corresponds
# to an entry in the menu.
# """
# if entry_sep is None:
# entry_sep = self.entry_sep
# cmd, launch = self.command, self._get_launch_fn(self.process_mode)
# _logr.debug('Building cmd: %r using the %r launcher',
# cmd, self.process_mode)
# if isinstance(entries, _GeneratorType):
# entries = list(entries)
#
# fn_input = _partial(self._convert_entries, entries, entry_sep)
# fn_transform = _partial(
# self._transform_output,
# entries=entries,
# entry_sep=entry_sep,
# )
# return launch(cmd, fn_input, fn_transform, **kw)
#
# @staticmethod
# def _get_launch_fn(process_mode):
# mod = _import_module('..cmd.' + process_mode, __name__)
# return mod.launch
#
# @staticmethod
# def _transform_output(result, entries, entry_sep):
# stdout, stderr, returncode = result
# if returncode != 0:
# msg = 'Nonzero exit status: {!r}'.format(result)
# raise MenuError(msg)
# selected = stdout.decode('utf8')
# if selected.endswith('\n'):
# selected = selected[:-1]
# try:
# value = entries[selected]
# except (TypeError, KeyError):
# value = None
# return MenuResult(selected, value)
#
# @staticmethod
# def _convert_entries(elements, entry_sep):
# "Convert elements to a bytes string"
# if isinstance(elements, bytes):
# return elements
# try:
# return elements.encode('utf8')
# except AttributeError:
# pass
#
# bentry_sep = entry_sep.encode('utf8')
# try:
# elements = [x.encode('utf8') for x in elements]
# except AttributeError:
# pass
# return bentry_sep.join(elements)
#
# class _BaseTraits(tr.HasTraits):
# def setup_instance(self, *args, **kwargs):
# self._trait_transformed = {}
# super(_BaseTraits, self).setup_instance(*args, **kwargs)
#
# _traits_ignore = ()
#
# def _restricted_traits(self):
# traits = self.traits()
# for trait in self._traits_ignore:
# traits.pop(trait, None)
# return traits
#
# def __hash__(self):
# d_traits = self._restricted_traits()
# info = [(x, getattr(self, x)) for x in sorted(d_traits)]
# info_tuple = (self.__class__, repr(info))
# return hash(info_tuple)
#
# def __eq__(self, other):
# if isinstance(other, self.__class__):
# if hash(self) == hash(other):
# return True
# return False
#
# def __repr__(self):
# clsname = self.__class__.__name__
# traits = []
# for name, descriptor in self._restricted_traits().items():
# try:
# if not self._trait_transformed[name]:
# continue
# except KeyError:
# pass
# record = descriptor.get(self)
# txt = '{}={!r}'.format(name, record)
# traits.append(txt)
# toret = [clsname, '(', ', '.join(traits), ')']
# return ''.join(toret)
which might include code, classes, or functions. Output only the next line. | _menu = tr.Instance(klass=Menu) |
Here is a snippet: <|code_start|>
def validate(self, obj, value):
val = super(Flag, self).validate(obj, value)
return value
class Option(TransformedTrait):
def __init__(self, flag, default_value=None, **kwargs):
super(Option, self).__init__(default_value=default_value, **kwargs)
self.flag = flag
def transform(self, obj, value):
default_vals = {tr.Undefined, None}
if any((value is x for x in default_vals)):
transformed = []
else:
transformed = [self.flag, value]
return transformed
def validate(self, obj, value):
value = super(Option, self).validate(obj, value)
return value
class IdentityList(TransformedTrait, tr.List):
def transform(self, obj, value):
return value
<|code_end|>
. Write the next line using the current file imports:
import logging as _logging
import traitlets as tr
from .menu import Menu, _BaseTraits
from collections import namedtuple as _namedtuple
from itertools import chain as _chain
from copy import copy
and context from other files:
# Path: src/dynmen/menu.py
# class Menu(_BaseTraits):
# process_mode = tr.CaselessStrEnum(
# ('blocking', 'async', 'futures'),
# default_value='blocking',
# )
# command = tr.List(trait=CUnicodeNull())
# entry_sep = tr.CUnicode('\n')
#
# def __init__(self, command=(), entry_sep='\n', process_mode='blocking'):
# """Create a python wrapper for command
#
# Menu.__call__ sends entries to the stdin of the process given by command
# the behavior of __call__ changes depending on process_mode
#
# process_mode is either
# - blocking -> subprocess and blocks until process finishes
# - futures -> run the process in a thread pool and immediately return a future
# - async -> do not start process, but return a coroutine that can be scheduled
# """
# self.command = command
# self.entry_sep = entry_sep
# self.process_mode = process_mode
#
# def __call__(self, entries=(), entry_sep=None, **kw):
# """Send entries to menu, return selected entry
#
# entries is an iterable where each element corresponds
# to an entry in the menu.
# """
# if entry_sep is None:
# entry_sep = self.entry_sep
# cmd, launch = self.command, self._get_launch_fn(self.process_mode)
# _logr.debug('Building cmd: %r using the %r launcher',
# cmd, self.process_mode)
# if isinstance(entries, _GeneratorType):
# entries = list(entries)
#
# fn_input = _partial(self._convert_entries, entries, entry_sep)
# fn_transform = _partial(
# self._transform_output,
# entries=entries,
# entry_sep=entry_sep,
# )
# return launch(cmd, fn_input, fn_transform, **kw)
#
# @staticmethod
# def _get_launch_fn(process_mode):
# mod = _import_module('..cmd.' + process_mode, __name__)
# return mod.launch
#
# @staticmethod
# def _transform_output(result, entries, entry_sep):
# stdout, stderr, returncode = result
# if returncode != 0:
# msg = 'Nonzero exit status: {!r}'.format(result)
# raise MenuError(msg)
# selected = stdout.decode('utf8')
# if selected.endswith('\n'):
# selected = selected[:-1]
# try:
# value = entries[selected]
# except (TypeError, KeyError):
# value = None
# return MenuResult(selected, value)
#
# @staticmethod
# def _convert_entries(elements, entry_sep):
# "Convert elements to a bytes string"
# if isinstance(elements, bytes):
# return elements
# try:
# return elements.encode('utf8')
# except AttributeError:
# pass
#
# bentry_sep = entry_sep.encode('utf8')
# try:
# elements = [x.encode('utf8') for x in elements]
# except AttributeError:
# pass
# return bentry_sep.join(elements)
#
# class _BaseTraits(tr.HasTraits):
# def setup_instance(self, *args, **kwargs):
# self._trait_transformed = {}
# super(_BaseTraits, self).setup_instance(*args, **kwargs)
#
# _traits_ignore = ()
#
# def _restricted_traits(self):
# traits = self.traits()
# for trait in self._traits_ignore:
# traits.pop(trait, None)
# return traits
#
# def __hash__(self):
# d_traits = self._restricted_traits()
# info = [(x, getattr(self, x)) for x in sorted(d_traits)]
# info_tuple = (self.__class__, repr(info))
# return hash(info_tuple)
#
# def __eq__(self, other):
# if isinstance(other, self.__class__):
# if hash(self) == hash(other):
# return True
# return False
#
# def __repr__(self):
# clsname = self.__class__.__name__
# traits = []
# for name, descriptor in self._restricted_traits().items():
# try:
# if not self._trait_transformed[name]:
# continue
# except KeyError:
# pass
# record = descriptor.get(self)
# txt = '{}={!r}'.format(name, record)
# traits.append(txt)
# toret = [clsname, '(', ', '.join(traits), ')']
# return ''.join(toret)
, which may include functions, classes, or code. Output only the next line. | class TraitMenu(_BaseTraits): |
Given the code snippet: <|code_start|>
def testopen():
"""Tests that the RDF parser is capable of loading an RDF file
successfully."""
r = RDFParser(None, open('tests/resources/rdf/pass.rdf'))
assert r.rdf
def test_load_bad():
"""Tests that the RDF parser throws an error for invalid, damaged,
or corrupt RDF files."""
<|code_end|>
, generate the next line using the imports in this file:
from StringIO import StringIO
from validator.rdf import RDFParser, RDFException
import pytest
and context (functions, classes, or occasionally code) from other files:
# Path: validator/rdf.py
# class RDFParser(object):
# """Parser wrapper for RDF files."""
#
# def __init__(self, err, data, namespace=None):
# self.err = err
# self.manifest = u'urn:mozilla:install-manifest'
# self.namespace = namespace or 'http://www.mozilla.org/2004/em-rdf'
#
# if (hasattr(data, 'read') and hasattr(data, 'readline') or
# isinstance(data, StringIO)
# ):
# # It could be a file-like object, let's read it so that we can
# # wrap it in StringIO so that we can re-open at any time
# data.seek(0)
# data = data.read()
#
# try:
# # Use an empty ContentHandler, we just want to make sure it parses.
# parse(StringIO(data), ContentHandler())
# except SAXParseException as ex:
# # Raise the SAX parse exceptions so we get some line info.
# raise RDFException(orig_exception=ex)
#
# from rdflib.plugins.parsers import rdfxml
# orig_create_parser = rdfxml.create_parser
#
# try:
# # Patch rdflib to not resolve URL entities.
# def create_parser(*args, **kwargs):
# parser = orig_create_parser(*args, **kwargs)
# parser.setEntityResolver(AddonRDFEntityResolver(err))
# return parser
# rdfxml.create_parser = create_parser
#
# # Load up and parse the file in XML format.
# graph = Graph()
# graph.parse(StringIO(data), format='xml')
# self.rdf = graph
#
# except ParserError as ex:
# # Re-raise the exception in a local exception type.
# raise RDFException(message=ex.message)
# except SAXParseException as ex:
# # Raise the SAX parse exceptions so we get some line info.
# raise RDFException(orig_exception=ex)
# finally:
# # If we fail, we don't want to sully up the creation function.
# rdfxml.create_parser = orig_create_parser
#
# def uri(self, element, namespace=None):
# 'Returns a URIRef object for use with the RDF document.'
#
# if namespace is None:
# namespace = self.namespace
#
# return URIRef('%s#%s' % (namespace, element))
#
# def get_root_subject(self):
# 'Returns the BNode which describes the topmost subject of the graph.'
#
# manifest = URIRef(self.manifest)
#
# if list(self.rdf.triples((manifest, None, None))):
# return manifest
# else:
# return self.rdf.subjects(None, self.manifest).next()
#
# def get_object(self, subject=None, predicate=None):
# """Eliminates some of the glue code for searching RDF. Pass
# in a URIRef object (generated by the `uri` function above or
# a BNode object (returned by this function) for either of the
# parameters."""
#
# # Get the result of the search
# results = self.rdf.objects(subject, predicate)
# as_list = list(results)
#
# # Don't raise exceptions, value test!
# if not as_list:
# return None
#
# return as_list[0]
#
# def get_objects(self, subject=None, predicate=None):
# """Same as get_object, except returns a list of objects which
# satisfy the query rather than a single result."""
#
# # Get the result of the search
# results = self.rdf.objects(subject, predicate)
# return list(results)
#
# def get_applications(self):
# """Return the list of supported applications."""
# applications = []
#
# # Isolate all of the bnodes referring to target applications
# for target_app in self.get_objects(None,
# self.uri('targetApplication')):
# applications.append({
# 'guid': self.get_object(target_app, self.uri('id')),
# 'min_version': self.get_object(target_app,
# self.uri('minVersion')),
# 'max_version': self.get_object(target_app,
# self.uri('maxVersion'))})
# return applications
#
# class RDFException(Exception):
# """Exception thrown when the RDF parser encounters a problem."""
#
# def __init__(self, message=None, orig_exception=None):
# if message is None and orig_exception is not None:
# message = orig_exception.getMessage()
#
# super(RDFException, self).__init__(message)
# self.orig_exception = orig_exception
#
# def line(self):
# return (self.orig_exception.getLineNumber() if self.orig_exception else
# None)
. Output only the next line. | with pytest.raises(RDFException): |
Using the snippet: <|code_start|>
FLAGGED_FILES = set(['.DS_Store', 'Thumbs.db'])
FLAGGED_EXTENSIONS = set(['.orig', '.old', '~'])
OSX_REGEX = re.compile('__MACOSX')
hash_library_allowed = {}
for hash_list in 'hashes-allowed.txt', 'static_hashes.txt':
with open(os.path.join(os.path.dirname(__file__), hash_list)) as f:
hash_library_allowed.update(s.strip().split(None, 1) for s in f)
hash_library_warning = {}
with open(os.path.join(os.path.dirname(__file__), "hashes-warning.txt")) as f:
hash_library_warning.update(s.strip().split(None, 1) for s in f)
hash_library_error = {}
with open(os.path.join(os.path.dirname(__file__), "hashes-error.txt")) as f:
hash_library_error.update(s.strip().split(None, 1) for s in f)
<|code_end|>
, determine the next line of code. You have imports:
import hashlib
import os
import re
import validator.testcases.markup.markuptester as testendpoint_markup
import validator.testcases.markup.csstester as testendpoint_css
import validator.testcases.scripting as testendpoint_js
import validator.testcases.langpack as testendpoint_langpack
from StringIO import StringIO
from zipfile import BadZipfile
from regex import run_regex_tests
from validator.constants import MAX_JS_THRESHOLD
from validator import decorator
from validator import submain as testendpoint_validator
from validator import unicodehelper
from validator.constants import (BUGZILLA_BUG, PACKAGE_LANGPACK,
PACKAGE_SUBPACKAGE, PACKAGE_THEME)
from validator.xpi import XPIManager
and context (class names, function names, or code) available:
# Path: validator/decorator.py
# TEST_TIERS = {}
# CLEANUP_FUNCTIONS = []
# def register_test(tier=1, expected_type=None, simple=False, versions=None):
# def wrap(function):
# def register_cleanup(cleanup):
# def cleanup():
# def get_tiers():
# def get_tests(tier, type_=None):
# def version_range(guid, version, before=None, app_versions=None):
#
# Path: validator/submain.py
# def prepare_package(err, path, expectation=0, for_appversions=None,
# timeout=-1):
# def timeout_handler(signum, frame):
# def test_search(err, package, expectation=0):
# def test_package(err, file_, name, expectation=PACKAGE_ANY,
# for_appversions=None):
# def _load_install_rdf(err, package, expectation):
# def _load_package_json(err, package, expectation):
# def _load_manifest_json(err, package, expectation):
# def populate_chrome_manifest(err, xpi_package):
# def get_linked_manifest(path, from_path, from_chrome, from_triple):
# def test_inner_package(err, xpi_package, for_appversions=None):
#
# Path: validator/unicodehelper.py
# UNICODE_BOMS = [
# (codecs.BOM_UTF8, 'utf-8'),
# (codecs.BOM_UTF32_LE, 'utf-32-le'),
# (codecs.BOM_UTF32_BE, 'utf-32-be'),
# (codecs.BOM_UTF16_LE, 'utf-16-le'),
# (codecs.BOM_UTF16_BE, 'utf-16-be'),
# ]
# COMMON_ENCODINGS = ('latin_1', 'utf-16')
# NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
# def decode(data):
. Output only the next line. | @decorator.register_test(tier=1) |
Here is a snippet: <|code_start|>
# If that item is a container file, unzip it and scan it.
if name_lower.endswith('.jar'):
# This is either a subpackage or a nested theme.
is_subpackage = not err.get_resource('is_multipackage')
# Unpack the package and load it up.
package = StringIO(file_data)
try:
sub_xpi = XPIManager(package, mode='r', name=name,
subpackage=is_subpackage)
except BadZipfile:
err.error(('testcases_content',
'test_packed_packages',
'jar_subpackage_corrupt'),
'Subpackage corrupt.',
'The subpackage appears to be corrupt, and could not '
'be opened.',
name)
return None
# Let the error bunder know we're in a sub-package.
err.push_state(name)
err.detected_type = (PACKAGE_SUBPACKAGE if is_subpackage else
PACKAGE_THEME)
err.set_tier(1)
supported_versions = (err.supported_versions.copy() if
err.supported_versions else
err.supported_versions)
if is_subpackage:
<|code_end|>
. Write the next line using the current file imports:
import hashlib
import os
import re
import validator.testcases.markup.markuptester as testendpoint_markup
import validator.testcases.markup.csstester as testendpoint_css
import validator.testcases.scripting as testendpoint_js
import validator.testcases.langpack as testendpoint_langpack
from StringIO import StringIO
from zipfile import BadZipfile
from regex import run_regex_tests
from validator.constants import MAX_JS_THRESHOLD
from validator import decorator
from validator import submain as testendpoint_validator
from validator import unicodehelper
from validator.constants import (BUGZILLA_BUG, PACKAGE_LANGPACK,
PACKAGE_SUBPACKAGE, PACKAGE_THEME)
from validator.xpi import XPIManager
and context from other files:
# Path: validator/decorator.py
# TEST_TIERS = {}
# CLEANUP_FUNCTIONS = []
# def register_test(tier=1, expected_type=None, simple=False, versions=None):
# def wrap(function):
# def register_cleanup(cleanup):
# def cleanup():
# def get_tiers():
# def get_tests(tier, type_=None):
# def version_range(guid, version, before=None, app_versions=None):
#
# Path: validator/submain.py
# def prepare_package(err, path, expectation=0, for_appversions=None,
# timeout=-1):
# def timeout_handler(signum, frame):
# def test_search(err, package, expectation=0):
# def test_package(err, file_, name, expectation=PACKAGE_ANY,
# for_appversions=None):
# def _load_install_rdf(err, package, expectation):
# def _load_package_json(err, package, expectation):
# def _load_manifest_json(err, package, expectation):
# def populate_chrome_manifest(err, xpi_package):
# def get_linked_manifest(path, from_path, from_chrome, from_triple):
# def test_inner_package(err, xpi_package, for_appversions=None):
#
# Path: validator/unicodehelper.py
# UNICODE_BOMS = [
# (codecs.BOM_UTF8, 'utf-8'),
# (codecs.BOM_UTF32_LE, 'utf-32-le'),
# (codecs.BOM_UTF32_BE, 'utf-32-be'),
# (codecs.BOM_UTF16_LE, 'utf-16-le'),
# (codecs.BOM_UTF16_BE, 'utf-16-be'),
# ]
# COMMON_ENCODINGS = ('latin_1', 'utf-16')
# NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
# def decode(data):
, which may include functions, classes, or code. Output only the next line. | testendpoint_validator.test_inner_package(err, sub_xpi) |
Next line prediction: <|code_start|>
total_scripts = sum(len(bundle['scripts']) for bundle in scripts)
exhaustive = True
if total_scripts > MAX_JS_THRESHOLD:
err.warning(
err_id=('testcases_content', 'packed_js', 'too_much_js'),
warning='TOO MUCH JS FOR EXHAUSTIVE VALIDATION',
description='There are too many JS files for the validator to '
'process sequentially. An editor must manually '
'review the JS in this add-on.')
exhaustive = False
# Get the chrome manifest in case there's information about pollution
# exemptions.
chrome = err.get_resource('chrome.manifest_nopush')
marked_scripts = err.get_resource('marked_scripts')
if not marked_scripts:
marked_scripts = set()
# Process all of the scripts that were found seperately from the rest of
# the package contents.
for script_bundle in scripts:
package = script_bundle['package']
# Set the error bundle's package state to what it was when we first
# encountered the script file during the content tests.
for archive in script_bundle['state']:
err.push_state(archive)
for script in script_bundle['scripts']:
<|code_end|>
. Use current file imports:
(import hashlib
import os
import re
import validator.testcases.markup.markuptester as testendpoint_markup
import validator.testcases.markup.csstester as testendpoint_css
import validator.testcases.scripting as testendpoint_js
import validator.testcases.langpack as testendpoint_langpack
from StringIO import StringIO
from zipfile import BadZipfile
from regex import run_regex_tests
from validator.constants import MAX_JS_THRESHOLD
from validator import decorator
from validator import submain as testendpoint_validator
from validator import unicodehelper
from validator.constants import (BUGZILLA_BUG, PACKAGE_LANGPACK,
PACKAGE_SUBPACKAGE, PACKAGE_THEME)
from validator.xpi import XPIManager)
and context including class names, function names, or small code snippets from other files:
# Path: validator/decorator.py
# TEST_TIERS = {}
# CLEANUP_FUNCTIONS = []
# def register_test(tier=1, expected_type=None, simple=False, versions=None):
# def wrap(function):
# def register_cleanup(cleanup):
# def cleanup():
# def get_tiers():
# def get_tests(tier, type_=None):
# def version_range(guid, version, before=None, app_versions=None):
#
# Path: validator/submain.py
# def prepare_package(err, path, expectation=0, for_appversions=None,
# timeout=-1):
# def timeout_handler(signum, frame):
# def test_search(err, package, expectation=0):
# def test_package(err, file_, name, expectation=PACKAGE_ANY,
# for_appversions=None):
# def _load_install_rdf(err, package, expectation):
# def _load_package_json(err, package, expectation):
# def _load_manifest_json(err, package, expectation):
# def populate_chrome_manifest(err, xpi_package):
# def get_linked_manifest(path, from_path, from_chrome, from_triple):
# def test_inner_package(err, xpi_package, for_appversions=None):
#
# Path: validator/unicodehelper.py
# UNICODE_BOMS = [
# (codecs.BOM_UTF8, 'utf-8'),
# (codecs.BOM_UTF32_LE, 'utf-32-le'),
# (codecs.BOM_UTF32_BE, 'utf-32-be'),
# (codecs.BOM_UTF16_LE, 'utf-16-le'),
# (codecs.BOM_UTF16_BE, 'utf-16-be'),
# ]
# COMMON_ENCODINGS = ('latin_1', 'utf-16')
# NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
# def decode(data):
. Output only the next line. | file_data = unicodehelper.decode(package.read(script)) |
Next line prediction: <|code_start|>
FIREFOX_GUID = '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'
MOBILE_GUID = '{a23983c0-fd0e-11dc-95ff-0800200c9a66}'
THUNDERBIRD_GUID = '{3550f703-e582-4d05-9a08-453d09bdfdc6}'
def _validator(file_path, for_appversions=None, overrides=None):
js = os.environ.get('SPIDERMONKEY_INSTALLATION', 'js')
apps = os.path.join(os.path.dirname(validator.__file__),
'app_versions.json')
if not os.path.exists(apps):
raise EnvironmentError('Could not locate app_versions.json in git '
'repo for validator. Tried: %s' % apps)
orig = sys.stderr
sys.stderr = StringIO()
try:
<|code_end|>
. Use current file imports:
(import json
import os
import sys
import unittest
import validator
import validator.constants
from cStringIO import StringIO
from validator.validate import validate
from validator.testcases import scripting)
and context including class names, function names, or small code snippets from other files:
# Path: validator/validate.py
# def validate(path, format='json',
# approved_applications=None,
# determined=True,
# listed=True,
# expectation=PACKAGE_ANY,
# for_appversions=None,
# overrides=None,
# timeout=-1,
# compat_test=False,
# **kw):
# """
# Perform validation in one easy step!
#
# `path`:
# *Required*
# A file system path to the package to be validated.
# `format`:
# The format to return the results in. Defaults to "json". Currently, any
# other format will simply return the error bundle.
# `approved_applications`:
# Path to the list of approved application versions
# `determined`:
# If set to `False`, validation will halt at the end of the first tier
# that raises errors.
# `listed`:
# Whether the app is headed for the app marketplace or AMO. Defaults to
# `True`.
# `expectation`:
# The type of package that should be expected. Must be a symbolic
# constant from validator.constants (i.e.:
# validator.constants.PACKAGE_*). Defaults to PACKAGE_ANY.
# `for_appversions`:
# A dict of app GUIDs referencing lists of versions. Determines which
# version-dependant tests should be run.
# `timeout`:
# Number of seconds before aborting addon validation, or -1 to
# run with no timeout.
# `compat_tests`:
# A flag to signal the validator to skip tests which should not be run
# during compatibility bumps. Defaults to `False`.
# """
#
# bundle = ErrorBundle(listed=listed, determined=determined,
# overrides=overrides, for_appversions=for_appversions)
# bundle.save_resource('is_compat_test', compat_test)
#
# if approved_applications is None:
# approved_applications = os.path.join(os.path.dirname(__file__),
# 'app_versions.json')
#
# if isinstance(approved_applications, types.StringTypes):
# # Load up the target applications if the approved applications is a
# # path (string).
# with open(approved_applications) as approved_apps:
# apps = json.load(approved_apps)
# elif isinstance(approved_applications, dict):
# # If the lists of approved applications are already in a dict, just use
# # that instead of trying to pull from a file.
# apps = approved_applications
# else:
# raise ValueError('Unknown format for `approved_applications`.')
#
# constants.APPROVED_APPLICATIONS.clear()
# constants.APPROVED_APPLICATIONS.update(apps)
#
# submain.prepare_package(bundle, path, expectation,
# for_appversions=for_appversions,
# timeout=timeout)
#
# return format_result(bundle, format)
. Output only the next line. | result = validate(file_path, format='json', |
Using the snippet: <|code_start|>
manifest_json = """{
"name": "My Awesome Addon",
"version": "1.25",
"applications": {
"gecko": {
"id": "my@awesome.addon"
}
}
}"""
manifest_json_with_versions = """{
"name": "My Awesome Addon",
"version": "1.25",
"applications": {
"gecko": {
"id": "my@awesome.addon",
"strict_min_version": "43.0",
"strict_max_version": "50.*"
}
}
}"""
def test_parser():
<|code_end|>
, determine the next line of code. You have imports:
import json
from validator.constants import FIREFOX_GUID
from validator.json_parser import ManifestJsonParser
and context (class names, function names, or code) available:
# Path: validator/json_parser.py
# class ManifestJsonParser(object):
# """Parser wrapper for manifest.json files."""
#
# def __init__(self, err, data, namespace=None):
# self.err = err
# self.data = json.loads(unicodehelper.decode(data))
#
# def get_applications(self):
# """Return the list of supported applications."""
# if ('applications' not in self.data or
# 'gecko' not in self.data['applications']):
# return []
# app = self.data['applications']['gecko']
# min_version = app.get('strict_min_version', u'42.0')
# max_version = app.get('strict_max_version', u'*')
# return [{u'guid': FIREFOX_GUID,
# u'min_version': min_version,
# u'max_version': max_version}]
. Output only the next line. | parser = ManifestJsonParser(None, manifest_json) |
Predict the next line after this snippet: <|code_start|>
def test_load_data():
"""Test that data is loaded properly into the CG."""
d = """abc
def
ghi"""
<|code_end|>
using the current file's imports:
from validator.contextgenerator import ContextGenerator
and any relevant context from other files:
# Path: validator/contextgenerator.py
# class ContextGenerator:
# """The context generator creates a line-by-line mapping of all files that
# are validated. It will then use that to help produce useful bits of code
# for errors, warnings, and the like."""
#
# def __init__(self, data=None):
# if isinstance(data, StringIO):
# data = data.getvalue()
#
# self.data = data.split('\n')
#
# def get_context(self, line=1, column=0):
# 'Returns a tuple containing the context for a line'
#
# line -= 1 # The line is one-based
#
# # If there is no data in the file, there can be no context.
# datalen = len(self.data)
# if datalen <= line:
# return None
#
# build = [self.data[line]]
#
# # Add surrounding lines if they're available. There must always be
# # three elements in the context.
# if line > 0:
# build.insert(0, self.data[line - 1])
# else:
# build.insert(0, None)
#
# if line < datalen - 1:
# build.append(self.data[line + 1])
# else:
# build.append(None)
#
# leading_counts = []
#
# # Count whitespace to determine how much needs to be stripped.
# lstrip_count = INFINITY
# for line in build:
# # Don't count empty/whitespace-only lines.
# if line is None or not line.strip():
# leading_counts.append(lstrip_count)
# continue
#
# # Isolate the leading whitespace.
# ws_count = len(line) - len(line.lstrip())
# leading_counts.append(ws_count)
# if ws_count < lstrip_count:
# lstrip_count = ws_count
#
# # If all of the lines were skipped over, it means everything was
# # whitespace.
# if lstrip_count == INFINITY:
# return ('', '', '')
#
# for lnum in range(3):
# # Skip edge lines.
# if not build[lnum]:
# continue
#
# line = build[lnum].strip()
#
# # Empty lines stay empty.
# if not line:
# build[lnum] = ''
# continue
#
# line = self._format_line(line, column=column, rel_line=lnum)
# line = '%s%s' % (' ' * (leading_counts[lnum] - lstrip_count), line)
#
# build[lnum] = line
#
# # Return the final output as a tuple.
# return tuple(build)
#
# def _format_line(self, data, column=0, rel_line=1):
# 'Formats a line from the data to be the appropriate length'
# line_length = len(data)
#
# if line_length > 140:
# if rel_line == 0:
# # Trim from the beginning
# data = '... %s' % data[-140:]
# elif rel_line == 1:
# # Trim surrounding the error position
# if column < 70:
# data = '%s ...' % data[:140]
# elif column > line_length - 70:
# data = '... %s' % data[-140:]
# else:
# data = '... %s ...' % data[column - 70:column + 70]
#
# elif rel_line == 2:
# # Trim from the end
# data = '%s ...' % data[:140]
#
# data = unicodehelper.decode(data)
# return data
#
# def get_line(self, position):
# 'Returns the line number that the given string position is found on'
#
# datalen = len(self.data)
# count = len(self.data[0])
# line = 1
# while count < position:
# if line >= datalen:
# break
# count += len(self.data[line]) + 1
# line += 1
#
# return line
. Output only the next line. | c = ContextGenerator(d) |
Given the code snippet: <|code_start|> """
def __init__(self):
# Use "version(180)" so we don't use the latest version (185 at the
# time of this writing:
# https://developer.mozilla.org/en-US/docs/Mozilla/Projects/SpiderMonkey/JSAPI_reference/JSVersion) # noqa
# which deprecates generators with 'function' (see
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/Legacy_generator_function) # noqa
super(JSShell, self).__init__(code=['version(180)', self.SCRIPT])
@classmethod
def get_shell(cls):
"""Get a running JSShell instance, or create a new one if one does not
already exist."""
if not cls.instance:
cls.instance = cls()
return cls.instance
@classmethod
def cleanup(cls):
"""Clear our saved instance, and terminate its Spidermonkey process,
if there are no further references to it."""
if cls.instance is not None and cls.instance.returncode is None:
cls.instance.terminate()
cls.instance = None
def get_tree(self, code):
if isinstance(code, str):
<|code_end|>
, generate the next line using the imports in this file:
import simplejson as json
from spidermonkey import Spidermonkey
from validator import unicodehelper
from validator.contextgenerator import ContextGenerator
from validator.decorator import register_cleanup
and context (functions, classes, or occasionally code) from other files:
# Path: validator/unicodehelper.py
# UNICODE_BOMS = [
# (codecs.BOM_UTF8, 'utf-8'),
# (codecs.BOM_UTF32_LE, 'utf-32-le'),
# (codecs.BOM_UTF32_BE, 'utf-32-be'),
# (codecs.BOM_UTF16_LE, 'utf-16-le'),
# (codecs.BOM_UTF16_BE, 'utf-16-be'),
# ]
# COMMON_ENCODINGS = ('latin_1', 'utf-16')
# NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
# def decode(data):
#
# Path: validator/contextgenerator.py
# class ContextGenerator:
# """The context generator creates a line-by-line mapping of all files that
# are validated. It will then use that to help produce useful bits of code
# for errors, warnings, and the like."""
#
# def __init__(self, data=None):
# if isinstance(data, StringIO):
# data = data.getvalue()
#
# self.data = data.split('\n')
#
# def get_context(self, line=1, column=0):
# 'Returns a tuple containing the context for a line'
#
# line -= 1 # The line is one-based
#
# # If there is no data in the file, there can be no context.
# datalen = len(self.data)
# if datalen <= line:
# return None
#
# build = [self.data[line]]
#
# # Add surrounding lines if they're available. There must always be
# # three elements in the context.
# if line > 0:
# build.insert(0, self.data[line - 1])
# else:
# build.insert(0, None)
#
# if line < datalen - 1:
# build.append(self.data[line + 1])
# else:
# build.append(None)
#
# leading_counts = []
#
# # Count whitespace to determine how much needs to be stripped.
# lstrip_count = INFINITY
# for line in build:
# # Don't count empty/whitespace-only lines.
# if line is None or not line.strip():
# leading_counts.append(lstrip_count)
# continue
#
# # Isolate the leading whitespace.
# ws_count = len(line) - len(line.lstrip())
# leading_counts.append(ws_count)
# if ws_count < lstrip_count:
# lstrip_count = ws_count
#
# # If all of the lines were skipped over, it means everything was
# # whitespace.
# if lstrip_count == INFINITY:
# return ('', '', '')
#
# for lnum in range(3):
# # Skip edge lines.
# if not build[lnum]:
# continue
#
# line = build[lnum].strip()
#
# # Empty lines stay empty.
# if not line:
# build[lnum] = ''
# continue
#
# line = self._format_line(line, column=column, rel_line=lnum)
# line = '%s%s' % (' ' * (leading_counts[lnum] - lstrip_count), line)
#
# build[lnum] = line
#
# # Return the final output as a tuple.
# return tuple(build)
#
# def _format_line(self, data, column=0, rel_line=1):
# 'Formats a line from the data to be the appropriate length'
# line_length = len(data)
#
# if line_length > 140:
# if rel_line == 0:
# # Trim from the beginning
# data = '... %s' % data[-140:]
# elif rel_line == 1:
# # Trim surrounding the error position
# if column < 70:
# data = '%s ...' % data[:140]
# elif column > line_length - 70:
# data = '... %s' % data[-140:]
# else:
# data = '... %s ...' % data[column - 70:column + 70]
#
# elif rel_line == 2:
# # Trim from the end
# data = '%s ...' % data[:140]
#
# data = unicodehelper.decode(data)
# return data
#
# def get_line(self, position):
# 'Returns the line number that the given string position is found on'
#
# datalen = len(self.data)
# count = len(self.data[0])
# line = 1
# while count < position:
# if line >= datalen:
# break
# count += len(self.data[line]) + 1
# line += 1
#
# return line
#
# Path: validator/decorator.py
# def register_cleanup(cleanup):
# """Register a cleanup function to be called at the end of every validation
#     task. Takes either a callable (including a class with a __call__ method),
# or a class with a `cleanup` class method."""
#
# if not callable(cleanup):
#         # Allow decorating a class with a `cleanup` class method.
# cleanup = cleanup.cleanup
#
# CLEANUP_FUNCTIONS.append(cleanup.cleanup)
# return cleanup
. Output only the next line. | code = unicodehelper.decode(code) |
Using the snippet: <|code_start|>
def get_tree(code, err=None, filename=None, shell=None):
"""Retrieve the parse tree for a JS snippet."""
try:
return JSShell.get_shell().get_tree(code)
except JSReflectException as exc:
str_exc = str(exc)
if 'SyntaxError' in str_exc or 'ReferenceError' in str_exc:
err.warning(('testcases_scripting', 'test_js_file',
'syntax_error'),
'JavaScript Compile-Time Error',
['A compile-time error in the JavaScript halted '
'validation of that file.',
'Message: %s' % str_exc.split(':', 1)[-1].strip()],
filename=filename,
line=exc.line,
<|code_end|>
, determine the next line of code. You have imports:
import simplejson as json
from spidermonkey import Spidermonkey
from validator import unicodehelper
from validator.contextgenerator import ContextGenerator
from validator.decorator import register_cleanup
and context (class names, function names, or code) available:
# Path: validator/unicodehelper.py
# UNICODE_BOMS = [
# (codecs.BOM_UTF8, 'utf-8'),
# (codecs.BOM_UTF32_LE, 'utf-32-le'),
# (codecs.BOM_UTF32_BE, 'utf-32-be'),
# (codecs.BOM_UTF16_LE, 'utf-16-le'),
# (codecs.BOM_UTF16_BE, 'utf-16-be'),
# ]
# COMMON_ENCODINGS = ('latin_1', 'utf-16')
# NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
# def decode(data):
#
# Path: validator/contextgenerator.py
# class ContextGenerator:
# """The context generator creates a line-by-line mapping of all files that
# are validated. It will then use that to help produce useful bits of code
# for errors, warnings, and the like."""
#
# def __init__(self, data=None):
# if isinstance(data, StringIO):
# data = data.getvalue()
#
# self.data = data.split('\n')
#
# def get_context(self, line=1, column=0):
# 'Returns a tuple containing the context for a line'
#
# line -= 1 # The line is one-based
#
# # If there is no data in the file, there can be no context.
# datalen = len(self.data)
# if datalen <= line:
# return None
#
# build = [self.data[line]]
#
# # Add surrounding lines if they're available. There must always be
# # three elements in the context.
# if line > 0:
# build.insert(0, self.data[line - 1])
# else:
# build.insert(0, None)
#
# if line < datalen - 1:
# build.append(self.data[line + 1])
# else:
# build.append(None)
#
# leading_counts = []
#
# # Count whitespace to determine how much needs to be stripped.
# lstrip_count = INFINITY
# for line in build:
# # Don't count empty/whitespace-only lines.
# if line is None or not line.strip():
# leading_counts.append(lstrip_count)
# continue
#
# # Isolate the leading whitespace.
# ws_count = len(line) - len(line.lstrip())
# leading_counts.append(ws_count)
# if ws_count < lstrip_count:
# lstrip_count = ws_count
#
# # If all of the lines were skipped over, it means everything was
# # whitespace.
# if lstrip_count == INFINITY:
# return ('', '', '')
#
# for lnum in range(3):
# # Skip edge lines.
# if not build[lnum]:
# continue
#
# line = build[lnum].strip()
#
# # Empty lines stay empty.
# if not line:
# build[lnum] = ''
# continue
#
# line = self._format_line(line, column=column, rel_line=lnum)
# line = '%s%s' % (' ' * (leading_counts[lnum] - lstrip_count), line)
#
# build[lnum] = line
#
# # Return the final output as a tuple.
# return tuple(build)
#
# def _format_line(self, data, column=0, rel_line=1):
# 'Formats a line from the data to be the appropriate length'
# line_length = len(data)
#
# if line_length > 140:
# if rel_line == 0:
# # Trim from the beginning
# data = '... %s' % data[-140:]
# elif rel_line == 1:
# # Trim surrounding the error position
# if column < 70:
# data = '%s ...' % data[:140]
# elif column > line_length - 70:
# data = '... %s' % data[-140:]
# else:
# data = '... %s ...' % data[column - 70:column + 70]
#
# elif rel_line == 2:
# # Trim from the end
# data = '%s ...' % data[:140]
#
# data = unicodehelper.decode(data)
# return data
#
# def get_line(self, position):
# 'Returns the line number that the given string position is found on'
#
# datalen = len(self.data)
# count = len(self.data[0])
# line = 1
# while count < position:
# if line >= datalen:
# break
# count += len(self.data[line]) + 1
# line += 1
#
# return line
#
# Path: validator/decorator.py
# def register_cleanup(cleanup):
# """Register a cleanup function to be called at the end of every validation
#     task. Takes either a callable (including a class with a __call__ method),
# or a class with a `cleanup` class method."""
#
# if not callable(cleanup):
#         # Allow decorating a class with a `cleanup` class method.
# cleanup = cleanup.cleanup
#
# CLEANUP_FUNCTIONS.append(cleanup.cleanup)
# return cleanup
. Output only the next line. | context=ContextGenerator(code)) |
Predict the next line for this snippet: <|code_start|> except JSReflectException as exc:
str_exc = str(exc)
if 'SyntaxError' in str_exc or 'ReferenceError' in str_exc:
err.warning(('testcases_scripting', 'test_js_file',
'syntax_error'),
'JavaScript Compile-Time Error',
['A compile-time error in the JavaScript halted '
'validation of that file.',
'Message: %s' % str_exc.split(':', 1)[-1].strip()],
filename=filename,
line=exc.line,
context=ContextGenerator(code))
elif 'InternalError: too much recursion' in str_exc:
err.notice(('testcases_scripting', 'test_js_file',
'recursion_error'),
'JS too deeply nested for validation',
'A JS file was encountered that could not be valiated '
'due to limitations with Spidermonkey. It should be '
'manually inspected.',
filename=filename)
else:
err.error(('testcases_scripting', 'test_js_file',
'retrieving_tree'),
'JS reflection error prevented validation',
['An error in the JavaScript file prevented it from '
'being properly read by the Spidermonkey JS engine.',
str(exc)],
filename=filename)
<|code_end|>
with the help of current file imports:
import simplejson as json
from spidermonkey import Spidermonkey
from validator import unicodehelper
from validator.contextgenerator import ContextGenerator
from validator.decorator import register_cleanup
and context from other files:
# Path: validator/unicodehelper.py
# UNICODE_BOMS = [
# (codecs.BOM_UTF8, 'utf-8'),
# (codecs.BOM_UTF32_LE, 'utf-32-le'),
# (codecs.BOM_UTF32_BE, 'utf-32-be'),
# (codecs.BOM_UTF16_LE, 'utf-16-le'),
# (codecs.BOM_UTF16_BE, 'utf-16-be'),
# ]
# COMMON_ENCODINGS = ('latin_1', 'utf-16')
# NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
# def decode(data):
#
# Path: validator/contextgenerator.py
# class ContextGenerator:
# """The context generator creates a line-by-line mapping of all files that
# are validated. It will then use that to help produce useful bits of code
# for errors, warnings, and the like."""
#
# def __init__(self, data=None):
# if isinstance(data, StringIO):
# data = data.getvalue()
#
# self.data = data.split('\n')
#
# def get_context(self, line=1, column=0):
# 'Returns a tuple containing the context for a line'
#
# line -= 1 # The line is one-based
#
# # If there is no data in the file, there can be no context.
# datalen = len(self.data)
# if datalen <= line:
# return None
#
# build = [self.data[line]]
#
# # Add surrounding lines if they're available. There must always be
# # three elements in the context.
# if line > 0:
# build.insert(0, self.data[line - 1])
# else:
# build.insert(0, None)
#
# if line < datalen - 1:
# build.append(self.data[line + 1])
# else:
# build.append(None)
#
# leading_counts = []
#
# # Count whitespace to determine how much needs to be stripped.
# lstrip_count = INFINITY
# for line in build:
# # Don't count empty/whitespace-only lines.
# if line is None or not line.strip():
# leading_counts.append(lstrip_count)
# continue
#
# # Isolate the leading whitespace.
# ws_count = len(line) - len(line.lstrip())
# leading_counts.append(ws_count)
# if ws_count < lstrip_count:
# lstrip_count = ws_count
#
# # If all of the lines were skipped over, it means everything was
# # whitespace.
# if lstrip_count == INFINITY:
# return ('', '', '')
#
# for lnum in range(3):
# # Skip edge lines.
# if not build[lnum]:
# continue
#
# line = build[lnum].strip()
#
# # Empty lines stay empty.
# if not line:
# build[lnum] = ''
# continue
#
# line = self._format_line(line, column=column, rel_line=lnum)
# line = '%s%s' % (' ' * (leading_counts[lnum] - lstrip_count), line)
#
# build[lnum] = line
#
# # Return the final output as a tuple.
# return tuple(build)
#
# def _format_line(self, data, column=0, rel_line=1):
# 'Formats a line from the data to be the appropriate length'
# line_length = len(data)
#
# if line_length > 140:
# if rel_line == 0:
# # Trim from the beginning
# data = '... %s' % data[-140:]
# elif rel_line == 1:
# # Trim surrounding the error position
# if column < 70:
# data = '%s ...' % data[:140]
# elif column > line_length - 70:
# data = '... %s' % data[-140:]
# else:
# data = '... %s ...' % data[column - 70:column + 70]
#
# elif rel_line == 2:
# # Trim from the end
# data = '%s ...' % data[:140]
#
# data = unicodehelper.decode(data)
# return data
#
# def get_line(self, position):
# 'Returns the line number that the given string position is found on'
#
# datalen = len(self.data)
# count = len(self.data[0])
# line = 1
# while count < position:
# if line >= datalen:
# break
# count += len(self.data[line]) + 1
# line += 1
#
# return line
#
# Path: validator/decorator.py
# def register_cleanup(cleanup):
# """Register a cleanup function to be called at the end of every validation
#     task. Takes either a callable (including a class with a __call__ method),
# or a class with a `cleanup` class method."""
#
# if not callable(cleanup):
#         # Allow decorating a class with a `cleanup` class method.
# cleanup = cleanup.cleanup
#
# CLEANUP_FUNCTIONS.append(cleanup.cleanup)
# return cleanup
, which may contain function names, class names, or code. Output only the next line. | @register_cleanup |
Given the code snippet: <|code_start|>
# Compatibility app/version ranges:
def _build_definition(maj_version_num, firefox=True, fennec=True,
thunderbird=True, android=True):
definition = {}
app_version_range = (
<|code_end|>
, generate the next line using the imports in this file:
from validator.decorator import version_range
from validator.constants import (FIREFOX_GUID, FENNEC_GUID,
THUNDERBIRD_GUID as TB_GUID, ANDROID_GUID)
and context (functions, classes, or occasionally code) from other files:
# Path: validator/decorator.py
# def version_range(guid, version, before=None, app_versions=None):
# """Returns all values after (and including) `version` for the app `guid`"""
#
# if app_versions is None:
# app_versions = validator.constants.APPROVED_APPLICATIONS
# app_key = None
#
# # Support for shorthand instead of full GUIDs.
# for app_guid, app_name in APPLICATIONS.items():
# if app_name == guid:
# guid = app_guid
# break
#
# for key in app_versions.keys():
# if app_versions[key]['guid'] == guid:
# app_key = key
# break
#
# if not app_key or version not in app_versions[app_key]['versions']:
# raise Exception('Bad GUID or version provided for version range: %s'
# % version)
#
# all_versions = app_versions[app_key]['versions']
# version_pos = all_versions.index(version)
# before_pos = None
# if before is not None and before in all_versions:
# before_pos = all_versions.index(before)
#
# return all_versions[version_pos:before_pos]
. Output only the next line. | lambda app: version_range(app, '%d.0a1' % maj_version_num, |
Given snippet: <|code_start|> context=traverser.context)
remote_url = re.compile(r'^(https?|ftp|data):(//)?', re.I)
uri = unicode(uri.get_literal_value())
if uri.startswith('//') or remote_url.match(uri):
traverser.err.warning(
err_id=('js', 'instanceactions', '%s_remote_uri' % method),
warning='`%s` called with non-local URI.' % method,
description='Calling `%s` with a non-local URI will result in the '
'dialog being opened with chrome privileges.' % method,
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context)
def Proxy_deprec(wrapper, arguments, traverser):
traverser.warning(
err_id=('testcases_javascript_calldefinitions', 'Proxy', 'deprec'),
warning='Proxy.create and Proxy.createFunction are no longer supported.',
description=(
'Proxy.create and Proxy.createFunction are no longer supported. '
'If this flag appears on Add-ons SDK code, make sure you download '
'the latest version of the SDK and submit a new version. '
'See %s for more information.' % BUGZILLA_BUG % 892903),
compatibility_type='error',
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import math
import re
import actions
import predefinedentities
from jstypes import JSArray, JSObject, JSWrapper
from validator.constants import BUGZILLA_BUG
from validator.compat import FX48_DEFINITION
and context:
# Path: validator/compat.py
# FX48_DEFINITION = _build_definition(48, fennec=False, android=False, thunderbird=False)
which might include code, classes, or functions. Output only the next line. | for_appversions=FX48_DEFINITION) |
Given the following code snippet before the placeholder: <|code_start|> 'specify an update key. This means that serving '
'updates for this version will not be possible.',
'install.rdf',
signing_severity='trivial')
else:
err.notice(('testcases_installrdf',
'_test_rdf',
'missing_updateURL'),
'Missing updateURL element',
'Your add-on does not specify an update URL. This '
'means that it will be impossible for you to serve '
'updates to this add-on which are not listed publicly '
'on addons.mozilla.org.',
'install.rdf',
signing_severity='trivial')
# Once all of the predicates have been tested, make sure there are
# no mandatory elements that haven't been found.
if must_exist_once:
err.error(('testcases_installrdf',
'_test_rdf',
'missing_addon'),
'install.rdf missing element(s).',
['The element listed is a required element in the install '
'manifest specification. It must be added to your addon.',
'Missing elements: %s' % ', '.join(must_exist_once)],
'install.rdf')
PREDICATE_TESTS = {
<|code_end|>
, predict the next line using imports from the current file:
from validator import decorator
from validator.constants import PACKAGE_THEME
from validator import metadata_helpers
and context including class names, function names, and sometimes code from other files:
# Path: validator/decorator.py
# TEST_TIERS = {}
# CLEANUP_FUNCTIONS = []
# def register_test(tier=1, expected_type=None, simple=False, versions=None):
# def wrap(function):
# def register_cleanup(cleanup):
# def cleanup():
# def get_tiers():
# def get_tests(tier, type_=None):
# def version_range(guid, version, before=None, app_versions=None):
#
# Path: validator/metadata_helpers.py
# VERSION_PATTERN = re.compile('^[-+*.\w]{,32}$')
# def validate_name(err, value, source):
# def validate_id(err, value, source):
# def validate_version(err, value, source):
. Output only the next line. | 'id': metadata_helpers.validate_id, |
Given the following code snippet before the placeholder: <|code_start|>
class ManifestJsonParser(object):
"""Parser wrapper for manifest.json files."""
def __init__(self, err, data, namespace=None):
self.err = err
<|code_end|>
, predict the next line using imports from the current file:
import json
from validator import unicodehelper
from validator.constants import FIREFOX_GUID
and context including class names, function names, and sometimes code from other files:
# Path: validator/unicodehelper.py
# UNICODE_BOMS = [
# (codecs.BOM_UTF8, 'utf-8'),
# (codecs.BOM_UTF32_LE, 'utf-32-le'),
# (codecs.BOM_UTF32_BE, 'utf-32-be'),
# (codecs.BOM_UTF16_LE, 'utf-16-le'),
# (codecs.BOM_UTF16_BE, 'utf-16-be'),
# ]
# COMMON_ENCODINGS = ('latin_1', 'utf-16')
# NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
# def decode(data):
. Output only the next line. | self.data = json.loads(unicodehelper.decode(data)) |
Based on the snippet: <|code_start|> 'signing_severity': 'high',
}
SECURITY_PREF_MESSAGE = {
'description':
'Changing this preference may have severe security implications, and '
'is forbidden under most circumstances.',
'editors_only': True,
'signing_help': ('Extensions which alter these settings are allowed '
'within the Firefox add-on ecosystem by exception '
'only, and under extremely limited circumstances.',
'Please remove any reference to these preference names '
'from your add-on.'),
'signing_severity': 'high',
}
MARIONETTE_MESSAGE = {
'warning': 'Marionette should not be accessed by extensions',
'description': 'References to the Marionette service are not acceptable '
'in extensions. Please remove them.',
}
def fuel_error(traverse_node, err):
traverse_node.im_self.warning(
err_id=('js', 'traverser', 'dangerous_global'),
warning='The FUEL library is no longer supported.',
description='The FUEL library is no longer supported. Please use the '
'Add-ons SDK instead. See %s for more information.'
% MDN_DOC % 'Add-ons/SDK',
<|code_end|>
, predict the immediate next line with the help of imports:
from functools import partial
from actions import _get_as_str
from call_definitions import xpcom_constructor as xpcom_const, python_wrap
from entity_values import entity
from jstypes import JSWrapper
from validator.compat import FX47_DEFINITION
from validator.constants import MDN_DOC
import math
import actions
import call_definitions
import instanceactions
and context (classes, functions, sometimes code) from other files:
# Path: validator/compat.py
# FX47_DEFINITION = _build_definition(47, fennec=False, android=False, thunderbird=False)
. Output only the next line. | for_appversions=FX47_DEFINITION, |
Predict the next line after this snippet: <|code_start|>
class PropertiesParser(object):
"""
Parses and serializes .properties files. Even though you can pretty
much do this in your sleep, it's still useful for L10n tests.
"""
def __init__(self, dtd):
"""
Properties parsers can initialized based on a file path
(provided as a string to the path), or directly (in memory as a
StringIO object).
"""
self.entities = {}
self.items = []
if isinstance(dtd, types.StringTypes):
data = open(dtd).read()
elif isinstance(dtd, StringIO):
data = dtd.getvalue()
elif isinstance(dtd, file):
data = dtd.read()
# Create a context!
<|code_end|>
using the current file's imports:
import types
from StringIO import StringIO
from validator.contextgenerator import ContextGenerator
and any relevant context from other files:
# Path: validator/contextgenerator.py
# class ContextGenerator:
# """The context generator creates a line-by-line mapping of all files that
# are validated. It will then use that to help produce useful bits of code
# for errors, warnings, and the like."""
#
# def __init__(self, data=None):
# if isinstance(data, StringIO):
# data = data.getvalue()
#
# self.data = data.split('\n')
#
# def get_context(self, line=1, column=0):
# 'Returns a tuple containing the context for a line'
#
# line -= 1 # The line is one-based
#
# # If there is no data in the file, there can be no context.
# datalen = len(self.data)
# if datalen <= line:
# return None
#
# build = [self.data[line]]
#
# # Add surrounding lines if they're available. There must always be
# # three elements in the context.
# if line > 0:
# build.insert(0, self.data[line - 1])
# else:
# build.insert(0, None)
#
# if line < datalen - 1:
# build.append(self.data[line + 1])
# else:
# build.append(None)
#
# leading_counts = []
#
# # Count whitespace to determine how much needs to be stripped.
# lstrip_count = INFINITY
# for line in build:
# # Don't count empty/whitespace-only lines.
# if line is None or not line.strip():
# leading_counts.append(lstrip_count)
# continue
#
# # Isolate the leading whitespace.
# ws_count = len(line) - len(line.lstrip())
# leading_counts.append(ws_count)
# if ws_count < lstrip_count:
# lstrip_count = ws_count
#
# # If all of the lines were skipped over, it means everything was
# # whitespace.
# if lstrip_count == INFINITY:
# return ('', '', '')
#
# for lnum in range(3):
# # Skip edge lines.
# if not build[lnum]:
# continue
#
# line = build[lnum].strip()
#
# # Empty lines stay empty.
# if not line:
# build[lnum] = ''
# continue
#
# line = self._format_line(line, column=column, rel_line=lnum)
# line = '%s%s' % (' ' * (leading_counts[lnum] - lstrip_count), line)
#
# build[lnum] = line
#
# # Return the final output as a tuple.
# return tuple(build)
#
# def _format_line(self, data, column=0, rel_line=1):
# 'Formats a line from the data to be the appropriate length'
# line_length = len(data)
#
# if line_length > 140:
# if rel_line == 0:
# # Trim from the beginning
# data = '... %s' % data[-140:]
# elif rel_line == 1:
# # Trim surrounding the error position
# if column < 70:
# data = '%s ...' % data[:140]
# elif column > line_length - 70:
# data = '... %s' % data[-140:]
# else:
# data = '... %s ...' % data[column - 70:column + 70]
#
# elif rel_line == 2:
# # Trim from the end
# data = '%s ...' % data[:140]
#
# data = unicodehelper.decode(data)
# return data
#
# def get_line(self, position):
# 'Returns the line number that the given string position is found on'
#
# datalen = len(self.data)
# count = len(self.data[0])
# line = 1
# while count < position:
# if line >= datalen:
# break
# count += len(self.data[line]) + 1
# line += 1
#
# return line
. Output only the next line. | self.context = ContextGenerator(data) |
Predict the next line after this snippet: <|code_start|>
BAD_URL_PAT = ("url\(['\"]?(?!(chrome:|resource:))(\/\/|(ht|f)tps?:\/\/|data:)"
".*['\"]?\)")
BAD_URL = re.compile(BAD_URL_PAT, re.I)
REM_URL = re.compile("url\(['\"]?(\/\/|ht|f)tps?:\/\/.*['\"]?\)", re.I)
SKIP_TYPES = ('S', 'COMMENT')
DOWNLOADS_INDICATOR_BUG = 845408
def test_css_file(err, filename, data, line_start=1):
'Parse and test a whole CSS file.'
tokenizer = cssutils.tokenize2.Tokenizer()
<|code_end|>
using the current file's imports:
import re
import cssutils
from validator.constants import PACKAGE_THEME
from validator.contextgenerator import ContextGenerator
from validator.unicodehelper import NON_ASCII_FILTER
and any relevant context from other files:
# Path: validator/contextgenerator.py
# class ContextGenerator:
# """The context generator creates a line-by-line mapping of all files that
# are validated. It will then use that to help produce useful bits of code
# for errors, warnings, and the like."""
#
# def __init__(self, data=None):
# if isinstance(data, StringIO):
# data = data.getvalue()
#
# self.data = data.split('\n')
#
# def get_context(self, line=1, column=0):
# 'Returns a tuple containing the context for a line'
#
# line -= 1 # The line is one-based
#
# # If there is no data in the file, there can be no context.
# datalen = len(self.data)
# if datalen <= line:
# return None
#
# build = [self.data[line]]
#
# # Add surrounding lines if they're available. There must always be
# # three elements in the context.
# if line > 0:
# build.insert(0, self.data[line - 1])
# else:
# build.insert(0, None)
#
# if line < datalen - 1:
# build.append(self.data[line + 1])
# else:
# build.append(None)
#
# leading_counts = []
#
# # Count whitespace to determine how much needs to be stripped.
# lstrip_count = INFINITY
# for line in build:
# # Don't count empty/whitespace-only lines.
# if line is None or not line.strip():
# leading_counts.append(lstrip_count)
# continue
#
# # Isolate the leading whitespace.
# ws_count = len(line) - len(line.lstrip())
# leading_counts.append(ws_count)
# if ws_count < lstrip_count:
# lstrip_count = ws_count
#
# # If all of the lines were skipped over, it means everything was
# # whitespace.
# if lstrip_count == INFINITY:
# return ('', '', '')
#
# for lnum in range(3):
# # Skip edge lines.
# if not build[lnum]:
# continue
#
# line = build[lnum].strip()
#
# # Empty lines stay empty.
# if not line:
# build[lnum] = ''
# continue
#
# line = self._format_line(line, column=column, rel_line=lnum)
# line = '%s%s' % (' ' * (leading_counts[lnum] - lstrip_count), line)
#
# build[lnum] = line
#
# # Return the final output as a tuple.
# return tuple(build)
#
# def _format_line(self, data, column=0, rel_line=1):
# 'Formats a line from the data to be the appropriate length'
# line_length = len(data)
#
# if line_length > 140:
# if rel_line == 0:
# # Trim from the beginning
# data = '... %s' % data[-140:]
# elif rel_line == 1:
# # Trim surrounding the error position
# if column < 70:
# data = '%s ...' % data[:140]
# elif column > line_length - 70:
# data = '... %s' % data[-140:]
# else:
# data = '... %s ...' % data[column - 70:column + 70]
#
# elif rel_line == 2:
# # Trim from the end
# data = '%s ...' % data[:140]
#
# data = unicodehelper.decode(data)
# return data
#
# def get_line(self, position):
# 'Returns the line number that the given string position is found on'
#
# datalen = len(self.data)
# count = len(self.data[0])
# line = 1
# while count < position:
# if line >= datalen:
# break
# count += len(self.data[line]) + 1
# line += 1
#
# return line
#
# Path: validator/unicodehelper.py
# NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
. Output only the next line. | context = ContextGenerator(data) |
Here is a snippet: <|code_start|>
BAD_URL_PAT = ("url\(['\"]?(?!(chrome:|resource:))(\/\/|(ht|f)tps?:\/\/|data:)"
".*['\"]?\)")
BAD_URL = re.compile(BAD_URL_PAT, re.I)
REM_URL = re.compile("url\(['\"]?(\/\/|ht|f)tps?:\/\/.*['\"]?\)", re.I)
SKIP_TYPES = ('S', 'COMMENT')
DOWNLOADS_INDICATOR_BUG = 845408
def test_css_file(err, filename, data, line_start=1):
'Parse and test a whole CSS file.'
tokenizer = cssutils.tokenize2.Tokenizer()
context = ContextGenerator(data)
if data:
# Remove any characters which aren't printable, 7-bit ASCII.
<|code_end|>
. Write the next line using the current file imports:
import re
import cssutils
from validator.constants import PACKAGE_THEME
from validator.contextgenerator import ContextGenerator
from validator.unicodehelper import NON_ASCII_FILTER
and context from other files:
# Path: validator/contextgenerator.py
# class ContextGenerator:
# """The context generator creates a line-by-line mapping of all files that
# are validated. It will then use that to help produce useful bits of code
# for errors, warnings, and the like."""
#
# def __init__(self, data=None):
# if isinstance(data, StringIO):
# data = data.getvalue()
#
# self.data = data.split('\n')
#
# def get_context(self, line=1, column=0):
# 'Returns a tuple containing the context for a line'
#
# line -= 1 # The line is one-based
#
# # If there is no data in the file, there can be no context.
# datalen = len(self.data)
# if datalen <= line:
# return None
#
# build = [self.data[line]]
#
# # Add surrounding lines if they're available. There must always be
# # three elements in the context.
# if line > 0:
# build.insert(0, self.data[line - 1])
# else:
# build.insert(0, None)
#
# if line < datalen - 1:
# build.append(self.data[line + 1])
# else:
# build.append(None)
#
# leading_counts = []
#
# # Count whitespace to determine how much needs to be stripped.
# lstrip_count = INFINITY
# for line in build:
# # Don't count empty/whitespace-only lines.
# if line is None or not line.strip():
# leading_counts.append(lstrip_count)
# continue
#
# # Isolate the leading whitespace.
# ws_count = len(line) - len(line.lstrip())
# leading_counts.append(ws_count)
# if ws_count < lstrip_count:
# lstrip_count = ws_count
#
# # If all of the lines were skipped over, it means everything was
# # whitespace.
# if lstrip_count == INFINITY:
# return ('', '', '')
#
# for lnum in range(3):
# # Skip edge lines.
# if not build[lnum]:
# continue
#
# line = build[lnum].strip()
#
# # Empty lines stay empty.
# if not line:
# build[lnum] = ''
# continue
#
# line = self._format_line(line, column=column, rel_line=lnum)
# line = '%s%s' % (' ' * (leading_counts[lnum] - lstrip_count), line)
#
# build[lnum] = line
#
# # Return the final output as a tuple.
# return tuple(build)
#
# def _format_line(self, data, column=0, rel_line=1):
# 'Formats a line from the data to be the appropriate length'
# line_length = len(data)
#
# if line_length > 140:
# if rel_line == 0:
# # Trim from the beginning
# data = '... %s' % data[-140:]
# elif rel_line == 1:
# # Trim surrounding the error position
# if column < 70:
# data = '%s ...' % data[:140]
# elif column > line_length - 70:
# data = '... %s' % data[-140:]
# else:
# data = '... %s ...' % data[column - 70:column + 70]
#
# elif rel_line == 2:
# # Trim from the end
# data = '%s ...' % data[:140]
#
# data = unicodehelper.decode(data)
# return data
#
# def get_line(self, position):
# 'Returns the line number that the given string position is found on'
#
# datalen = len(self.data)
# count = len(self.data[0])
# line = 1
# while count < position:
# if line >= datalen:
# break
# count += len(self.data[line]) + 1
# line += 1
#
# return line
#
# Path: validator/unicodehelper.py
# NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
, which may include functions, classes, or code. Output only the next line. | data = NON_ASCII_FILTER.sub('', data) |
Predict the next line after this snippet: <|code_start|> if files is not None and files != '':
fmsg = '\tFile:\t%s'
# Nested files (subpackes) are stored in a list.
if type(files) is list:
if files[-1] == '':
files[-1] = '(none)'
verbose_output.append(fmsg % ' > '.join(files))
else:
verbose_output.append(fmsg % files)
# If there is a line number, that gets put on the end.
if message['line']:
verbose_output.append('\tLine:\t%s' % message['line'])
if message['column'] and message['column'] != 0:
verbose_output.append('\tColumn:\t%d' % message['column'])
if message.get('context'):
verbose_output.append('\tContext:')
verbose_output.extend([('\t> %s' % x
if x is not None
else '\t>' + ('-' * 20))
for x
in message['context']])
# Stick it in with the standard items.
output.append('\n')
output.append('\n'.join(verbose_output))
# Send the final output to the handler to be rendered.
<|code_end|>
using the current file's imports:
import json
import logging
import types
import uuid
import validator
from StringIO import StringIO
from validator import unicodehelper
from validator.constants import SIGNING_SEVERITIES
from validator.outputhandlers.shellcolors import OutputHandler
and any relevant context from other files:
# Path: validator/unicodehelper.py
# UNICODE_BOMS = [
# (codecs.BOM_UTF8, 'utf-8'),
# (codecs.BOM_UTF32_LE, 'utf-32-le'),
# (codecs.BOM_UTF32_BE, 'utf-32-be'),
# (codecs.BOM_UTF16_LE, 'utf-16-le'),
# (codecs.BOM_UTF16_BE, 'utf-16-be'),
# ]
# COMMON_ENCODINGS = ('latin_1', 'utf-16')
# NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
# def decode(data):
. Output only the next line. | self.handler.write(u''.join(map(unicodehelper.decode, output))) |
Given the following code snippet before the placeholder: <|code_start|> 'because they usually identify binary '
'components. Please see '
'http://addons.mozilla.org/developers/docs/'
'policies/reviews#section-binary'
' for more information on the binary content '
'review process.',
'\n'.join(flagged_files)),
editors_only=True,
signing_help=HELP,
signing_severity='medium',
filename=', '.join(flagged_files))
@decorator.register_test(tier=1)
def test_godlikea(err, xpi_package):
"""Test to make sure that the godlikea namespace is not in use."""
if 'chrome/godlikea.jar' in xpi_package:
err.error(
err_id=('testcases_packagelayout',
'test_godlikea'),
error="Banned 'godlikea' chrome namespace",
description="The 'godlikea' chrome namepsace is generated from a "
'template and should be replaced with something '
'unique to your add-on to avoid name conflicts.',
filename='chrome/godlikea.jar')
@decorator.register_test(
tier=5,
<|code_end|>
, predict the next line using imports from the current file:
from fnmatch import fnmatch as fnm
from validator.constants import (FF4_MIN, FIREFOX_GUID, FENNEC_GUID,
THUNDERBIRD_GUID as TB_GUID, ANDROID_GUID,
PACKAGE_DICTIONARY, )
from validator.decorator import version_range
import validator.decorator as decorator
and context including class names, function names, and sometimes code from other files:
# Path: validator/decorator.py
# def version_range(guid, version, before=None, app_versions=None):
# """Returns all values after (and including) `version` for the app `guid`"""
#
# if app_versions is None:
# app_versions = validator.constants.APPROVED_APPLICATIONS
# app_key = None
#
# # Support for shorthand instead of full GUIDs.
# for app_guid, app_name in APPLICATIONS.items():
# if app_name == guid:
# guid = app_guid
# break
#
# for key in app_versions.keys():
# if app_versions[key]['guid'] == guid:
# app_key = key
# break
#
# if not app_key or version not in app_versions[app_key]['versions']:
# raise Exception('Bad GUID or version provided for version range: %s'
# % version)
#
# all_versions = app_versions[app_key]['versions']
# version_pos = all_versions.index(version)
# before_pos = None
# if before is not None and before in all_versions:
# before_pos = all_versions.index(before)
#
# return all_versions[version_pos:before_pos]
. Output only the next line. | versions={FIREFOX_GUID: version_range('firefox', FF4_MIN), |
Given the code snippet: <|code_start|>"""Tests various aspects of the JS traverser."""
@mock.patch('validator.testcases.javascript.traverser.JSWrapper')
def test_js_traversal_error_reporting(JSWrapper):
"""Test that an internal error in JS traversal is correctly reported as
a system error."""
JSWrapper.side_effect = Exception('Inigo Montoya...')
<|code_end|>
, generate the next line using the imports in this file:
import mock
from .js_helper import _do_real_test_raw as _test_js
and context (functions, classes, or occasionally code) from other files:
# Path: tests/js_helper.py
# def _do_real_test_raw(script, path='foo.js', versions=None, detected_type=None,
# metadata=None, resources=None, jetpack=False):
# """Perform a JS test using a non-mock bundler."""
#
# err = ErrorBundle(for_appversions=versions or {})
# if detected_type:
# err.detected_type = detected_type
# if metadata is not None:
# err.metadata = metadata
# if resources is not None:
# err.resources = resources
# if jetpack:
# err.metadata['is_jetpack'] = True
#
# validator.testcases.content._process_file(err, MockXPI(), path, script,
# path.lower())
# return err
. Output only the next line. | err = _test_js('hello();', path='my_name_is.js') |
Given snippet: <|code_start|> signing_help='Add-ons which use `newtab-url-changed` to change '
'the new tab url are not allowed.')
# Return the addObserver handler and a general dangerous warning.
return {
'return': on_addObserver,
'dangerous': lambda a, t, e:
e.get_resource('em:bootstrap') and
'Authors of bootstrapped add-ons must take care '
'to remove any added observers '
'at shutdown.'
}
@register_entity('nsIPK11TokenDB.listTokens')
@register_entity('nsIPKCS11ModuleDB.listModules')
@register_entity('nsIPKCS11Module.listSlots')
def nsIPK11TokenDB(traverser):
traverser.err.warning(
err_id=('testcases_javascript_entity_values',
'nsIPKThings'),
warning='listTokens(), listModules() and listSlots() now return '
'nsISimpleEnumerator instead of nsIEnumerator.',
description=(
'listTokens(), listModules() and listSlots() now return '
'nsISimpleEnumerator instead of nsIEnumerator.'
'See %s for more information.' % BUGZILLA_BUG % 1220237),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from call_definitions import open_in_chrome_context
from instanceproperties import _set_HTML_property
from validator.compat import (
FX47_DEFINITION, FX48_DEFINITION, FX50_DEFINITION, FX51_DEFINITION,
FX52_DEFINITION, FX53_DEFINITION, FX54_DEFINITION)
from validator.constants import BUGZILLA_BUG, MDN_DOC
from validator.testcases.javascript.predefinedentities import (
CUSTOMIZATION_API_HELP)
and context:
# Path: validator/compat.py
# FX47_DEFINITION = _build_definition(47, fennec=False, android=False, thunderbird=False)
#
# FX48_DEFINITION = _build_definition(48, fennec=False, android=False, thunderbird=False)
#
# FX50_DEFINITION = _build_definition(50, fennec=False, android=False, thunderbird=False)
#
# FX51_DEFINITION = _build_definition(51, fennec=False, android=False, thunderbird=False)
#
# FX52_DEFINITION = _build_definition(52, fennec=False, android=False, thunderbird=False)
#
# FX53_DEFINITION = _build_definition(53, fennec=False, android=False, thunderbird=False)
#
# FX54_DEFINITION = _build_definition(54, fennec=False, android=False, thunderbird=False)
which might include code, classes, or functions. Output only the next line. | for_appversions=FX47_DEFINITION, |
Next line prediction: <|code_start|> 'listTokens(), listModules() and listSlots() now return '
'nsISimpleEnumerator instead of nsIEnumerator.'
'See %s for more information.' % BUGZILLA_BUG % 1220237),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
for_appversions=FX47_DEFINITION,
compatibility_type='error',
context=traverser.context,
tier=5)
@register_entity('nsIIOService.newChannel')
@register_entity('nsIIOService.newChannelFromURI')
@register_entity('nsIIOService.newChannelFromURIWithProxyFlags')
def nsIIOService(traverser):
traverser.err.warning(
err_id=('testcases_javascript_entity_values',
'nsIIOService'),
warning=(
'The "newChannel" functions have been deprecated in favor of '
'their new versions (ending with 2).'),
description=(
'The "newChannel" functions have been deprecated in favor of '
'their new versions (ending with 2). '
'See %s for more information.'
% MDN_DOC % 'Mozilla/Tech/XPCOM/Reference/Interface/nsIIOService'),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
<|code_end|>
. Use current file imports:
(from call_definitions import open_in_chrome_context
from instanceproperties import _set_HTML_property
from validator.compat import (
FX47_DEFINITION, FX48_DEFINITION, FX50_DEFINITION, FX51_DEFINITION,
FX52_DEFINITION, FX53_DEFINITION, FX54_DEFINITION)
from validator.constants import BUGZILLA_BUG, MDN_DOC
from validator.testcases.javascript.predefinedentities import (
CUSTOMIZATION_API_HELP))
and context including class names, function names, or small code snippets from other files:
# Path: validator/compat.py
# FX47_DEFINITION = _build_definition(47, fennec=False, android=False, thunderbird=False)
#
# FX48_DEFINITION = _build_definition(48, fennec=False, android=False, thunderbird=False)
#
# FX50_DEFINITION = _build_definition(50, fennec=False, android=False, thunderbird=False)
#
# FX51_DEFINITION = _build_definition(51, fennec=False, android=False, thunderbird=False)
#
# FX52_DEFINITION = _build_definition(52, fennec=False, android=False, thunderbird=False)
#
# FX53_DEFINITION = _build_definition(53, fennec=False, android=False, thunderbird=False)
#
# FX54_DEFINITION = _build_definition(54, fennec=False, android=False, thunderbird=False)
. Output only the next line. | for_appversions=FX48_DEFINITION, |
Continue the code snippet: <|code_start|> 'their new versions (ending with 2). '
'See %s for more information.'
% MDN_DOC % 'Mozilla/Tech/XPCOM/Reference/Interface/nsIIOService'),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
for_appversions=FX48_DEFINITION,
compatibility_type='warning',
context=traverser.context,
tier=5)
@register_entity('nsIX509Cert.getUsagesArray')
@register_entity('nsIX509Cert.requestUsagesArrayAsync')
@register_entity('nsIX509Cert.getUsagesString')
def nsIX509Cert(traverser):
traverser.err.warning(
err_id=('testcases_javascript_entity_values',
'nsIX509Cert'),
warning=(
'The methods getUsagesArray, requestUsagesArrayAsync, and '
'getUsagesString are no longer supported.'),
description=(
'The methods getUsagesArray, requestUsagesArrayAsync, and '
'getUsagesString are no longer supported.',
'See %s for more information.'
% BUGZILLA_BUG % 1284946),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
<|code_end|>
. Use current file imports:
from call_definitions import open_in_chrome_context
from instanceproperties import _set_HTML_property
from validator.compat import (
FX47_DEFINITION, FX48_DEFINITION, FX50_DEFINITION, FX51_DEFINITION,
FX52_DEFINITION, FX53_DEFINITION, FX54_DEFINITION)
from validator.constants import BUGZILLA_BUG, MDN_DOC
from validator.testcases.javascript.predefinedentities import (
CUSTOMIZATION_API_HELP)
and context (classes, functions, or code) from other files:
# Path: validator/compat.py
# FX47_DEFINITION = _build_definition(47, fennec=False, android=False, thunderbird=False)
#
# FX48_DEFINITION = _build_definition(48, fennec=False, android=False, thunderbird=False)
#
# FX50_DEFINITION = _build_definition(50, fennec=False, android=False, thunderbird=False)
#
# FX51_DEFINITION = _build_definition(51, fennec=False, android=False, thunderbird=False)
#
# FX52_DEFINITION = _build_definition(52, fennec=False, android=False, thunderbird=False)
#
# FX53_DEFINITION = _build_definition(53, fennec=False, android=False, thunderbird=False)
#
# FX54_DEFINITION = _build_definition(54, fennec=False, android=False, thunderbird=False)
. Output only the next line. | for_appversions=FX50_DEFINITION, |
Given the code snippet: <|code_start|> 'The methods getUsagesArray, requestUsagesArrayAsync, and '
'getUsagesString are no longer supported.',
'See %s for more information.'
% BUGZILLA_BUG % 1284946),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
for_appversions=FX50_DEFINITION,
compatibility_type='error',
context=traverser.context,
tier=5)
@register_entity('mozIAsyncFavicons.setAndFetchFaviconForPage')
@register_entity('mozIAsyncFavicons.replaceFaviconDataFromDataURL')
def mozIAsyncFavicons(traverser):
traverser.err.warning(
err_id=('testcases_javascript_entity_values',
'mozIAsyncFavicons'),
warning=(
'The methods setAndFetchFaviconForPage and '
'replaceFaviconDataFromDataURL now default to using a null '
'principal for security reasons. An appropriate principal should '
'be passed if different behavior is required.'),
description=(
'See %s for more information.'
% BUGZILLA_BUG % 1227289),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
<|code_end|>
, generate the next line using the imports in this file:
from call_definitions import open_in_chrome_context
from instanceproperties import _set_HTML_property
from validator.compat import (
FX47_DEFINITION, FX48_DEFINITION, FX50_DEFINITION, FX51_DEFINITION,
FX52_DEFINITION, FX53_DEFINITION, FX54_DEFINITION)
from validator.constants import BUGZILLA_BUG, MDN_DOC
from validator.testcases.javascript.predefinedentities import (
CUSTOMIZATION_API_HELP)
and context (functions, classes, or occasionally code) from other files:
# Path: validator/compat.py
# FX47_DEFINITION = _build_definition(47, fennec=False, android=False, thunderbird=False)
#
# FX48_DEFINITION = _build_definition(48, fennec=False, android=False, thunderbird=False)
#
# FX50_DEFINITION = _build_definition(50, fennec=False, android=False, thunderbird=False)
#
# FX51_DEFINITION = _build_definition(51, fennec=False, android=False, thunderbird=False)
#
# FX52_DEFINITION = _build_definition(52, fennec=False, android=False, thunderbird=False)
#
# FX53_DEFINITION = _build_definition(53, fennec=False, android=False, thunderbird=False)
#
# FX54_DEFINITION = _build_definition(54, fennec=False, android=False, thunderbird=False)
. Output only the next line. | for_appversions=FX51_DEFINITION, |
Predict the next line after this snippet: <|code_start|> 'The methods setAndFetchFaviconForPage and '
'replaceFaviconDataFromDataURL now default to using a null '
'principal for security reasons. An appropriate principal should '
'be passed if different behavior is required.'),
description=(
'See %s for more information.'
% BUGZILLA_BUG % 1227289),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
for_appversions=FX51_DEFINITION,
compatibility_type='warning',
context=traverser.context,
tier=5)
@register_entity('nsISupportsArray')
def nsISupportsArray(traverser):
traverser.err.warning(
err_id=('testcases_javascript_entity_values', 'nsISupportsArray'),
warning=(
'The nsISupportsArray interface is deprecated and is being '
'replaced by nsIArray.'),
description=(
'The nsISupportsArray interface is deprecated and is being '
'replaced by nsIArray. See %s for more information.'
% BUGZILLA_BUG % 792209),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
<|code_end|>
using the current file's imports:
from call_definitions import open_in_chrome_context
from instanceproperties import _set_HTML_property
from validator.compat import (
FX47_DEFINITION, FX48_DEFINITION, FX50_DEFINITION, FX51_DEFINITION,
FX52_DEFINITION, FX53_DEFINITION, FX54_DEFINITION)
from validator.constants import BUGZILLA_BUG, MDN_DOC
from validator.testcases.javascript.predefinedentities import (
CUSTOMIZATION_API_HELP)
and any relevant context from other files:
# Path: validator/compat.py
# FX47_DEFINITION = _build_definition(47, fennec=False, android=False, thunderbird=False)
#
# FX48_DEFINITION = _build_definition(48, fennec=False, android=False, thunderbird=False)
#
# FX50_DEFINITION = _build_definition(50, fennec=False, android=False, thunderbird=False)
#
# FX51_DEFINITION = _build_definition(51, fennec=False, android=False, thunderbird=False)
#
# FX52_DEFINITION = _build_definition(52, fennec=False, android=False, thunderbird=False)
#
# FX53_DEFINITION = _build_definition(53, fennec=False, android=False, thunderbird=False)
#
# FX54_DEFINITION = _build_definition(54, fennec=False, android=False, thunderbird=False)
. Output only the next line. | for_appversions=FX52_DEFINITION, |
Predict the next line after this snippet: <|code_start|> err_id=('testcases_javascript_entity_values', 'nsISupportsArray'),
warning=(
'The nsISupportsArray interface is deprecated and is being '
'replaced by nsIArray.'),
description=(
'The nsISupportsArray interface is deprecated and is being '
'replaced by nsIArray. See %s for more information.'
% BUGZILLA_BUG % 792209),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
for_appversions=FX52_DEFINITION,
compatibility_type='warning',
context=traverser.context,
tier=5)
@register_entity('nsINavBookmarksService.getURIForKeyword')
def nsINavBookmarksService(traverser):
traverser.err.warning(
err_id=('testcases_javascript_entity_values',
'nsINavBookmarksService'),
warning='The getURIForKeyword function was removed.',
description=(
'You can use PlacesUtils.keywords.fetch instead. '
'See %s for more information.'
% BUGZILLA_BUG % 1329926),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
<|code_end|>
using the current file's imports:
from call_definitions import open_in_chrome_context
from instanceproperties import _set_HTML_property
from validator.compat import (
FX47_DEFINITION, FX48_DEFINITION, FX50_DEFINITION, FX51_DEFINITION,
FX52_DEFINITION, FX53_DEFINITION, FX54_DEFINITION)
from validator.constants import BUGZILLA_BUG, MDN_DOC
from validator.testcases.javascript.predefinedentities import (
CUSTOMIZATION_API_HELP)
and any relevant context from other files:
# Path: validator/compat.py
# FX47_DEFINITION = _build_definition(47, fennec=False, android=False, thunderbird=False)
#
# FX48_DEFINITION = _build_definition(48, fennec=False, android=False, thunderbird=False)
#
# FX50_DEFINITION = _build_definition(50, fennec=False, android=False, thunderbird=False)
#
# FX51_DEFINITION = _build_definition(51, fennec=False, android=False, thunderbird=False)
#
# FX52_DEFINITION = _build_definition(52, fennec=False, android=False, thunderbird=False)
#
# FX53_DEFINITION = _build_definition(53, fennec=False, android=False, thunderbird=False)
#
# FX54_DEFINITION = _build_definition(54, fennec=False, android=False, thunderbird=False)
. Output only the next line. | for_appversions=FX53_DEFINITION, |
Here is a snippet: <|code_start|> 'nsIPK11TokenDB', 'findTokenByName'),
warning='Calling findTokenByName("") is no longer valid.',
description=(
'Calling findTokenByName("") is no longer valid. '
'If you need to determine if there\'s a master password '
'set, please see %s for more information.' % link),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
for_appversions=FX53_DEFINITION,
compatibility_type='error',
context=traverser.context,
tier=5)
return {'return': _return}
@register_entity(u'nsIFormHistory2')
def nsIFormHistory2(traverser):
traverser.err.warning(
err_id=('testcases_javascript_entity_values', 'nsIFormHistory2'),
warning=(
'The nsIFormHistory2 interface has been removed. You can use '
'FormHistory.jsm instead.'),
description=(
'The nsIFormHistory2 interface has been removed. You can use '
'FormHistory.jsm instead. See %s for more information.'
% BUGZILLA_BUG % 876002),
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
<|code_end|>
. Write the next line using the current file imports:
from call_definitions import open_in_chrome_context
from instanceproperties import _set_HTML_property
from validator.compat import (
FX47_DEFINITION, FX48_DEFINITION, FX50_DEFINITION, FX51_DEFINITION,
FX52_DEFINITION, FX53_DEFINITION, FX54_DEFINITION)
from validator.constants import BUGZILLA_BUG, MDN_DOC
from validator.testcases.javascript.predefinedentities import (
CUSTOMIZATION_API_HELP)
and context from other files:
# Path: validator/compat.py
# FX47_DEFINITION = _build_definition(47, fennec=False, android=False, thunderbird=False)
#
# FX48_DEFINITION = _build_definition(48, fennec=False, android=False, thunderbird=False)
#
# FX50_DEFINITION = _build_definition(50, fennec=False, android=False, thunderbird=False)
#
# FX51_DEFINITION = _build_definition(51, fennec=False, android=False, thunderbird=False)
#
# FX52_DEFINITION = _build_definition(52, fennec=False, android=False, thunderbird=False)
#
# FX53_DEFINITION = _build_definition(53, fennec=False, android=False, thunderbird=False)
#
# FX54_DEFINITION = _build_definition(54, fennec=False, android=False, thunderbird=False)
, which may include functions, classes, or code. Output only the next line. | for_appversions=FX54_DEFINITION, |
Next line prediction: <|code_start|># Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Model test classes for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
class ModelTestCase(TestCase):
"""Test djangolg models."""
def setUp(self):
"""Create the necessary objects."""
credentials = models.Credential.objects.create(
name="test-credentials", type=models.Credential.CRED_TYPE_PASSWD,
username="test", password="test"
)
location = models.Location.objects.create(
name="test-location", sitecode="test01"
)
router = models.Router.objects.create(
hostname="test-router", dialect="ios",
location=location, credentials=credentials
)
models.Log.objects.create(
<|code_end|>
. Use current file imports:
(from django.test import TestCase
from djangolg import events, models)
and context including class names, function names, or small code snippets from other files:
# Path: djangolg/events.py
# EVENT_START = 0
# EVENT_QUERY_ACCEPT = 1
# EVENT_QUERY_REJECT = 2
# EVENT_QUERY_INVALID = 3
# EVENT_QUERY_FAILED = 4
# EVENT_QUERY_ERROR = 5
# EVENT_CHOICES = (
# (EVENT_START, "Session Started"),
# (EVENT_QUERY_ACCEPT, "Query Authorised"),
# (EVENT_QUERY_REJECT, "Query Rejected"),
# (EVENT_QUERY_INVALID, "Invalid Query"),
# (EVENT_QUERY_FAILED, "Query Execution Failed"),
# (EVENT_QUERY_ERROR, "Unhandled Error")
# )
#
# Path: djangolg/models.py
# class Router(models.Model):
# class Location(models.Model):
# class Credential(models.Model):
# class Log(models.Model):
# def label(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# CRED_TYPE_PASSWD = 0
# CRED_TYPE_PUBKEY = 1
# CRED_TYPE_CHOICES = (
# (CRED_TYPE_PASSWD, "Password"),
# (CRED_TYPE_PUBKEY, "Public Key"),
# )
. Output only the next line. | event=events.EVENT_START, |
Here is a snippet: <|code_start|># Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Model test classes for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
class ModelTestCase(TestCase):
"""Test djangolg models."""
def setUp(self):
"""Create the necessary objects."""
<|code_end|>
. Write the next line using the current file imports:
from django.test import TestCase
from djangolg import events, models
and context from other files:
# Path: djangolg/events.py
# EVENT_START = 0
# EVENT_QUERY_ACCEPT = 1
# EVENT_QUERY_REJECT = 2
# EVENT_QUERY_INVALID = 3
# EVENT_QUERY_FAILED = 4
# EVENT_QUERY_ERROR = 5
# EVENT_CHOICES = (
# (EVENT_START, "Session Started"),
# (EVENT_QUERY_ACCEPT, "Query Authorised"),
# (EVENT_QUERY_REJECT, "Query Rejected"),
# (EVENT_QUERY_INVALID, "Invalid Query"),
# (EVENT_QUERY_FAILED, "Query Execution Failed"),
# (EVENT_QUERY_ERROR, "Unhandled Error")
# )
#
# Path: djangolg/models.py
# class Router(models.Model):
# class Location(models.Model):
# class Credential(models.Model):
# class Log(models.Model):
# def label(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# CRED_TYPE_PASSWD = 0
# CRED_TYPE_PUBKEY = 1
# CRED_TYPE_CHOICES = (
# (CRED_TYPE_PASSWD, "Password"),
# (CRED_TYPE_PUBKEY, "Public Key"),
# )
, which may include functions, classes, or code. Output only the next line. | credentials = models.Credential.objects.create( |
Using the snippet: <|code_start|># License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Base method class for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
class BaseMethod(object):
"""Base method class."""
name = None
title = None
new = False
description = None
target_field = None
options = None
test_target = None
def __init__(self, dialect=None):
"""Initialise new instance."""
if dialect:
<|code_end|>
, determine the next line of code. You have imports:
from djangolg import exceptions
from djangolg.dialects.base import BaseDialect
from djangolg.dialects.base import BaseDialect
and context (class names, function names, or code) available:
# Path: djangolg/exceptions.py
# DEFAULT_EVENT = events.EVENT_QUERY_ERROR
# DEFAULT_STATUS = 500
# DEFAULT_REASON = "An unhandled error occured. \
# Please try again or contact support."
# class LookingGlassError(Exception):
# class TypeCheckError(TypeError):
# def log_error(self):
# def check_type(instance=None, classinfo=None):
# def default_error_message(e=None):
. Output only the next line. | exceptions.check_type(instance=dialect, classinfo=BaseDialect) |
Using the snippet: <|code_start|># Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Types test classes for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
class TypesTestCase(TestCase):
"""Test djangolg types."""
def test_ip_prefix_type(self):
"""Test IPPrefix type."""
network = '192.0.2.0/24'
host = '192.0.2.1'
<|code_end|>
, determine the next line of code. You have imports:
from django.test import TestCase
from djangolg import types
and context (class names, function names, or code) available:
# Path: djangolg/types.py
# class IPPrefix(object):
# HOST = 0
# PREFIX = 1
# def __init__(self, value):
# def __str__(self):
# def __unicode__(self):
. Output only the next line. | assert "{}".format(types.IPPrefix(network)) == network |
Based on the snippet: <|code_start|> return self._data['signed']
def validate(self, key, life=None):
"""Validate key value."""
if not life and settings.LIFETIME:
life = settings.LIFETIME
try:
clear = self.signer.unsign(key, max_age=life)
except SignatureExpired as e:
raise KeyValidityExpired("{}".format(e))
if self.clear == clear:
return True
else:
raise KeyValueMismatchError(keyval=clear, refval=self.clear)
def __str__(self):
"""Return string representation."""
return self.signed
def __unicode__(self):
"""Return string representation."""
return self.__str__() # pragma: no cover
class KeyValidationError(exceptions.LookingGlassError):
"""Generic exception raised if key validation fails."""
http_status = 401
http_reason = "An error occured during authorisation key validation. \
Please try again or contact support."
<|code_end|>
, predict the immediate next line with the help of imports:
from django.core.signing import SignatureExpired, TimestampSigner
from djangolg import events, exceptions, settings
and context (classes, functions, sometimes code) from other files:
# Path: djangolg/events.py
# EVENT_START = 0
# EVENT_QUERY_ACCEPT = 1
# EVENT_QUERY_REJECT = 2
# EVENT_QUERY_INVALID = 3
# EVENT_QUERY_FAILED = 4
# EVENT_QUERY_ERROR = 5
# EVENT_CHOICES = (
# (EVENT_START, "Session Started"),
# (EVENT_QUERY_ACCEPT, "Query Authorised"),
# (EVENT_QUERY_REJECT, "Query Rejected"),
# (EVENT_QUERY_INVALID, "Invalid Query"),
# (EVENT_QUERY_FAILED, "Query Execution Failed"),
# (EVENT_QUERY_ERROR, "Unhandled Error")
# )
#
# Path: djangolg/exceptions.py
# DEFAULT_EVENT = events.EVENT_QUERY_ERROR
# DEFAULT_STATUS = 500
# DEFAULT_REASON = "An unhandled error occured. \
# Please try again or contact support."
# class LookingGlassError(Exception):
# class TypeCheckError(TypeError):
# def log_error(self):
# def check_type(instance=None, classinfo=None):
# def default_error_message(e=None):
#
# Path: djangolg/settings.py
# NETNAME = getattr(settings, 'DJANGOLG_NETNAME', "Example Network")
# GENERAL_EMAIL = getattr(settings, 'DJANGOLG_GENERAL_EMAIL',
# "contact@example.com")
# SUPPORT_EMAIL = getattr(settings, 'DJANGOLG_SUPPORT_EMAIL', None)
# NOC_EMAIL = getattr(settings, 'DJANGOLG_NOC_EMAIL', None)
# PEERING_EMAIL = getattr(settings, 'DJANGOLG_PEERING_EMAIL', None)
# ROUTER_LABEL = getattr(
# settings, 'DJANGOLG_ROUTER_LABEL',
# lambda router:
# router.location.name if router.location else str(router)
# )
# LIFETIME = getattr(settings, 'DJANGOLG_LIFETIME', 300)
# MAX_REQUESTS = getattr(settings, 'DJANGOLG_MAX_REQUESTS', 20)
# AUP_LINK = getattr(settings, 'DJANGOLG_AUP_LINK', None)
# RECAPTCHA_ON = getattr(settings, 'DJANGOLG_RECAPTCHA_ON', False)
# RECAPTCHA_URL = getattr(settings, 'DJANGOLG_RECAPTCHA_URL',
# 'https://www.google.com/recaptcha/api/siteverify')
# RECAPTCHA_SITE_KEY = getattr(settings, 'DJANGOLG_RECAPTCHA_SITE_KEY', None)
# RECAPTCHA_SECRET_KEY = getattr(settings, 'DJANGOLG_RECAPTCHA_SECRET_KEY', None)
# BASE_TEMPLATE = getattr(settings, 'DJANGOLG_BASE_TEMPLATE',
# 'djangolg/base.html')
# LOGO = getattr(settings, 'DJANGOLG_LOGO', 'djangolg/img/logo.jpg')
# SMALL_LOGO = getattr(settings, 'DJANGOLG_SMALL_LOGO',
# 'djangolg/img/small_logo.jpg')
# FAVICON = getattr(settings, 'DJANGOLG_FAVICON', 'djangolg/img/favicon.ico')
# NAV_IMG = getattr(settings, 'DJANGOLG_NAV_IMG', None)
# FORMATTED_OUTPUT = getattr(settings, 'DJANGOLG_FORMATTED_OUTPUT', False)
# DEBUG = getattr(settings, 'DJANGOLG_DEBUG', False)
# _DEFAULT_METHODS = [
# 'djangolg.methods.bgp_prefix.BGPPrefixMethod',
# 'djangolg.methods.bgp_as_path.BGPASPathMethod',
# 'djangolg.methods.bgp_community.BGPCommunityMethod',
# 'djangolg.methods.ping.PingMethod',
# 'djangolg.methods.traceroute.TracerouteMethod',
# ]
# CUSTOM_METHODS = getattr(settings, 'DJANGOLG_CUSTOM_METHODS', [])
# METHODS = getattr(settings, 'DJANGOLG_METHODS',
# _DEFAULT_METHODS + CUSTOM_METHODS)
# _DEFAULT_DIALECTS = [
# 'djangolg.dialects.cisco_ios.CiscoIOSDialect',
# ]
# CUSTOM_DIALECTS = getattr(settings, 'DJANGOLG_CUSTOM_DIALECTS', [])
# DIALECTS = getattr(settings, 'DJANGOLG_DIALECTS',
# _DEFAULT_DIALECTS + CUSTOM_DIALECTS)
. Output only the next line. | log_event = events.EVENT_QUERY_REJECT |
Continue the code snippet: <|code_start|> """Get cleartext key value."""
return self._data['clear']
@property
def signed(self):
"""Get cyphertext key value."""
return self._data['signed']
def validate(self, key, life=None):
"""Validate key value."""
if not life and settings.LIFETIME:
life = settings.LIFETIME
try:
clear = self.signer.unsign(key, max_age=life)
except SignatureExpired as e:
raise KeyValidityExpired("{}".format(e))
if self.clear == clear:
return True
else:
raise KeyValueMismatchError(keyval=clear, refval=self.clear)
def __str__(self):
"""Return string representation."""
return self.signed
def __unicode__(self):
"""Return string representation."""
return self.__str__() # pragma: no cover
<|code_end|>
. Use current file imports:
from django.core.signing import SignatureExpired, TimestampSigner
from djangolg import events, exceptions, settings
and context (classes, functions, or code) from other files:
# Path: djangolg/events.py
# EVENT_START = 0
# EVENT_QUERY_ACCEPT = 1
# EVENT_QUERY_REJECT = 2
# EVENT_QUERY_INVALID = 3
# EVENT_QUERY_FAILED = 4
# EVENT_QUERY_ERROR = 5
# EVENT_CHOICES = (
# (EVENT_START, "Session Started"),
# (EVENT_QUERY_ACCEPT, "Query Authorised"),
# (EVENT_QUERY_REJECT, "Query Rejected"),
# (EVENT_QUERY_INVALID, "Invalid Query"),
# (EVENT_QUERY_FAILED, "Query Execution Failed"),
# (EVENT_QUERY_ERROR, "Unhandled Error")
# )
#
# Path: djangolg/exceptions.py
# DEFAULT_EVENT = events.EVENT_QUERY_ERROR
# DEFAULT_STATUS = 500
# DEFAULT_REASON = "An unhandled error occured. \
# Please try again or contact support."
# class LookingGlassError(Exception):
# class TypeCheckError(TypeError):
# def log_error(self):
# def check_type(instance=None, classinfo=None):
# def default_error_message(e=None):
#
# Path: djangolg/settings.py
# NETNAME = getattr(settings, 'DJANGOLG_NETNAME', "Example Network")
# GENERAL_EMAIL = getattr(settings, 'DJANGOLG_GENERAL_EMAIL',
# "contact@example.com")
# SUPPORT_EMAIL = getattr(settings, 'DJANGOLG_SUPPORT_EMAIL', None)
# NOC_EMAIL = getattr(settings, 'DJANGOLG_NOC_EMAIL', None)
# PEERING_EMAIL = getattr(settings, 'DJANGOLG_PEERING_EMAIL', None)
# ROUTER_LABEL = getattr(
# settings, 'DJANGOLG_ROUTER_LABEL',
# lambda router:
# router.location.name if router.location else str(router)
# )
# LIFETIME = getattr(settings, 'DJANGOLG_LIFETIME', 300)
# MAX_REQUESTS = getattr(settings, 'DJANGOLG_MAX_REQUESTS', 20)
# AUP_LINK = getattr(settings, 'DJANGOLG_AUP_LINK', None)
# RECAPTCHA_ON = getattr(settings, 'DJANGOLG_RECAPTCHA_ON', False)
# RECAPTCHA_URL = getattr(settings, 'DJANGOLG_RECAPTCHA_URL',
# 'https://www.google.com/recaptcha/api/siteverify')
# RECAPTCHA_SITE_KEY = getattr(settings, 'DJANGOLG_RECAPTCHA_SITE_KEY', None)
# RECAPTCHA_SECRET_KEY = getattr(settings, 'DJANGOLG_RECAPTCHA_SECRET_KEY', None)
# BASE_TEMPLATE = getattr(settings, 'DJANGOLG_BASE_TEMPLATE',
# 'djangolg/base.html')
# LOGO = getattr(settings, 'DJANGOLG_LOGO', 'djangolg/img/logo.jpg')
# SMALL_LOGO = getattr(settings, 'DJANGOLG_SMALL_LOGO',
# 'djangolg/img/small_logo.jpg')
# FAVICON = getattr(settings, 'DJANGOLG_FAVICON', 'djangolg/img/favicon.ico')
# NAV_IMG = getattr(settings, 'DJANGOLG_NAV_IMG', None)
# FORMATTED_OUTPUT = getattr(settings, 'DJANGOLG_FORMATTED_OUTPUT', False)
# DEBUG = getattr(settings, 'DJANGOLG_DEBUG', False)
# _DEFAULT_METHODS = [
# 'djangolg.methods.bgp_prefix.BGPPrefixMethod',
# 'djangolg.methods.bgp_as_path.BGPASPathMethod',
# 'djangolg.methods.bgp_community.BGPCommunityMethod',
# 'djangolg.methods.ping.PingMethod',
# 'djangolg.methods.traceroute.TracerouteMethod',
# ]
# CUSTOM_METHODS = getattr(settings, 'DJANGOLG_CUSTOM_METHODS', [])
# METHODS = getattr(settings, 'DJANGOLG_METHODS',
# _DEFAULT_METHODS + CUSTOM_METHODS)
# _DEFAULT_DIALECTS = [
# 'djangolg.dialects.cisco_ios.CiscoIOSDialect',
# ]
# CUSTOM_DIALECTS = getattr(settings, 'DJANGOLG_CUSTOM_DIALECTS', [])
# DIALECTS = getattr(settings, 'DJANGOLG_DIALECTS',
# _DEFAULT_DIALECTS + CUSTOM_DIALECTS)
. Output only the next line. | class KeyValidationError(exceptions.LookingGlassError): |
Predict the next line for this snippet: <|code_start|>
from __future__ import print_function
from __future__ import unicode_literals
class AuthKey(object):
"""Command authorisation key class."""
def __init__(self, value=None):
"""Initialise new AuthKey instance."""
self.signer = TimestampSigner(salt="auth")
self._data = {
'clear': value,
'signed': self.signer.sign(value)
}
@property
def clear(self):
"""Get cleartext key value."""
return self._data['clear']
@property
def signed(self):
"""Get cyphertext key value."""
return self._data['signed']
def validate(self, key, life=None):
"""Validate key value."""
<|code_end|>
with the help of current file imports:
from django.core.signing import SignatureExpired, TimestampSigner
from djangolg import events, exceptions, settings
and context from other files:
# Path: djangolg/events.py
# EVENT_START = 0
# EVENT_QUERY_ACCEPT = 1
# EVENT_QUERY_REJECT = 2
# EVENT_QUERY_INVALID = 3
# EVENT_QUERY_FAILED = 4
# EVENT_QUERY_ERROR = 5
# EVENT_CHOICES = (
# (EVENT_START, "Session Started"),
# (EVENT_QUERY_ACCEPT, "Query Authorised"),
# (EVENT_QUERY_REJECT, "Query Rejected"),
# (EVENT_QUERY_INVALID, "Invalid Query"),
# (EVENT_QUERY_FAILED, "Query Execution Failed"),
# (EVENT_QUERY_ERROR, "Unhandled Error")
# )
#
# Path: djangolg/exceptions.py
# DEFAULT_EVENT = events.EVENT_QUERY_ERROR
# DEFAULT_STATUS = 500
# DEFAULT_REASON = "An unhandled error occured. \
# Please try again or contact support."
# class LookingGlassError(Exception):
# class TypeCheckError(TypeError):
# def log_error(self):
# def check_type(instance=None, classinfo=None):
# def default_error_message(e=None):
#
# Path: djangolg/settings.py
# NETNAME = getattr(settings, 'DJANGOLG_NETNAME', "Example Network")
# GENERAL_EMAIL = getattr(settings, 'DJANGOLG_GENERAL_EMAIL',
# "contact@example.com")
# SUPPORT_EMAIL = getattr(settings, 'DJANGOLG_SUPPORT_EMAIL', None)
# NOC_EMAIL = getattr(settings, 'DJANGOLG_NOC_EMAIL', None)
# PEERING_EMAIL = getattr(settings, 'DJANGOLG_PEERING_EMAIL', None)
# ROUTER_LABEL = getattr(
# settings, 'DJANGOLG_ROUTER_LABEL',
# lambda router:
# router.location.name if router.location else str(router)
# )
# LIFETIME = getattr(settings, 'DJANGOLG_LIFETIME', 300)
# MAX_REQUESTS = getattr(settings, 'DJANGOLG_MAX_REQUESTS', 20)
# AUP_LINK = getattr(settings, 'DJANGOLG_AUP_LINK', None)
# RECAPTCHA_ON = getattr(settings, 'DJANGOLG_RECAPTCHA_ON', False)
# RECAPTCHA_URL = getattr(settings, 'DJANGOLG_RECAPTCHA_URL',
# 'https://www.google.com/recaptcha/api/siteverify')
# RECAPTCHA_SITE_KEY = getattr(settings, 'DJANGOLG_RECAPTCHA_SITE_KEY', None)
# RECAPTCHA_SECRET_KEY = getattr(settings, 'DJANGOLG_RECAPTCHA_SECRET_KEY', None)
# BASE_TEMPLATE = getattr(settings, 'DJANGOLG_BASE_TEMPLATE',
# 'djangolg/base.html')
# LOGO = getattr(settings, 'DJANGOLG_LOGO', 'djangolg/img/logo.jpg')
# SMALL_LOGO = getattr(settings, 'DJANGOLG_SMALL_LOGO',
# 'djangolg/img/small_logo.jpg')
# FAVICON = getattr(settings, 'DJANGOLG_FAVICON', 'djangolg/img/favicon.ico')
# NAV_IMG = getattr(settings, 'DJANGOLG_NAV_IMG', None)
# FORMATTED_OUTPUT = getattr(settings, 'DJANGOLG_FORMATTED_OUTPUT', False)
# DEBUG = getattr(settings, 'DJANGOLG_DEBUG', False)
# _DEFAULT_METHODS = [
# 'djangolg.methods.bgp_prefix.BGPPrefixMethod',
# 'djangolg.methods.bgp_as_path.BGPASPathMethod',
# 'djangolg.methods.bgp_community.BGPCommunityMethod',
# 'djangolg.methods.ping.PingMethod',
# 'djangolg.methods.traceroute.TracerouteMethod',
# ]
# CUSTOM_METHODS = getattr(settings, 'DJANGOLG_CUSTOM_METHODS', [])
# METHODS = getattr(settings, 'DJANGOLG_METHODS',
# _DEFAULT_METHODS + CUSTOM_METHODS)
# _DEFAULT_DIALECTS = [
# 'djangolg.dialects.cisco_ios.CiscoIOSDialect',
# ]
# CUSTOM_DIALECTS = getattr(settings, 'DJANGOLG_CUSTOM_DIALECTS', [])
# DIALECTS = getattr(settings, 'DJANGOLG_DIALECTS',
# _DEFAULT_DIALECTS + CUSTOM_DIALECTS)
, which may contain function names, class names, or code. Output only the next line. | if not life and settings.LIFETIME: |
Given the following code snippet before the placeholder: <|code_start|># Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Key test classes for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
class KeyTestCase(TestCase):
"""Test djangolg keys."""
def test_validate_auth_key(self):
"""Test AuthKey validation."""
value = "good_value"
other_value = "bad_value"
<|code_end|>
, predict the next line using imports from the current file:
import time
from django.test import TestCase
from djangolg import keys
and context including class names, function names, and sometimes code from other files:
# Path: djangolg/keys.py
# class AuthKey(object):
# class KeyValidationError(exceptions.LookingGlassError):
# class KeyValueMismatchError(KeyValidationError):
# class KeyValidityExpired(KeyValidationError, SignatureExpired):
# def __init__(self, value=None):
# def clear(self):
# def signed(self):
# def validate(self, key, life=None):
# def __str__(self):
# def __unicode__(self):
# def __init__(self, keyval=None, refval=None, *args, **kwargs):
. Output only the next line. | key = keys.AuthKey(value=value) |
Given the following code snippet before the placeholder: <|code_start|># Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Helper functions for djangolg view classes."""
from __future__ import print_function
from __future__ import unicode_literals
def get_src(request=None):
"""Get the source address of a request."""
address = None
<|code_end|>
, predict the next line using imports from the current file:
from django.http import HttpRequest
from djangolg import exceptions
and context including class names, function names, and sometimes code from other files:
# Path: djangolg/exceptions.py
# DEFAULT_EVENT = events.EVENT_QUERY_ERROR
# DEFAULT_STATUS = 500
# DEFAULT_REASON = "An unhandled error occured. \
# Please try again or contact support."
# class LookingGlassError(Exception):
# class TypeCheckError(TypeError):
# def log_error(self):
# def check_type(instance=None, classinfo=None):
# def default_error_message(e=None):
. Output only the next line. | exceptions.check_type(request, HttpRequest) |
Continue the code snippet: <|code_start|># Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Exception test classes for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
class ExceptionTestCase(TestCase):
"""Test djangolg custom exceptions."""
def test_type_check_helper(self):
"""Test check_type helper."""
class DummyClass(object): # noqa
"""A dummy class for testing purposes."""
pass
class DummySubClass(DummyClass): # noqa
"""A dummy subclass for testing purposes."""
pass
# these checks should pass
pass_dict = {
(0,): tuple, 0: int,
DummyClass: type, DummySubClass(): DummyClass,
}
for instance, classinfo in pass_dict.items():
<|code_end|>
. Use current file imports:
from django.test import TestCase
from djangolg import exceptions
and context (classes, functions, or code) from other files:
# Path: djangolg/exceptions.py
# DEFAULT_EVENT = events.EVENT_QUERY_ERROR
# DEFAULT_STATUS = 500
# DEFAULT_REASON = "An unhandled error occured. \
# Please try again or contact support."
# class LookingGlassError(Exception):
# class TypeCheckError(TypeError):
# def log_error(self):
# def check_type(instance=None, classinfo=None):
# def default_error_message(e=None):
. Output only the next line. | exceptions.check_type(instance=instance, classinfo=classinfo) |
Predict the next line for this snippet: <|code_start|># Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Field test classes for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
class FieldTestCase(TestCase):
"""Test djangolg fields."""
def test_ip_prefix_field(self):
"""Test IPPrefixField deserialisation."""
ipv4_prefix = "192.0.2.0/24"
ipv6_prefix = "2001:db8::/32"
not_prefix = "A.B.C.D/L"
<|code_end|>
with the help of current file imports:
from django.core.exceptions import ValidationError
from django.test import TestCase
from djangolg import fields, models
from tests.models import DummyRouterWithLabel, DummyRouterWithoutLabel
and context from other files:
# Path: djangolg/fields.py
# class IPPrefixField(forms.CharField):
# class IPAddressField(forms.CharField):
# class RouterChoiceField(forms.ModelChoiceField):
# def to_python(self, value=None):
# def to_python(self, value=None):
# def label_from_instance(self, obj):
#
# Path: djangolg/models.py
# class Router(models.Model):
# class Location(models.Model):
# class Credential(models.Model):
# class Log(models.Model):
# def label(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# CRED_TYPE_PASSWD = 0
# CRED_TYPE_PUBKEY = 1
# CRED_TYPE_CHOICES = (
# (CRED_TYPE_PASSWD, "Password"),
# (CRED_TYPE_PUBKEY, "Public Key"),
# )
#
# Path: tests/models.py
# class DummyRouterWithLabel(models.Router):
# """Dummy router subclass for testing."""
#
# class Meta:
# """Meta class."""
#
# proxy = True
#
# label = "router-label-string"
#
# class DummyRouterWithoutLabel(models.Router):
# """Dummy router subclass for testing."""
#
# class Meta:
# """Meta class."""
#
# proxy = True
#
# @property
# def label(self):
# """Raise error."""
# raise NotImplementedError
, which may contain function names, class names, or code. Output only the next line. | field = fields.IPPrefixField() |
Using the snippet: <|code_start|> def test_ip_prefix_field(self):
"""Test IPPrefixField deserialisation."""
ipv4_prefix = "192.0.2.0/24"
ipv6_prefix = "2001:db8::/32"
not_prefix = "A.B.C.D/L"
field = fields.IPPrefixField()
assert field.to_python() is None
assert "{}".format(field.to_python(ipv4_prefix)) == ipv4_prefix
assert "{}".format(field.to_python(ipv6_prefix)) == ipv6_prefix
try:
field.to_python(not_prefix)
except Exception as e:
assert isinstance(e, ValidationError)
def test_ip_address_field(self):
"""Test IPAddressField deserialisation."""
ipv4_address = "192.0.2.1"
ipv6_address = "2001:db8::1"
not_address = "A.B.C.D"
field = fields.IPAddressField()
assert field.to_python() is None
assert "{}".format(field.to_python(ipv4_address)) == ipv4_address
assert "{}".format(field.to_python(ipv6_address)) == ipv6_address
try:
field.to_python(not_address)
except Exception as e:
assert isinstance(e, ValidationError)
def test_router_label(self):
"""Test router label rendering."""
<|code_end|>
, determine the next line of code. You have imports:
from django.core.exceptions import ValidationError
from django.test import TestCase
from djangolg import fields, models
from tests.models import DummyRouterWithLabel, DummyRouterWithoutLabel
and context (class names, function names, or code) available:
# Path: djangolg/fields.py
# class IPPrefixField(forms.CharField):
# class IPAddressField(forms.CharField):
# class RouterChoiceField(forms.ModelChoiceField):
# def to_python(self, value=None):
# def to_python(self, value=None):
# def label_from_instance(self, obj):
#
# Path: djangolg/models.py
# class Router(models.Model):
# class Location(models.Model):
# class Credential(models.Model):
# class Log(models.Model):
# def label(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# CRED_TYPE_PASSWD = 0
# CRED_TYPE_PUBKEY = 1
# CRED_TYPE_CHOICES = (
# (CRED_TYPE_PASSWD, "Password"),
# (CRED_TYPE_PUBKEY, "Public Key"),
# )
#
# Path: tests/models.py
# class DummyRouterWithLabel(models.Router):
# """Dummy router subclass for testing."""
#
# class Meta:
# """Meta class."""
#
# proxy = True
#
# label = "router-label-string"
#
# class DummyRouterWithoutLabel(models.Router):
# """Dummy router subclass for testing."""
#
# class Meta:
# """Meta class."""
#
# proxy = True
#
# @property
# def label(self):
# """Raise error."""
# raise NotImplementedError
. Output only the next line. | field = fields.RouterChoiceField(queryset=models.Router.objects.all()) |
Continue the code snippet: <|code_start|> """Test IPPrefixField deserialisation."""
ipv4_prefix = "192.0.2.0/24"
ipv6_prefix = "2001:db8::/32"
not_prefix = "A.B.C.D/L"
field = fields.IPPrefixField()
assert field.to_python() is None
assert "{}".format(field.to_python(ipv4_prefix)) == ipv4_prefix
assert "{}".format(field.to_python(ipv6_prefix)) == ipv6_prefix
try:
field.to_python(not_prefix)
except Exception as e:
assert isinstance(e, ValidationError)
def test_ip_address_field(self):
"""Test IPAddressField deserialisation."""
ipv4_address = "192.0.2.1"
ipv6_address = "2001:db8::1"
not_address = "A.B.C.D"
field = fields.IPAddressField()
assert field.to_python() is None
assert "{}".format(field.to_python(ipv4_address)) == ipv4_address
assert "{}".format(field.to_python(ipv6_address)) == ipv6_address
try:
field.to_python(not_address)
except Exception as e:
assert isinstance(e, ValidationError)
def test_router_label(self):
"""Test router label rendering."""
field = fields.RouterChoiceField(queryset=models.Router.objects.all())
<|code_end|>
. Use current file imports:
from django.core.exceptions import ValidationError
from django.test import TestCase
from djangolg import fields, models
from tests.models import DummyRouterWithLabel, DummyRouterWithoutLabel
and context (classes, functions, or code) from other files:
# Path: djangolg/fields.py
# class IPPrefixField(forms.CharField):
# class IPAddressField(forms.CharField):
# class RouterChoiceField(forms.ModelChoiceField):
# def to_python(self, value=None):
# def to_python(self, value=None):
# def label_from_instance(self, obj):
#
# Path: djangolg/models.py
# class Router(models.Model):
# class Location(models.Model):
# class Credential(models.Model):
# class Log(models.Model):
# def label(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# CRED_TYPE_PASSWD = 0
# CRED_TYPE_PUBKEY = 1
# CRED_TYPE_CHOICES = (
# (CRED_TYPE_PASSWD, "Password"),
# (CRED_TYPE_PUBKEY, "Public Key"),
# )
#
# Path: tests/models.py
# class DummyRouterWithLabel(models.Router):
# """Dummy router subclass for testing."""
#
# class Meta:
# """Meta class."""
#
# proxy = True
#
# label = "router-label-string"
#
# class DummyRouterWithoutLabel(models.Router):
# """Dummy router subclass for testing."""
#
# class Meta:
# """Meta class."""
#
# proxy = True
#
# @property
# def label(self):
# """Raise error."""
# raise NotImplementedError
. Output only the next line. | router_with_label = DummyRouterWithLabel(hostname="test-router") |
Based on the snippet: <|code_start|> ipv4_prefix = "192.0.2.0/24"
ipv6_prefix = "2001:db8::/32"
not_prefix = "A.B.C.D/L"
field = fields.IPPrefixField()
assert field.to_python() is None
assert "{}".format(field.to_python(ipv4_prefix)) == ipv4_prefix
assert "{}".format(field.to_python(ipv6_prefix)) == ipv6_prefix
try:
field.to_python(not_prefix)
except Exception as e:
assert isinstance(e, ValidationError)
def test_ip_address_field(self):
"""Test IPAddressField deserialisation."""
ipv4_address = "192.0.2.1"
ipv6_address = "2001:db8::1"
not_address = "A.B.C.D"
field = fields.IPAddressField()
assert field.to_python() is None
assert "{}".format(field.to_python(ipv4_address)) == ipv4_address
assert "{}".format(field.to_python(ipv6_address)) == ipv6_address
try:
field.to_python(not_address)
except Exception as e:
assert isinstance(e, ValidationError)
def test_router_label(self):
"""Test router label rendering."""
field = fields.RouterChoiceField(queryset=models.Router.objects.all())
router_with_label = DummyRouterWithLabel(hostname="test-router")
<|code_end|>
, predict the immediate next line with the help of imports:
from django.core.exceptions import ValidationError
from django.test import TestCase
from djangolg import fields, models
from tests.models import DummyRouterWithLabel, DummyRouterWithoutLabel
and context (classes, functions, sometimes code) from other files:
# Path: djangolg/fields.py
# class IPPrefixField(forms.CharField):
# class IPAddressField(forms.CharField):
# class RouterChoiceField(forms.ModelChoiceField):
# def to_python(self, value=None):
# def to_python(self, value=None):
# def label_from_instance(self, obj):
#
# Path: djangolg/models.py
# class Router(models.Model):
# class Location(models.Model):
# class Credential(models.Model):
# class Log(models.Model):
# def label(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# def __str__(self):
# def __unicode__(self):
# CRED_TYPE_PASSWD = 0
# CRED_TYPE_PUBKEY = 1
# CRED_TYPE_CHOICES = (
# (CRED_TYPE_PASSWD, "Password"),
# (CRED_TYPE_PUBKEY, "Public Key"),
# )
#
# Path: tests/models.py
# class DummyRouterWithLabel(models.Router):
# """Dummy router subclass for testing."""
#
# class Meta:
# """Meta class."""
#
# proxy = True
#
# label = "router-label-string"
#
# class DummyRouterWithoutLabel(models.Router):
# """Dummy router subclass for testing."""
#
# class Meta:
# """Meta class."""
#
# proxy = True
#
# @property
# def label(self):
# """Raise error."""
# raise NotImplementedError
. Output only the next line. | router_without_label = DummyRouterWithoutLabel(hostname="test-router") |
Given snippet: <|code_start|> if stashed[0]:
return
if self.change_count > 1:
message = 'stashing {0} changes'
else:
message = 'stashing {0} change'
print(colored(
message.format(self.change_count),
'magenta'
))
try:
self._run('stash')
except GitError as e:
raise StashError(stderr=e.stderr, stdout=e.stdout)
stashed[0] = True
yield stash
if stashed[0]:
print(colored('unstashing', 'magenta'))
try:
self._run('stash', 'pop')
except GitError as e:
raise UnstashError(stderr=e.stderr, stdout=e.stdout)
def checkout(self, branch_name):
""" Checkout a branch by name. """
try:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import re
import subprocess
import platform
from contextlib import contextmanager
from termcolor import colored # Assume, colorama is already initialized
from git import GitCommandError, CheckoutError as OrigCheckoutError, Git
from PyGitUp.utils import find
and context:
# Path: PyGitUp/utils.py
# def find(seq, test):
# """ Return first item in sequence where test(item) == True """
# for item in seq:
# if test(item):
# return item
which might include code, classes, or functions. Output only the next line. | find(
|
Continue the code snippet: <|code_start|>
APP: Inquisition
DESC: Unit test for Revelation library
CREATION_DATE: 2017-11-17
"""
# MODULES
# | Native
# | Third-Party
# | Custom
# METADATA
__author__ = 'Joshua Carlson-Purcell'
__copyright__ = 'Copyright 2017, CarlsoNet'
__license__ = 'MIT'
__version__ = '1.0.0-alpha'
__maintainer__ = 'Joshua Carlson-Purcell'
__email__ = 'jcarlson@carlso.net'
__status__ = 'Development'
class RevelationTestCase(unittest.TestCase):
def setUp(self):
# generate config
cfg = configparser.ConfigParser()
cfg.read('build/tests/unit_tests_GOOD.cfg')
<|code_end|>
. Use current file imports:
import configparser
import unittest
from time import time
from lib.revelation.Revelation import Revelation
and context (classes, functions, or code) from other files:
# Path: lib/revelation/Revelation.py
# class Revelation(Inquisit):
# """
# Alert framework for use with analysis engines
# """
#
# alertStore = []
#
# def __init__(self, cfg, sentryClient=None):
# Inquisit.__init__(self, cfg, lgrName=__name__, sentryClient=sentryClient)
#
# def addAlertToDB(self, alert):
# """
# Add alert with given data to entry in Inquisition DB
#
# :param alert: alert object who's data we're adding to the db
# :return: bool
# """
#
# # set sql
# sql = """
# INSERT INTO
# Alerts
# (
# alert_type,
# host,
# src_node,
# dst_node,
# alert_detail,
# log_data
# )
# VALUES
# (
# %s,
# %s,
# %s,
# %s,
# %s,
# %s
# )
# """
#
# # run sql query
# with self.inquisitionDbHandle.cursor() as dbCursor:
# try:
# dbCursor.execute(sql, (alert.alertType, alert.host, alert.srcNode, alert.dstNode, alert.alertDetails,
# alert.logData))
# self.inquisitionDbHandle.commit()
# if self.getCfgValue(section='logging', name='verbose', defaultVal=False, dataType=bool):
# self.lgr.debug(
# 'successfully added alert ' + str(alert) + ' to Inquisition database')
# except err as e:
# self.inquisitionDbHandle.rollback()
# self.lgr.critical(
# 'database error when adding new alert ' + str(alert) + ' :: [ ' + str(e) + ' ]')
# finally:
# dbCursor.close()
#
# def addAlert(self, timestamp=0, alertType=1, status=0, host='127.0.0.1', srcNode='0.0.0.0', dstNode='0.0.0.0',
# alertDetails='', logData=None, serializeLogData=True, addAlertToDb=True):
# """
# Generate an alert with given parameters and make it persistent
# \
# :param timestamp: timestamp that alert was generated, in epoch time
# :param alertType: type of alert: 1 = host anomaly, 2 = traffic node anomaly, 3 = threat
# :param status: status of alert: 0 = NEW, 1 = ACKNOWLEDGED, 2 = RESOLVED
# :param host: host that generated the alert
# :param srcNode: source host that generated the alert
# :param dstNode: destination host that generated the alert
# :param alertDetails: blob of text constituting the additional details of the alert
# :param logData: a key-value pair representation of the log that generated the alert
# :param serializeLogData: bool denoting whether to serialize the data or not if it's already been serialized
# :param addAlertToDb: bool determining whether we should add the alert to the db along with the alert store
# :return: void
# """
#
# if timestamp < 0:
# raise ValueError('invalid alert timestamp provided :: [ ' + str(timestamp) + ' ]')
#
# if alertType < 1 or 3 < alertType:
# raise ValueError('invalid alert type provided :: [ ' + str(alertType) + ' ]')
#
# if status < 0 or 2 < status:
# raise ValueError('invalid alert status provided :: [ ' + str(status) + ' ]')
#
# if serializeLogData:
# # serialize log data as json for storage in db
# logData = json.dumps(logData)
#
# # create alert
# alert = Alert(timestamp=timestamp, alertType=alertType, status=status, host=host, srcNode=srcNode,
# dstNode=dstNode, alertDetails=alertDetails, logData=logData)
# self.lgr.debug('created new alert :: ' + str(alert))
#
# # add to alert store
# self.alertStore.append(alert)
#
# if addAlertToDb:
# # add alert in db
# self.addAlertToDB(alert=alert)
. Output only the next line. | self.revelation = Revelation(cfg=cfg) |
Given the following code snippet before the placeholder: <|code_start|>
"""
test-template.py
APP: Inquisition
DESC: Unit test for Template.py library
CREATION_DATE: 2017-04-28
"""
# MODULES
# | Native
# | Third-Party
# | Custom
# METADATA
__author__ = 'Joshua Carlson-Purcell'
__copyright__ = 'Copyright 2017, CarlsoNet'
__license__ = 'MIT'
__version__ = '1.0.0-alpha'
__maintainer__ = 'Joshua Carlson-Purcell'
__email__ = 'jcarlson@carlso.net'
__status__ = 'Development'
class TemplateTestCase(unittest.TestCase):
def setUp(self):
# generate template
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from lib.anatomize.Template import Template
and context including class names, function names, and sometimes code from other files:
# Path: lib/anatomize/Template.py
# class Template:
# """
# Logical representation of template with all the bells and whistles
# """
#
# templateID = 0
# templateName = ''
# field = ''
# rawRegex = ''
# regexGrp = 0
# regexMatchIdx = 0
# compiledRegex = None
#
# def __init__(self, templateID, field, regex, regexGrp=0, regexMatchIdx=0, templateName='Default'):
# self.templateID = templateID
# self.templateName = templateName
# self.field = field
#
# self.setRegex(regex, regexGrp, regexMatchIdx)
#
# def setRegex(self, regex, regexGrp, regexMatchIdx):
# """
# Sets raw and compiled regex from provided regex pattern, as well as associated matching options
#
# :param regex: raw regex pattern
# :param regexGrp: regex group number to match on
# :param regexMatchIdx: index of match to use
# :return: void
# """
#
# # set raw regex
# self.rawRegex = regex
#
# # set regex group number and match index
# regexGrp = int(regexGrp)
# regexMatchIdx = int(regexMatchIdx)
# if 0 <= regexGrp and 0 <= regexMatchIdx:
# self.regexGrp = regexGrp
# self.regexMatchIdx = regexMatchIdx
# else:
# raise ValueError('invalid regex match options :: [ GROUP: { ' + str(regexGrp) + ' } || MATCH IDX: { ' + str(regexMatchIdx) + ' } ]')
#
# # compile raw regex
# self.compiledRegex = re.compile(regex)
#
# def matchLogAgainstRegex(self, log):
# """
# Matches given log against set regex and returns the value
#
# :param log: raw log message
# :return: parsed string
# """
#
# matchedString = ''
#
# if not log:
# # no log provided
# raise ValueError('no log provided')
#
# # try regex matching
# regexMatches = self.compiledRegex.findall(log)
#
# # get specified match based on idx num
# regexMatch = regexMatches[self.regexMatchIdx]
#
# if regexMatch:
# # match found, see if we need to specify by group
# if isinstance(regexMatch, tuple):
# # multiple regex groups found in match - use specified group
# matchedString = regexMatch[self.regexGrp]
# else:
# # only one group found, use as match value
# matchedString = regexMatch
#
# # strip surrounding whitespace
# matchedString = matchedString.strip(" \t\n\r")
#
# return matchedString
#
# def __str__(self):
# """
# Override __str__special method to print template metadata when obj is treated as a string
#
# :return: str
# """
#
# return '[ TID: ' + str(self.templateID) + ' // NAME: ' + self.templateName + ' // FIELD: ' + self.field \
# + ' // REGEX: {{ ' + self.rawRegex + ' }} // GRP: { ' + str(self.regexGrp) + ' } || MATCH_IDX: { ' \
# + str(self.regexMatchIdx) + ' } ]'
. Output only the next line. | self.template = Template(1, 'timestamp', '^\d$') |
Using the snippet: <|code_start|># License for the specific language governing permissions and limitations
# under the License.
log = HMCClientLogger.HMCClientLogger(__name__)
ROOT = "LogicalPartition"
CONTENT_TYPE = "application/vnd.ibm.powervm.uom+xml;type=VirtualFibreChannelClientAdapter"
class ListVirtualFibreChannelClientAdapter:
"""
List the details of the virtual fibre channel adapter
for a given logical partition
"""
def __init__(self):
"""
assign the root and content_type for request
"""
self.root = ROOT
self.content_type = CONTENT_TYPE
def list_virtualfibrechannel_clientadapter(self, ip, logicalpartition_id, x_api_session):
"""
returns the list of virtual fibre channel adapter available in the
client partition
Args:
ip : ip address of HMC
logicalpartition_id : UUID of the Logical Partition
x_api_session : session to be used
"""
log.log_debug("fc adarpter object list is started")
<|code_end|>
, determine the next line of code. You have imports:
from src.common import ListModule
from src.utility import HMCClientLogger
and context (class names, function names, or code) available:
# Path: src/common/ListModule.py
# class ListModule:
# """
# called in Listing operation to get the object list
# of ManagementConsole,ManagedSystem,LogicalPartition,
# LogicalPartitionProfile,VirtualIOServer and other objects.
# """
# log_object = HMCClientLogger.HMCClientLogger(__name__)
#
# def listing(self, service, ip, root, content_type, Resource, session_id, uuid=None):
#
# """
# Makes an HTTPRequest to get the details of the
# corresponding object and store the response content
# into a python object.
# Args:
# service:uom or web
# ip:ip address of the hmc
# root:root element in rest uri
# content_type:type of object to be extracted
# (logicalpartition,logicalpartitionprofile,
# ManagedSystem,VirtualIOServer and other objects)
# session_id:to access the session
# uuid:root unique id
# Returns:
# list of corresponding objects
# """
# #for ManagementConsole the uuid is none and for other types the uri is appended with roots uuid
# obj_list = []
# xml_object = None
# headers_obj = HmcHeaders.HmcHeaders(service)
# ns = headers_obj.ns
# request_obj = HTTPClient.HTTPClient(service, ip, root, content_type, session_id)
# if uuid == None:
# request_obj.HTTPGet()
# else:
# request_obj.HTTPGet(append=str(uuid)+"/"+Resource)
# if request_obj.response_b:
# root = etree.fromstring(request_obj.response.text)
# entries = root.findall(".//%s:%s"%(Resource,Resource),
# namespaces={"%s" %(Resource): ns["xmlns"]})
# for entry in entries:
# if entry.getchildren() != []:
# xmlstring = etree.tostring(entry)
# xml_object = UOM.CreateFromDocument(xmlstring)
# obj_list.append(xml_object)
# return obj_list
#
# Path: src/utility/HMCClientLogger.py
# class HMCClientLogger:
# def __init__(self,module_name):
# self.logger_module = logging.getLogger(module_name)
# self.logger_module.setLevel(logging.DEBUG)
#
# # create file handler which logs even debug messages
# if not os.path.exists("output/Log"):
# os.makedirs("output/Log")
# filehandler = logging.FileHandler('output/Log/Debug.log')
# filehandler.setLevel(logging.DEBUG)
# consolehandler=logging.StreamHandler()
# consolehandler.setLevel(logging.WARNING)
# # create formatter and add it to the handlers
# fileformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s',datefmt='%m/%d/%Y %I:%M:%S %p')
# consoleformatter = logging.Formatter(' %(levelname)s : %(message)s')
# filehandler.setFormatter(fileformatter)
# consolehandler.setFormatter(consoleformatter)
# # add the handlers to logger
# self.logger_module.addHandler(filehandler)
# self.logger_module.addHandler(consolehandler)
#
# def log_debug(self,message):
# self.logger_module.debug(message)
# def log_info(self,message):
# self.logger_module.info(message)
# def log_error(self,message):
# self.logger_module.error(message)
# def log_warn(self,message):
# self.logger_module.warn(message)
. Output only the next line. | list_object = ListModule.ListModule() |
Predict the next line after this snippet: <|code_start|># Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
log_object = HMCClientLogger.HMCClientLogger(__name__)
class SelectManagedSystem:
""" selects required managed system from src.list """
def get_managedsystem_uuid(self, ip, x_api_session):
""" get uuid for required managed system"""
print("\nAvailable ManagedSystems : ")
self.managedsystem_uuid = ""
<|code_end|>
using the current file's imports:
from src.managed_system import ListManagedSystem
from src.utility import HMCClientLogger
and any relevant context from other files:
# Path: src/managed_system/ListManagedSystem.py
# class ListManagedSystem(object):
#
# """"
# lists the details of Managed System
# """
#
# def __init__(self):
# """
# Initializes the content_type
# """
#
# self.content_type = CONTENT_TYPE
#
#
# def list_ManagedSystem(self, ip, session_id):
# """
# collects the xml content of the managed system and
# returns a reference to it
# Args:
# ip : ip address of HMC
# session_id : session to be used
# """
# log_object.log_debug("List of ManagedSystem started")
# listing_object = ListModule.ListModule()
# #call to get the xml content of managed system #
# self.object_list = listing_object.listing("uom", ip,
# "ManagedSystem",
# self.content_type, "ManagedSystem", session_id)
# log_object.log_debug("Returns ManagedSystem objects to"
# "the main module")
# return self.object_list
#
# def print_managedsystem_attributes(self, choice):
# """
# Prints the quick property values from src.the retrieved xml content
#
# Args:
# choice:represents user selected choice of specific
# managed system
# """
# object_list=self.object_list[choice]
# print("\n")
# print("ManagedSystemName".ljust(35), ":",
# object_list.SystemName.value())
# print("ManagedSystem ID".ljust(35), ":",
# object_list.Metadata.Atom.AtomID.value())
# print("MachineType".ljust(35), ":",
# object_list.MachineTypeModelAndSerialNumber.MachineType.value())
# print("Model".ljust(35), ":",
# object_list.MachineTypeModelAndSerialNumber.Model.value())
# print("IPAddress".ljust(35), ":", object_list.PrimaryIPAddress.value())
# print("SystemState".ljust(35), ":", object_list.State.value())
# print("SerialNumber".ljust(35), ":",
# object_list.MachineTypeModelAndSerialNumber.SerialNumber.value())
# print("PhysicalSystemAttentionLEDState".ljust(35), ":",
# object_list.PhysicalSystemAttentionLEDState.value())
#
# Path: src/utility/HMCClientLogger.py
# class HMCClientLogger:
# def __init__(self,module_name):
# self.logger_module = logging.getLogger(module_name)
# self.logger_module.setLevel(logging.DEBUG)
#
# # create file handler which logs even debug messages
# if not os.path.exists("output/Log"):
# os.makedirs("output/Log")
# filehandler = logging.FileHandler('output/Log/Debug.log')
# filehandler.setLevel(logging.DEBUG)
# consolehandler=logging.StreamHandler()
# consolehandler.setLevel(logging.WARNING)
# # create formatter and add it to the handlers
# fileformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s',datefmt='%m/%d/%Y %I:%M:%S %p')
# consoleformatter = logging.Formatter(' %(levelname)s : %(message)s')
# filehandler.setFormatter(fileformatter)
# consolehandler.setFormatter(consoleformatter)
# # add the handlers to logger
# self.logger_module.addHandler(filehandler)
# self.logger_module.addHandler(consolehandler)
#
# def log_debug(self,message):
# self.logger_module.debug(message)
# def log_info(self,message):
# self.logger_module.info(message)
# def log_error(self,message):
# self.logger_module.error(message)
# def log_warn(self,message):
# self.logger_module.warn(message)
. Output only the next line. | managedsystem_object = ListManagedSystem.ListManagedSystem() |
Predict the next line for this snippet: <|code_start|># Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
log = HMCClientLogger.HMCClientLogger(__name__)
ROOT = "Cluster"
CONTENT_TYPE = "application/vnd.ibm.powervm.uom+xml; type=Cluster"
class ListCluster:
"""
List the details of the cluster
"""
def __init__(self):
"""
assign the root and content_type for request
"""
self.root = ROOT
self.content_type = CONTENT_TYPE
def list_cluster(self, ip, x_api_session):
"""
returns the List of available Cluster objects
Args:
ip : ip address of HMC
x_api_session : session to be used
"""
log.log_debug("cluster object list is started")
<|code_end|>
with the help of current file imports:
from src.common import ListModule
from src.utility import HMCClientLogger
and context from other files:
# Path: src/common/ListModule.py
# class ListModule:
# """
# called in Listing operation to get the object list
# of ManagementConsole,ManagedSystem,LogicalPartition,
# LogicalPartitionProfile,VirtualIOServer and other objects.
# """
# log_object = HMCClientLogger.HMCClientLogger(__name__)
#
# def listing(self, service, ip, root, content_type, Resource, session_id, uuid=None):
#
# """
# Makes an HTTPRequest to get the details of the
# corresponding object and store the response content
# into a python object.
# Args:
# service:uom or web
# ip:ip address of the hmc
# root:root element in rest uri
# content_type:type of object to be extracted
# (logicalpartition,logicalpartitionprofile,
# ManagedSystem,VirtualIOServer and other objects)
# session_id:to access the session
# uuid:root unique id
# Returns:
# list of corresponding objects
# """
# #for ManagementConsole the uuid is none and for other types the uri is appended with roots uuid
# obj_list = []
# xml_object = None
# headers_obj = HmcHeaders.HmcHeaders(service)
# ns = headers_obj.ns
# request_obj = HTTPClient.HTTPClient(service, ip, root, content_type, session_id)
# if uuid == None:
# request_obj.HTTPGet()
# else:
# request_obj.HTTPGet(append=str(uuid)+"/"+Resource)
# if request_obj.response_b:
# root = etree.fromstring(request_obj.response.text)
# entries = root.findall(".//%s:%s"%(Resource,Resource),
# namespaces={"%s" %(Resource): ns["xmlns"]})
# for entry in entries:
# if entry.getchildren() != []:
# xmlstring = etree.tostring(entry)
# xml_object = UOM.CreateFromDocument(xmlstring)
# obj_list.append(xml_object)
# return obj_list
#
# Path: src/utility/HMCClientLogger.py
# class HMCClientLogger:
# def __init__(self,module_name):
# self.logger_module = logging.getLogger(module_name)
# self.logger_module.setLevel(logging.DEBUG)
#
# # create file handler which logs even debug messages
# if not os.path.exists("output/Log"):
# os.makedirs("output/Log")
# filehandler = logging.FileHandler('output/Log/Debug.log')
# filehandler.setLevel(logging.DEBUG)
# consolehandler=logging.StreamHandler()
# consolehandler.setLevel(logging.WARNING)
# # create formatter and add it to the handlers
# fileformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s',datefmt='%m/%d/%Y %I:%M:%S %p')
# consoleformatter = logging.Formatter(' %(levelname)s : %(message)s')
# filehandler.setFormatter(fileformatter)
# consolehandler.setFormatter(consoleformatter)
# # add the handlers to logger
# self.logger_module.addHandler(filehandler)
# self.logger_module.addHandler(consolehandler)
#
# def log_debug(self,message):
# self.logger_module.debug(message)
# def log_info(self,message):
# self.logger_module.info(message)
# def log_error(self,message):
# self.logger_module.error(message)
# def log_warn(self,message):
# self.logger_module.warn(message)
, which may contain function names, class names, or code. Output only the next line. | list_object = ListModule.ListModule() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.