Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Continue the code snippet: <|code_start|> pm.Model.__init__(self, name)
__init__(self, *args, **kwargs)
return wrapped
# wrap new for new class
def wrap_new(__new__):
@functools.wraps(__new__)
def wrapped(_cls_, *args, **kwargs):
parent = kwargs.get('model', None)
if parent is None and not issubclass(_cls_, lasagne.layers.InputLayer):
incoming = kwargs.get('incoming',
kwargs.get('incomings',
args[1]))
parent = find_parent(incoming)
kwargs['model'] = parent
instance = __new__(_cls_, *args, **kwargs)
return instance
return classmethod(wrapped)
cls.__init__ = wrap_init(cls.__init__)
cls.__new__ = wrap_new(cls.__new__)
def add_param(self, spec, shape, name=None, **tags):
if tags.get('trainable', True):
if tags.get('regularizable', True):
if not isinstance(spec, DistSpec):
# here spec is like test value
# passed to pymc3 distribution
spec = getattr(self, 'default_spec', get_default_spec(spec))
else:
<|code_end|>
. Use current file imports:
import functools
import inspect
import six
import lasagne.layers.base
import pymc3 as pm
from pymc3.memoize import hashable
from gelato.specs.dist import get_default_spec, FlatSpec
from gelato.specs.base import DistSpec
from gelato.layers.helper import find_parent
and context (classes, functions, or code) from other files:
# Path: gelato/specs/dist.py
# def get_default_spec(testval=None):
# # to avoid init collision
# cp = copy.deepcopy(_default_spec)
# if testval is None and cp.testval is None:
# cp.testval = get_default_testval()
# elif testval is not None:
# cp.testval = testval
# else:
# pass
# return cp
#
# class FlatSpec(PartialSpec):
# spec = pm.Flat
# __doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
# dist=spec.__name__,
# doc=spec.__doc__
# )
#
# def __init__(self):
# super(FlatSpec, self).__init__(testval=init.Uniform(1))
#
# Path: gelato/specs/base.py
# class DistSpec(SpecVar):
# """Spec based on pymc3 distributions
#
# All specs support lazy evaluation, see Usage
#
# Parameters
# ----------
# distcls : pymc3.Distribution
# args : args for `distcls`
# kwargs : kwargs for `distcls`
#
# Usage
# -----
# >>> spec = DistSpec(Normal, mu=0, sd=DistSpec(Lognormal, 0, 1))
# >>> spec += (NormalSpec() + LaplaceSpec()) / 100 - NormalSpec()
# >>> with Model():
# ... prior_expr = spec((10, 10), name='silly_prior')
#
# """
# def __init__(self, distcls, *args, **kwargs):
# if not isinstance(distcls, type) and issubclass(distcls, pm.Distribution):
# raise ValueError('We can deal with pymc3 '
# 'distributions only, got {!r} instead'
# .format(distcls))
# self.testval = kwargs.pop('testval', None)
# self.tag = kwargs.get('tag', 'default')
# self.args = args
# self.kwargs = kwargs
# self.distcls = distcls
#
# def __call__(self, shape, name=None, memo=None):
# memo, shape = self._prepare(memo, shape)
# if name is None:
# name = self.auto()
# shape, tag = self._get_shape(shape)
# if id(self) ^ hash(tag) in memo:
# return memo[id(self) ^ hash(tag)]
# model = pm.modelcontext(None)
# called_args = self._call_args(self.args, name, shape, memo)
# called_kwargs = self._call_kwargs(self.kwargs, name, shape, memo)
# called_kwargs.update(shape=shape['default'])
# val = model.Var(
# name, self.distcls.dist(
# *called_args,
# dtype=theano.config.floatX,
# **called_kwargs
# ),
# )
# if self.testval is None:
# val.tag.test_value = get_default_testval()(shape['default']).astype(val.dtype)
# elif isinstance(self.testval, str) and self.testval == 'random':
# val.tag.test_value = val.random(size=shape['default']).astype(val.dtype)
# else:
# val.tag.test_value = self.testval(shape['default']).astype(val.dtype)
# memo[id(self) ^ hash(tag)] = val
# return memo[id(self) ^ hash(tag)]
#
# def __repr__(self):
# if self._shape != -1:
# sh = '; '+str(self._shape)
# else:
# sh = ''
# template = '<{cls}: {args!r}; {kwargs!r}'+sh+'>'
# return template.format(cls=self.distcls.__name__,
# args=self.args,
# kwargs=self.kwargs)
. Output only the next line. | spec = FlatSpec() |
Continue the code snippet: <|code_start|> def wrap_init(__init__):
@functools.wraps(__init__)
def wrapped(self, *args, **kwargs):
name = kwargs.get('name')
self._fingerprint = hashable(self.parent)
pm.Model.__init__(self, name)
__init__(self, *args, **kwargs)
return wrapped
# wrap new for new class
def wrap_new(__new__):
@functools.wraps(__new__)
def wrapped(_cls_, *args, **kwargs):
parent = kwargs.get('model', None)
if parent is None and not issubclass(_cls_, lasagne.layers.InputLayer):
incoming = kwargs.get('incoming',
kwargs.get('incomings',
args[1]))
parent = find_parent(incoming)
kwargs['model'] = parent
instance = __new__(_cls_, *args, **kwargs)
return instance
return classmethod(wrapped)
cls.__init__ = wrap_init(cls.__init__)
cls.__new__ = wrap_new(cls.__new__)
def add_param(self, spec, shape, name=None, **tags):
if tags.get('trainable', True):
if tags.get('regularizable', True):
<|code_end|>
. Use current file imports:
import functools
import inspect
import six
import lasagne.layers.base
import pymc3 as pm
from pymc3.memoize import hashable
from gelato.specs.dist import get_default_spec, FlatSpec
from gelato.specs.base import DistSpec
from gelato.layers.helper import find_parent
and context (classes, functions, or code) from other files:
# Path: gelato/specs/dist.py
# def get_default_spec(testval=None):
# # to avoid init collision
# cp = copy.deepcopy(_default_spec)
# if testval is None and cp.testval is None:
# cp.testval = get_default_testval()
# elif testval is not None:
# cp.testval = testval
# else:
# pass
# return cp
#
# class FlatSpec(PartialSpec):
# spec = pm.Flat
# __doc__ = """Gelato DistSpec with {dist} prior\n\n{doc}""".format(
# dist=spec.__name__,
# doc=spec.__doc__
# )
#
# def __init__(self):
# super(FlatSpec, self).__init__(testval=init.Uniform(1))
#
# Path: gelato/specs/base.py
# class DistSpec(SpecVar):
# """Spec based on pymc3 distributions
#
# All specs support lazy evaluation, see Usage
#
# Parameters
# ----------
# distcls : pymc3.Distribution
# args : args for `distcls`
# kwargs : kwargs for `distcls`
#
# Usage
# -----
# >>> spec = DistSpec(Normal, mu=0, sd=DistSpec(Lognormal, 0, 1))
# >>> spec += (NormalSpec() + LaplaceSpec()) / 100 - NormalSpec()
# >>> with Model():
# ... prior_expr = spec((10, 10), name='silly_prior')
#
# """
# def __init__(self, distcls, *args, **kwargs):
# if not isinstance(distcls, type) and issubclass(distcls, pm.Distribution):
# raise ValueError('We can deal with pymc3 '
# 'distributions only, got {!r} instead'
# .format(distcls))
# self.testval = kwargs.pop('testval', None)
# self.tag = kwargs.get('tag', 'default')
# self.args = args
# self.kwargs = kwargs
# self.distcls = distcls
#
# def __call__(self, shape, name=None, memo=None):
# memo, shape = self._prepare(memo, shape)
# if name is None:
# name = self.auto()
# shape, tag = self._get_shape(shape)
# if id(self) ^ hash(tag) in memo:
# return memo[id(self) ^ hash(tag)]
# model = pm.modelcontext(None)
# called_args = self._call_args(self.args, name, shape, memo)
# called_kwargs = self._call_kwargs(self.kwargs, name, shape, memo)
# called_kwargs.update(shape=shape['default'])
# val = model.Var(
# name, self.distcls.dist(
# *called_args,
# dtype=theano.config.floatX,
# **called_kwargs
# ),
# )
# if self.testval is None:
# val.tag.test_value = get_default_testval()(shape['default']).astype(val.dtype)
# elif isinstance(self.testval, str) and self.testval == 'random':
# val.tag.test_value = val.random(size=shape['default']).astype(val.dtype)
# else:
# val.tag.test_value = self.testval(shape['default']).astype(val.dtype)
# memo[id(self) ^ hash(tag)] = val
# return memo[id(self) ^ hash(tag)]
#
# def __repr__(self):
# if self._shape != -1:
# sh = '; '+str(self._shape)
# else:
# sh = ''
# template = '<{cls}: {args!r}; {kwargs!r}'+sh+'>'
# return template.format(cls=self.distcls.__name__,
# args=self.args,
# kwargs=self.kwargs)
. Output only the next line. | if not isinstance(spec, DistSpec): |
Continue the code snippet: <|code_start|>__module = sys.modules[__name__]
del sys
__all__ = []
for obj_name in __cloned.__all__:
try:
<|code_end|>
. Use current file imports:
import sys
import lasagne.layers.dense as __cloned
from .base import bayes as __bayes
and context (classes, functions, or code) from other files:
# Path: gelato/layers/base.py
# def bayes(layercls, stack=1):
# try:
# issubcls = issubclass(layercls, lasagne.layers.base.Layer)
# except TypeError:
# raise TypeError('{} needs to be a Layer subclass'
# .format(layercls))
# if issubcls:
# if type(layercls) is LayerModelMeta:
# raise TypeError('{} is already bayesian'
# .format(layercls))
# else:
# @six.add_metaclass(LayerModelMeta)
# class BayesianAnalog(layercls, pm.Model):
# pass
# frm = inspect.stack()[stack]
# mod = inspect.getmodule(frm[0])
# if mod is None:
# modname = '__main__'
# else:
# modname = mod.__name__
# BayesianAnalog.__module__ = modname
# BayesianAnalog.__doc__ = layercls.__doc__
# BayesianAnalog.__name__ = layercls.__name__
# return BayesianAnalog
# else:
# raise TypeError('{} needs to be a Layer subclass'
# .format(layercls))
. Output only the next line. | setattr(__module, obj_name, __bayes(getattr(__cloned, obj_name))) |
Predict the next line for this snippet: <|code_start|>
__all__ = [
'get_default_spec',
'set_default_spec',
'PartialSpec',
'UniformSpec',
'FlatSpec',
'NormalSpec',
'BetaSpec',
'ExponentialSpec',
'LaplaceSpec',
'StudentTSpec',
'CauchySpec',
'HalfCauchySpec',
'GammaSpec',
'WeibullSpec',
'LognormalSpec',
'ChiSquaredSpec',
'HalfNormalSpec',
'WaldSpec',
'ParetoSpec',
'InverseGammaSpec',
'ExGaussianSpec',
'VonMisesSpec',
'SkewNormalSpec',
# 'HalfStudentTSpec',
# 'NormalMixtureSpec'
]
<|code_end|>
with the help of current file imports:
import copy
import pymc3 as pm
from lasagne import init
from gelato.specs.base import DistSpec, get_default_testval, smart_init
and context from other files:
# Path: gelato/specs/base.py
# class DistSpec(SpecVar):
# """Spec based on pymc3 distributions
#
# All specs support lazy evaluation, see Usage
#
# Parameters
# ----------
# distcls : pymc3.Distribution
# args : args for `distcls`
# kwargs : kwargs for `distcls`
#
# Usage
# -----
# >>> spec = DistSpec(Normal, mu=0, sd=DistSpec(Lognormal, 0, 1))
# >>> spec += (NormalSpec() + LaplaceSpec()) / 100 - NormalSpec()
# >>> with Model():
# ... prior_expr = spec((10, 10), name='silly_prior')
#
# """
# def __init__(self, distcls, *args, **kwargs):
# if not isinstance(distcls, type) and issubclass(distcls, pm.Distribution):
# raise ValueError('We can deal with pymc3 '
# 'distributions only, got {!r} instead'
# .format(distcls))
# self.testval = kwargs.pop('testval', None)
# self.tag = kwargs.get('tag', 'default')
# self.args = args
# self.kwargs = kwargs
# self.distcls = distcls
#
# def __call__(self, shape, name=None, memo=None):
# memo, shape = self._prepare(memo, shape)
# if name is None:
# name = self.auto()
# shape, tag = self._get_shape(shape)
# if id(self) ^ hash(tag) in memo:
# return memo[id(self) ^ hash(tag)]
# model = pm.modelcontext(None)
# called_args = self._call_args(self.args, name, shape, memo)
# called_kwargs = self._call_kwargs(self.kwargs, name, shape, memo)
# called_kwargs.update(shape=shape['default'])
# val = model.Var(
# name, self.distcls.dist(
# *called_args,
# dtype=theano.config.floatX,
# **called_kwargs
# ),
# )
# if self.testval is None:
# val.tag.test_value = get_default_testval()(shape['default']).astype(val.dtype)
# elif isinstance(self.testval, str) and self.testval == 'random':
# val.tag.test_value = val.random(size=shape['default']).astype(val.dtype)
# else:
# val.tag.test_value = self.testval(shape['default']).astype(val.dtype)
# memo[id(self) ^ hash(tag)] = val
# return memo[id(self) ^ hash(tag)]
#
# def __repr__(self):
# if self._shape != -1:
# sh = '; '+str(self._shape)
# else:
# sh = ''
# template = '<{cls}: {args!r}; {kwargs!r}'+sh+'>'
# return template.format(cls=self.distcls.__name__,
# args=self.args,
# kwargs=self.kwargs)
#
# def get_default_testval():
# return _default_testval
#
# def smart_init(shape):
# if len(shape) > 1:
# return init.GlorotUniform()(shape)
# else:
# return init.Normal()(shape)
, which may contain function names, class names, or code. Output only the next line. | _default_spec = DistSpec(pm.Normal, mu=0, sd=10, testval=smart_init) |
Using the snippet: <|code_start|> 'FlatSpec',
'NormalSpec',
'BetaSpec',
'ExponentialSpec',
'LaplaceSpec',
'StudentTSpec',
'CauchySpec',
'HalfCauchySpec',
'GammaSpec',
'WeibullSpec',
'LognormalSpec',
'ChiSquaredSpec',
'HalfNormalSpec',
'WaldSpec',
'ParetoSpec',
'InverseGammaSpec',
'ExGaussianSpec',
'VonMisesSpec',
'SkewNormalSpec',
# 'HalfStudentTSpec',
# 'NormalMixtureSpec'
]
_default_spec = DistSpec(pm.Normal, mu=0, sd=10, testval=smart_init)
def get_default_spec(testval=None):
# to avoid init collision
cp = copy.deepcopy(_default_spec)
if testval is None and cp.testval is None:
<|code_end|>
, determine the next line of code. You have imports:
import copy
import pymc3 as pm
from lasagne import init
from gelato.specs.base import DistSpec, get_default_testval, smart_init
and context (class names, function names, or code) available:
# Path: gelato/specs/base.py
# class DistSpec(SpecVar):
# """Spec based on pymc3 distributions
#
# All specs support lazy evaluation, see Usage
#
# Parameters
# ----------
# distcls : pymc3.Distribution
# args : args for `distcls`
# kwargs : kwargs for `distcls`
#
# Usage
# -----
# >>> spec = DistSpec(Normal, mu=0, sd=DistSpec(Lognormal, 0, 1))
# >>> spec += (NormalSpec() + LaplaceSpec()) / 100 - NormalSpec()
# >>> with Model():
# ... prior_expr = spec((10, 10), name='silly_prior')
#
# """
# def __init__(self, distcls, *args, **kwargs):
# if not isinstance(distcls, type) and issubclass(distcls, pm.Distribution):
# raise ValueError('We can deal with pymc3 '
# 'distributions only, got {!r} instead'
# .format(distcls))
# self.testval = kwargs.pop('testval', None)
# self.tag = kwargs.get('tag', 'default')
# self.args = args
# self.kwargs = kwargs
# self.distcls = distcls
#
# def __call__(self, shape, name=None, memo=None):
# memo, shape = self._prepare(memo, shape)
# if name is None:
# name = self.auto()
# shape, tag = self._get_shape(shape)
# if id(self) ^ hash(tag) in memo:
# return memo[id(self) ^ hash(tag)]
# model = pm.modelcontext(None)
# called_args = self._call_args(self.args, name, shape, memo)
# called_kwargs = self._call_kwargs(self.kwargs, name, shape, memo)
# called_kwargs.update(shape=shape['default'])
# val = model.Var(
# name, self.distcls.dist(
# *called_args,
# dtype=theano.config.floatX,
# **called_kwargs
# ),
# )
# if self.testval is None:
# val.tag.test_value = get_default_testval()(shape['default']).astype(val.dtype)
# elif isinstance(self.testval, str) and self.testval == 'random':
# val.tag.test_value = val.random(size=shape['default']).astype(val.dtype)
# else:
# val.tag.test_value = self.testval(shape['default']).astype(val.dtype)
# memo[id(self) ^ hash(tag)] = val
# return memo[id(self) ^ hash(tag)]
#
# def __repr__(self):
# if self._shape != -1:
# sh = '; '+str(self._shape)
# else:
# sh = ''
# template = '<{cls}: {args!r}; {kwargs!r}'+sh+'>'
# return template.format(cls=self.distcls.__name__,
# args=self.args,
# kwargs=self.kwargs)
#
# def get_default_testval():
# return _default_testval
#
# def smart_init(shape):
# if len(shape) > 1:
# return init.GlorotUniform()(shape)
# else:
# return init.Normal()(shape)
. Output only the next line. | cp.testval = get_default_testval() |
Predict the next line for this snippet: <|code_start|>
__all__ = [
'get_default_spec',
'set_default_spec',
'PartialSpec',
'UniformSpec',
'FlatSpec',
'NormalSpec',
'BetaSpec',
'ExponentialSpec',
'LaplaceSpec',
'StudentTSpec',
'CauchySpec',
'HalfCauchySpec',
'GammaSpec',
'WeibullSpec',
'LognormalSpec',
'ChiSquaredSpec',
'HalfNormalSpec',
'WaldSpec',
'ParetoSpec',
'InverseGammaSpec',
'ExGaussianSpec',
'VonMisesSpec',
'SkewNormalSpec',
# 'HalfStudentTSpec',
# 'NormalMixtureSpec'
]
<|code_end|>
with the help of current file imports:
import copy
import pymc3 as pm
from lasagne import init
from gelato.specs.base import DistSpec, get_default_testval, smart_init
and context from other files:
# Path: gelato/specs/base.py
# class DistSpec(SpecVar):
# """Spec based on pymc3 distributions
#
# All specs support lazy evaluation, see Usage
#
# Parameters
# ----------
# distcls : pymc3.Distribution
# args : args for `distcls`
# kwargs : kwargs for `distcls`
#
# Usage
# -----
# >>> spec = DistSpec(Normal, mu=0, sd=DistSpec(Lognormal, 0, 1))
# >>> spec += (NormalSpec() + LaplaceSpec()) / 100 - NormalSpec()
# >>> with Model():
# ... prior_expr = spec((10, 10), name='silly_prior')
#
# """
# def __init__(self, distcls, *args, **kwargs):
# if not isinstance(distcls, type) and issubclass(distcls, pm.Distribution):
# raise ValueError('We can deal with pymc3 '
# 'distributions only, got {!r} instead'
# .format(distcls))
# self.testval = kwargs.pop('testval', None)
# self.tag = kwargs.get('tag', 'default')
# self.args = args
# self.kwargs = kwargs
# self.distcls = distcls
#
# def __call__(self, shape, name=None, memo=None):
# memo, shape = self._prepare(memo, shape)
# if name is None:
# name = self.auto()
# shape, tag = self._get_shape(shape)
# if id(self) ^ hash(tag) in memo:
# return memo[id(self) ^ hash(tag)]
# model = pm.modelcontext(None)
# called_args = self._call_args(self.args, name, shape, memo)
# called_kwargs = self._call_kwargs(self.kwargs, name, shape, memo)
# called_kwargs.update(shape=shape['default'])
# val = model.Var(
# name, self.distcls.dist(
# *called_args,
# dtype=theano.config.floatX,
# **called_kwargs
# ),
# )
# if self.testval is None:
# val.tag.test_value = get_default_testval()(shape['default']).astype(val.dtype)
# elif isinstance(self.testval, str) and self.testval == 'random':
# val.tag.test_value = val.random(size=shape['default']).astype(val.dtype)
# else:
# val.tag.test_value = self.testval(shape['default']).astype(val.dtype)
# memo[id(self) ^ hash(tag)] = val
# return memo[id(self) ^ hash(tag)]
#
# def __repr__(self):
# if self._shape != -1:
# sh = '; '+str(self._shape)
# else:
# sh = ''
# template = '<{cls}: {args!r}; {kwargs!r}'+sh+'>'
# return template.format(cls=self.distcls.__name__,
# args=self.args,
# kwargs=self.kwargs)
#
# def get_default_testval():
# return _default_testval
#
# def smart_init(shape):
# if len(shape) > 1:
# return init.GlorotUniform()(shape)
# else:
# return init.Normal()(shape)
, which may contain function names, class names, or code. Output only the next line. | _default_spec = DistSpec(pm.Normal, mu=0, sd=10, testval=smart_init) |
Continue the code snippet: <|code_start|>__module = sys.modules[__name__]
del sys
__all__ = []
for obj_name in __cloned.__all__:
try:
<|code_end|>
. Use current file imports:
import sys
import lasagne.layers.corrmm as __cloned
from .base import bayes as __bayes
and context (classes, functions, or code) from other files:
# Path: gelato/layers/base.py
# def bayes(layercls, stack=1):
# try:
# issubcls = issubclass(layercls, lasagne.layers.base.Layer)
# except TypeError:
# raise TypeError('{} needs to be a Layer subclass'
# .format(layercls))
# if issubcls:
# if type(layercls) is LayerModelMeta:
# raise TypeError('{} is already bayesian'
# .format(layercls))
# else:
# @six.add_metaclass(LayerModelMeta)
# class BayesianAnalog(layercls, pm.Model):
# pass
# frm = inspect.stack()[stack]
# mod = inspect.getmodule(frm[0])
# if mod is None:
# modname = '__main__'
# else:
# modname = mod.__name__
# BayesianAnalog.__module__ = modname
# BayesianAnalog.__doc__ = layercls.__doc__
# BayesianAnalog.__name__ = layercls.__name__
# return BayesianAnalog
# else:
# raise TypeError('{} needs to be a Layer subclass'
# .format(layercls))
. Output only the next line. | setattr(__module, obj_name, __bayes(getattr(__cloned, obj_name))) |
Here is a snippet: <|code_start|># Copyright (C) 2014 Iago Veloso Abalo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class UsersView(generic.ListView):
model = UserProfile
def get_queryset(self):
return UserProfile.objects.filter(display_personal_page=True).select_related('user')
def userprofile_detail(request, slug):
userprofile = get_object_or_404(UserProfile.objects.select_related('user'), slug=slug, display_personal_page=True)
context = {
'userprofile': userprofile,
<|code_end|>
. Write the next line using the current file imports:
from django.shortcuts import render, get_object_or_404
from django.views import generic
from radioco.apps.programmes.models import Role
from radioco.apps.users.models import UserProfile
and context from other files:
# Path: radioco/apps/programmes/models.py
# class Role(models.Model):
# person = models.ForeignKey(User, verbose_name=_("person"), on_delete=models.CASCADE)
# programme = models.ForeignKey(Programme, verbose_name=_("programme"), on_delete=models.CASCADE)
# role = models.CharField(max_length=60, blank=True, null=True, verbose_name=_("role"))
# description = models.TextField(blank=True, verbose_name=_("description"))
# date_joined = models.DateField(auto_now_add=True)
#
# class Meta:
# unique_together = ('person', 'programme')
# verbose_name = _('role')
# verbose_name_plural = _('roles')
# permissions = (
# ("see_all_roles", "Can see all roles"),
# )
#
# def __str__(self):
# return "%s: %s" % (self.programme.name, self.person.username)
#
# Path: radioco/apps/users/models.py
# class UserProfile(models.Model):
# user = models.OneToOneField(User, unique=True)
# bio = RichTextUploadingField(blank=True, verbose_name=_("biography"))
# avatar = models.ImageField(
# upload_to='avatars/', default='defaults/default-userprofile-avatar.jpg', verbose_name=_("avatar")
# )
# display_personal_page = models.BooleanField(default=False, verbose_name=_("display personal page"))
# slug = models.SlugField(max_length=30)
#
# def get_absolute_url(self):
# return reverse('users:detail', args=[self.slug])
#
# def save(self, *args, **kwargs):
# if not self.pk:
# try:
# p = UserProfile.objects.get(user=self.user)
# self.pk = p.pk
# except UserProfile.DoesNotExist:
# pass
# self.slug = slugify(self.user.username)
# super(UserProfile, self).save(*args, **kwargs)
#
# class Meta:
# default_permissions = ('change',)
# verbose_name = _('user profile')
# verbose_name_plural = _('user profile')
#
# def __str__(self):
# return "%s's profile" % self.user
, which may include functions, classes, or code. Output only the next line. | 'role_list': Role.objects.filter(person=userprofile.user).select_related('programme') |
Given the following code snippet before the placeholder: <|code_start|>
@property
def recorder_token(self):
if hasattr(settings, 'USERNAME_RADIOCO_RECORDER'):
username = settings.USERNAME_RADIOCO_RECORDER
user, created = User.objects.get_or_create(username=username)
if created:
user.set_password(User.objects.make_random_password())
user.save()
token, created = Token.objects.get_or_create(user=user)
return token.key
else:
return _('Variable USERNAME_RADIOCO_RECORDER doesn\'t exist in your settings file')
def __str__(self):
# In django 1.7 we can't use lazy
return _u('Podcast Configuration')
class Meta:
default_permissions = ('change',)
verbose_name = _('Podcast Configuration')
verbose_name_plural = _('Podcast Configuration')
class CalendarConfiguration(SingletonModel):
slot_duration = models.DurationField(
default=datetime.timedelta(minutes=30), verbose_name=_('slot duration'),
help_text=_('The frequency for displaying time slots. Format hh:mm:ss'))
first_day = models.IntegerField(
<|code_end|>
, predict the next line using imports from the current file:
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db import models
from django.utils.translation import ugettext as _u
from django.utils.translation import ugettext_lazy as _
from rest_framework.authtoken.models import Token
from radioco.apps.schedules.models import WEEKDAY_CHOICES
and context including class names, function names, and sometimes code from other files:
# Path: radioco/apps/schedules/models.py
# WEEKDAY_CHOICES = (
# (MO, _('Monday')),
# (TU, _('Tuesday')),
# (WE, _('Wednesday')),
# (TH, _('Thursday')),
# (FR, _('Friday')),
# (SA, _('Saturday')),
# (SU, _('Sunday')),
# )
. Output only the next line. | choices=WEEKDAY_CHOICES, default=0, verbose_name=_('first day'), |
Based on the snippet: <|code_start|> return datetime.timedelta(minutes=self._runtime)
@runtime.setter
def runtime(self, value):
self._runtime = value
@property
def start_dt(self):
if not self.start_date:
return None
tz = timezone.get_default_timezone()
return tz.localize(datetime.datetime.combine(self.start_date, datetime.time())).astimezone(pytz.utc)
@property
def end_dt(self):
if not self.end_date:
return None
tz = timezone.get_default_timezone()
return tz.localize(datetime.datetime.combine(self.end_date, datetime.time(23, 59, 59))).astimezone(pytz.utc)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
super(Programme, self).save(*args, **kwargs)
def rearrange_episodes(self, after, calendar):
"""
Update the issue_date of episodes from a given date
"""
episodes = Episode.objects.unfinished(self, after)
<|code_end|>
, predict the immediate next line with the help of imports:
import pytz
import datetime
from bs4 import BeautifulSoup
from ckeditor_uploader.fields import RichTextUploadingField
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import FieldError
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save, pre_save
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from radioco.apps.radioco.utils import field_has_changed
from radioco.apps.schedules.utils import next_dates
from radioco.apps.global_settings.models import PodcastConfiguration
and context (classes, functions, sometimes code) from other files:
# Path: radioco/apps/schedules/utils.py
# def next_dates(calendar, programme, after):
# """
# Returns: A generator with the next dates of a given programme
# """
# if not calendar or not calendar.id:
# return
#
# from radioco.apps.schedules.models import Schedule
# # Only taking into account schedules which belong to the active calendar
# schedules = Schedule.objects.filter(programme=programme, type='L', calendar=calendar)
#
# while True:
# candidates = [s.date_after(after) for s in schedules]
# try:
# next_date = min([_dt for _dt in candidates if _dt is not None])
# except ValueError:
# break
#
# yield next_date
# after = next_date + datetime.timedelta(seconds=1)
# # schedules = filter(lambda t: t[1] is not None, zip(schedules, candidates))
# schedules = [_tuple[0] for _tuple in zip(schedules, candidates) if _tuple[1] is not None]
. Output only the next line. | dates = next_dates(calendar, self, after) |
Based on the snippet: <|code_start|> dt = self.monthly_recurrence.after(after, True, dtstart=start_dt)
self.assertEqual(datetime.datetime(2014, 1, 20, 14, 0, 0), dt)
def test_before_dt(self):
start_dt = datetime.datetime(2014, 1, 20, 14, 0, 0)
before = datetime.datetime(2014, 1, 20, 14, 0, 1)
dt = self.monthly_recurrence.before(before, dtstart=start_dt)
self.assertEqual(datetime.datetime(2014, 1, 20, 14, 0, 0), dt)
before = start_dt
assert self.monthly_recurrence.before(before, dtstart=start_dt) is None
def test_before_inclusive(self):
start_dt = datetime.datetime(2014, 1, 20, 14, 0, 0)
before = start_dt
dt = self.monthly_recurrence.before(before, True, dtstart=start_dt)
self.assertEqual(datetime.datetime(2014, 1, 20, 14, 0, 0), dt)
def test_impossible_recurrence_after(self):
"""
Testing error calling after and function wrapper to solve it (recurrence_after)
"""
start_dt = datetime.datetime(2014, 1, 20, 14, 0, 0)
until_dt = datetime.datetime(2014, 1, 19, 14, 0, 0)
daily_recurrence = recurrence.Recurrence(
rrules=[recurrence.Rule(recurrence.DAILY, until=until_dt)])
dt = daily_recurrence.after(start_dt, True, dtstart=start_dt)
self.assertEqual(start_dt, dt) # wrong!
<|code_end|>
, predict the immediate next line with the help of imports:
import datetime
import recurrence
from django.test import TestCase
from radioco.apps.radioco.test_utils import TestDataMixin
from radioco.apps.radioco.tz_utils import recurrence_after, recurrence_before
and context (classes, functions, sometimes code) from other files:
# Path: radioco/apps/radioco/test_utils.py
# class TestDataMixin(object):
# @classmethod
# def setUpTestData(cls):
# create_test_data()
# cls.programme = Programme.objects.filter(name="Classic hits").get()
# cls.schedule = cls.programme.schedule_set.first()
# cls.calendar = cls.schedule.calendar
# cls.episode = cls.programme.episode_set.first()
# cls.another_calendar = Calendar.objects.create(name="Another")
#
# schedule, created = Schedule.objects.get_or_create(
# programme=cls.programme,
# calendar=cls.another_calendar,
# type='S',
# start_dt=pytz.utc.localize(datetime.datetime(2015, 1, 6, 16, 30, 0)))
#
# Path: radioco/apps/radioco/tz_utils.py
# def recurrence_after(recurrence, after_dt, start_dt):
# """
# Fix for django-recurrence 1.3
# Avoid outputting a impossible dt
# """
# dt = recurrence.after(after_dt, True, dtstart=start_dt)
# if dt == start_dt:
# return _fix_invalid_dt(recurrence, dt)
# return dt
#
# def recurrence_before(recurrence, before_dt, start_dt):
# """
# Fix for django-recurrence 1.3
# Avoid outputting a impossible dt
# """
# dt = recurrence.before(before_dt, True, dtstart=start_dt)
# if dt == start_dt:
# return _fix_invalid_dt(recurrence, dt)
# return dt
. Output only the next line. | self.assertIsNone(recurrence_after(daily_recurrence, start_dt, start_dt)) |
Here is a snippet: <|code_start|> before = start_dt
dt = self.monthly_recurrence.before(before, True, dtstart=start_dt)
self.assertEqual(datetime.datetime(2014, 1, 20, 14, 0, 0), dt)
def test_impossible_recurrence_after(self):
"""
Testing error calling after and function wrapper to solve it (recurrence_after)
"""
start_dt = datetime.datetime(2014, 1, 20, 14, 0, 0)
until_dt = datetime.datetime(2014, 1, 19, 14, 0, 0)
daily_recurrence = recurrence.Recurrence(
rrules=[recurrence.Rule(recurrence.DAILY, until=until_dt)])
dt = daily_recurrence.after(start_dt, True, dtstart=start_dt)
self.assertEqual(start_dt, dt) # wrong!
self.assertIsNone(recurrence_after(daily_recurrence, start_dt, start_dt))
def test_impossible_recurrence_before(self):
"""
Testing error calling before and function wrapper to solve it (recurrence_before)
"""
start_dt = datetime.datetime(2014, 1, 20, 14, 0, 0)
until_dt = datetime.datetime(2014, 1, 19, 14, 0, 0)
daily_recurrence = recurrence.Recurrence(
rrules=[recurrence.Rule(recurrence.MONTHLY, until=until_dt)])
dt = daily_recurrence.before(start_dt + datetime.timedelta(seconds=1), dtstart=start_dt)
self.assertEqual(start_dt, dt) # wrong!
<|code_end|>
. Write the next line using the current file imports:
import datetime
import recurrence
from django.test import TestCase
from radioco.apps.radioco.test_utils import TestDataMixin
from radioco.apps.radioco.tz_utils import recurrence_after, recurrence_before
and context from other files:
# Path: radioco/apps/radioco/test_utils.py
# class TestDataMixin(object):
# @classmethod
# def setUpTestData(cls):
# create_test_data()
# cls.programme = Programme.objects.filter(name="Classic hits").get()
# cls.schedule = cls.programme.schedule_set.first()
# cls.calendar = cls.schedule.calendar
# cls.episode = cls.programme.episode_set.first()
# cls.another_calendar = Calendar.objects.create(name="Another")
#
# schedule, created = Schedule.objects.get_or_create(
# programme=cls.programme,
# calendar=cls.another_calendar,
# type='S',
# start_dt=pytz.utc.localize(datetime.datetime(2015, 1, 6, 16, 30, 0)))
#
# Path: radioco/apps/radioco/tz_utils.py
# def recurrence_after(recurrence, after_dt, start_dt):
# """
# Fix for django-recurrence 1.3
# Avoid outputting a impossible dt
# """
# dt = recurrence.after(after_dt, True, dtstart=start_dt)
# if dt == start_dt:
# return _fix_invalid_dt(recurrence, dt)
# return dt
#
# def recurrence_before(recurrence, before_dt, start_dt):
# """
# Fix for django-recurrence 1.3
# Avoid outputting a impossible dt
# """
# dt = recurrence.before(before_dt, True, dtstart=start_dt)
# if dt == start_dt:
# return _fix_invalid_dt(recurrence, dt)
# return dt
, which may include functions, classes, or code. Output only the next line. | self.assertIsNone(recurrence_before(daily_recurrence, start_dt + datetime.timedelta(seconds=1), start_dt)) |
Predict the next line for this snippet: <|code_start|># Radioco - Broadcasting Radio Recording Scheduling system.
# Copyright (C) 2014 Iago Veloso Abalo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
except ImportError:
# USER
class UserProfileInline(admin.StackedInline):
inline_classes = ('grp-collapse grp-open',)
extra = 1
<|code_end|>
with the help of current file imports:
import re
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core import validators
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from radioco.apps.users.models import UserProfile
from django.utils.encoding import force_unicode
from django.utils.encoding import force_text as force_unicode
and context from other files:
# Path: radioco/apps/users/models.py
# class UserProfile(models.Model):
# user = models.OneToOneField(User, unique=True)
# bio = RichTextUploadingField(blank=True, verbose_name=_("biography"))
# avatar = models.ImageField(
# upload_to='avatars/', default='defaults/default-userprofile-avatar.jpg', verbose_name=_("avatar")
# )
# display_personal_page = models.BooleanField(default=False, verbose_name=_("display personal page"))
# slug = models.SlugField(max_length=30)
#
# def get_absolute_url(self):
# return reverse('users:detail', args=[self.slug])
#
# def save(self, *args, **kwargs):
# if not self.pk:
# try:
# p = UserProfile.objects.get(user=self.user)
# self.pk = p.pk
# except UserProfile.DoesNotExist:
# pass
# self.slug = slugify(self.user.username)
# super(UserProfile, self).save(*args, **kwargs)
#
# class Meta:
# default_permissions = ('change',)
# verbose_name = _('user profile')
# verbose_name_plural = _('user profile')
#
# def __str__(self):
# return "%s's profile" % self.user
, which may contain function names, class names, or code. Output only the next line. | model = UserProfile |
Based on the snippet: <|code_start|> return _fix_invalid_dt(recurrence, dt)
return dt
def calculate_effective_schedule_start_dt(schedule):
"""
Calculation of the first start date to improve performance
"""
tz = timezone.get_default_timezone()
programme_start_dt = tz.localize(
datetime.datetime.combine(schedule.programme.start_date, datetime.time())
).astimezone(pytz.utc) if schedule.programme.start_date else None
programme_end_dt = tz.localize(
datetime.datetime.combine(schedule.programme.end_date, datetime.time(23, 59, 59))
).astimezone(pytz.utc) if schedule.programme.end_date else None
# If there are no rrules
if not schedule.recurrences:
if programme_start_dt and programme_start_dt > schedule.start_dt:
return None
if programme_end_dt and schedule.start_dt > programme_end_dt:
return None
return schedule.start_dt
# Get first date
after_dt = schedule.start_dt
if programme_start_dt:
after_dt = max(schedule.start_dt, programme_start_dt)
first_start_dt = fix_recurrence_dst(recurrence_after(
<|code_end|>
, predict the immediate next line with the help of imports:
import datetime
import django.db.models.deletion
import pytz
from collections import namedtuple
from itertools import chain
from django.db import migrations, models
from django.utils import timezone
from radioco.apps.radioco.tz_utils import transform_dt_to_default_tz, fix_recurrence_dst
and context (classes, functions, sometimes code) from other files:
# Path: radioco/apps/radioco/tz_utils.py
# def transform_dt_to_default_tz(dt):
# """
# Transform a datetime in other timezone to the current one
# """
# tz = timezone.get_default_timezone()
# return tz.normalize(dt.astimezone(tz))
#
# def fix_recurrence_dst(dt):
# """
# Fix for django-recurrence 1.3
# Function to fix a datetime tz aware with an incorrect offset
#
# Returns: A datetime in the same timezone but with the offset fixed
# """
# if dt:
# tz = dt.tzinfo
# return tz.localize(datetime.datetime.combine(dt.date(), dt.time()))
# return None
. Output only the next line. | schedule.recurrences, transform_dt_to_default_tz(after_dt), transform_dt_to_default_tz(schedule.start_dt))) |
Based on the snippet: <|code_start|> if dt == start_dt:
return _fix_invalid_dt(recurrence, dt)
return dt
def calculate_effective_schedule_start_dt(schedule):
"""
Calculation of the first start date to improve performance
"""
tz = timezone.get_default_timezone()
programme_start_dt = tz.localize(
datetime.datetime.combine(schedule.programme.start_date, datetime.time())
).astimezone(pytz.utc) if schedule.programme.start_date else None
programme_end_dt = tz.localize(
datetime.datetime.combine(schedule.programme.end_date, datetime.time(23, 59, 59))
).astimezone(pytz.utc) if schedule.programme.end_date else None
# If there are no rrules
if not schedule.recurrences:
if programme_start_dt and programme_start_dt > schedule.start_dt:
return None
if programme_end_dt and schedule.start_dt > programme_end_dt:
return None
return schedule.start_dt
# Get first date
after_dt = schedule.start_dt
if programme_start_dt:
after_dt = max(schedule.start_dt, programme_start_dt)
<|code_end|>
, predict the immediate next line with the help of imports:
import datetime
import django.db.models.deletion
import pytz
from collections import namedtuple
from itertools import chain
from django.db import migrations, models
from django.utils import timezone
from radioco.apps.radioco.tz_utils import transform_dt_to_default_tz, fix_recurrence_dst
and context (classes, functions, sometimes code) from other files:
# Path: radioco/apps/radioco/tz_utils.py
# def transform_dt_to_default_tz(dt):
# """
# Transform a datetime in other timezone to the current one
# """
# tz = timezone.get_default_timezone()
# return tz.normalize(dt.astimezone(tz))
#
# def fix_recurrence_dst(dt):
# """
# Fix for django-recurrence 1.3
# Function to fix a datetime tz aware with an incorrect offset
#
# Returns: A datetime in the same timezone but with the offset fixed
# """
# if dt:
# tz = dt.tzinfo
# return tz.localize(datetime.datetime.combine(dt.date(), dt.time()))
# return None
. Output only the next line. | first_start_dt = fix_recurrence_dst(recurrence_after( |
Here is a snippet: <|code_start|>
EXPECTED_RESULT = b"""<?xml version="1.0" encoding="utf-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd"><channel><title>Morning News</title><link>http://example.com/programmes/morning-news/</link><description>
Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
when an unknown printer took a galley of type and scrambled it to make a type specimen book.
</description><atom:link rel="self" href="http://example.com/programmes/morning-news/rss/"></atom:link><language>en</language><lastBuildDate>Thu, 01 Jan 2015 08:00:00 +0000</lastBuildDate><itunes:explicit>clean</itunes:explicit><itunes:summary>
Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
when an unknown printer took a galley of type and scrambled it to make a type specimen book.
</itunes:summary><itunes:image href="http://testserver/media/_versions/defaults/example/radio_1_itunes_image.jpg"></itunes:image><itunes:category text="News & Politics"></itunes:category><image url="http://testserver/media/_versions/defaults/example/radio_1_rss_image.jpg" title="Morning News" link="http://testserver/programmes/morning-news/"></image><item><title>1x1 Episode 1</title><link>http://example.com/programmes/morning-news/1x1/</link><description>
Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
when an unknown printer took a galley of type and scrambled it to make a type specimen book.
</description><pubDate>Thu, 01 Jan 2015 08:00:00 +0000</pubDate><enclosure url="https://archive.org/download/Backstate_Wife/1945-08-10_-_1600_-_Backstage_Wife_-_Mary_And_Larry_See_A_Twenty_Year_Old_Portrait_That_Looks_Exactly_Like_Mary_-_32-22_-_14m13s.mp3" length="0" type="audio/mp3"></enclosure><itunes:subtitle>Episode 1</itunes:subtitle><itunes:summary>
Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
when an unknown printer took a galley of type and scrambled it to make a type specimen book.
</itunes:summary><itunes:duration>0:14:13</itunes:duration></item></channel></rss>"""
<|code_end|>
. Write the next line using the current file imports:
from rest_framework import status
from rest_framework.test import APITestCase
from radioco.apps.radioco.test_utils import TestDataMixin
and context from other files:
# Path: radioco/apps/radioco/test_utils.py
# class TestDataMixin(object):
# @classmethod
# def setUpTestData(cls):
# create_test_data()
# cls.programme = Programme.objects.filter(name="Classic hits").get()
# cls.schedule = cls.programme.schedule_set.first()
# cls.calendar = cls.schedule.calendar
# cls.episode = cls.programme.episode_set.first()
# cls.another_calendar = Calendar.objects.create(name="Another")
#
# schedule, created = Schedule.objects.get_or_create(
# programme=cls.programme,
# calendar=cls.another_calendar,
# type='S',
# start_dt=pytz.utc.localize(datetime.datetime(2015, 1, 6, 16, 30, 0)))
, which may include functions, classes, or code. Output only the next line. | class TestFeed(TestDataMixin, APITestCase): |
Here is a snippet: <|code_start|>
def mock_now():
return pytz.utc.localize(datetime.datetime(2015, 1, 6, 14, 30, 0))
class TestProgramme(TestDataMixin, APITestCase):
def setUp(self):
<|code_end|>
. Write the next line using the current file imports:
import datetime
import mock
import pytz
from rest_framework import status
from rest_framework.test import APITestCase
from radioco.apps.programmes.models import Programme
from radioco.apps.radioco.test_utils import TestDataMixin
and context from other files:
# Path: radioco/apps/programmes/models.py
# class Programme(models.Model):
# class Meta:
# verbose_name = _('programme')
# verbose_name_plural = _('programmes')
# permissions = (("see_all_programmes", "Can see all programmes"),)
#
# CATEGORY_CHOICES = (
# ('Arts', _('Arts')),
# ('Business', _('Business')),
# ('Comedy', _('Comedy')),
# ('Education', _('Education')),
# ('Games & Hobbies', _('Games & Hobbies')),
# ('Government & Organizations', _('Government & Organizations')),
# ('Health', _('Health')),
# ('Kids & Family', _('Kids & Family')),
# ('Music', _('Music')),
# ('News & Politics', _('News & Politics')),
# ('Religion & Spirituality', _('Religion & Spirituality')),
# ('Science & Medicine', _('Science & Medicine')),
# ('Society & Culture', _('Society & Culture')),
# ('Sports & Recreation', _('Sports & Recreation')),
# ('Technology', _('Technology')),
# ('TV & Film', _('TV & Film')),
# )
#
# name = models.CharField(
# max_length=100, unique=True, verbose_name=_("name")
# )
# announcers = models.ManyToManyField(
# User, blank=True, through='Role', verbose_name=_("announcers")
# )
# synopsis = RichTextUploadingField(blank=True, verbose_name=_("synopsis"))
# photo = models.ImageField(
# upload_to='photos/', default='defaults/default-programme-photo.jpg', verbose_name=_("photo")
# )
# language = models.CharField(
# default=PROGRAMME_LANGUAGES[0][0], verbose_name=_("language"),
# choices=[(k_v[0], _(k_v[1])) for k_v in PROGRAMME_LANGUAGES], max_length=7
# )
# # XXX ensure not decreasing
# current_season = models.PositiveIntegerField(
# validators=[MinValueValidator(1)], verbose_name=_("current season")
# )
# category = models.CharField(
# blank=True, null=True, max_length=50, choices=CATEGORY_CHOICES, verbose_name=_("category")
# )
# slug = models.SlugField(max_length=100, unique=True,
# help_text=_("Please DON'T change this value. It's used to build URL's."))
# _runtime = models.PositiveIntegerField(
# validators=[MinValueValidator(1)], verbose_name=_("runtime"), help_text=_("In minutes."))
#
# start_date = models.DateField(blank=True, null=True, verbose_name=_('start date'))
# end_date = models.DateField(blank=True, null=True, verbose_name=_('end date'))
#
# @property
# def synopsis_text(self):
# return BeautifulSoup(self.synopsis, "html.parser").text
#
# @property
# def runtime(self):
# if not self._runtime:
# raise FieldError(_('Runtime not set'))
# return datetime.timedelta(minutes=self._runtime)
#
# @runtime.setter
# def runtime(self, value):
# self._runtime = value
#
# @property
# def start_dt(self):
# if not self.start_date:
# return None
# tz = timezone.get_default_timezone()
# return tz.localize(datetime.datetime.combine(self.start_date, datetime.time())).astimezone(pytz.utc)
#
# @property
# def end_dt(self):
# if not self.end_date:
# return None
# tz = timezone.get_default_timezone()
# return tz.localize(datetime.datetime.combine(self.end_date, datetime.time(23, 59, 59))).astimezone(pytz.utc)
#
# def save(self, *args, **kwargs):
# if not self.slug:
# self.slug = slugify(self.name)
# super(Programme, self).save(*args, **kwargs)
#
# def rearrange_episodes(self, after, calendar):
# """
# Update the issue_date of episodes from a given date
# """
# episodes = Episode.objects.unfinished(self, after)
# dates = next_dates(calendar, self, after)
#
# # Further dates and episodes available -> re-order
# while True:
# try:
# date = next(dates)
# episode = next(episodes)
# except StopIteration:
# break
# else:
# episode.issue_date = date
# episode.save()
#
# # No further dates available -> unschedule
# while True:
# try:
# episode = next(episodes)
# except StopIteration:
# break
# else:
# episode.issue_date = None
# episode.save()
#
# def get_absolute_url(self):
# return reverse('programmes:detail', args=[self.slug])
#
# def __str__(self):
# return "%s" % (self.name)
#
# Path: radioco/apps/radioco/test_utils.py
# class TestDataMixin(object):
# @classmethod
# def setUpTestData(cls):
# create_test_data()
# cls.programme = Programme.objects.filter(name="Classic hits").get()
# cls.schedule = cls.programme.schedule_set.first()
# cls.calendar = cls.schedule.calendar
# cls.episode = cls.programme.episode_set.first()
# cls.another_calendar = Calendar.objects.create(name="Another")
#
# schedule, created = Schedule.objects.get_or_create(
# programme=cls.programme,
# calendar=cls.another_calendar,
# type='S',
# start_dt=pytz.utc.localize(datetime.datetime(2015, 1, 6, 16, 30, 0)))
, which may include functions, classes, or code. Output only the next line. | self.summer_programme = Programme.objects.create( |
Given the following code snippet before the placeholder: <|code_start|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class UtilsTest(TestCase):
def test_example_data(self):
"""
Running example data function, If nothing crash we are happy
"""
create_example_data()
def test_dictionary_key(self):
"""
Making sure that is safe to use a dt tz aware object in a different timezone to access a dictionary
"""
utc_dt = pytz.utc.localize(datetime.datetime(2015, 1, 1, 13, 0, 0))
spanish_dt = SPAIN_TZ.localize(datetime.datetime(2015, 1, 1, 13, 0, 0))
utc_dict = {utc_dt: 'Created using utc dt'}
spain_dict = {spanish_dt: 'Created using spanish dt'}
self.assertEqual(utc_dict.get(utc_dt), utc_dict.get(utc_dt.astimezone(SPAIN_TZ)))
self.assertEqual(spain_dict.get(spanish_dt), spain_dict.get(spanish_dt.astimezone(pytz.utc)))
<|code_end|>
, predict the next line using imports from the current file:
import datetime
import pytz
from django.core.urlresolvers import reverse
from django.test import TestCase
from radioco.apps.radioco.utils import create_example_data
from radioco.apps.radioco.test_utils import TestDataMixin, SPAIN_TZ
and context including class names, function names, and sometimes code from other files:
# Path: radioco/apps/radioco/test_utils.py
# class TestDataMixin(object):
# @classmethod
# def setUpTestData(cls):
# create_test_data()
# cls.programme = Programme.objects.filter(name="Classic hits").get()
# cls.schedule = cls.programme.schedule_set.first()
# cls.calendar = cls.schedule.calendar
# cls.episode = cls.programme.episode_set.first()
# cls.another_calendar = Calendar.objects.create(name="Another")
#
# schedule, created = Schedule.objects.get_or_create(
# programme=cls.programme,
# calendar=cls.another_calendar,
# type='S',
# start_dt=pytz.utc.localize(datetime.datetime(2015, 1, 6, 16, 30, 0)))
#
# SPAIN_TZ = pytz.timezone('Europe/Madrid')
. Output only the next line. | class RadioIntegrationTests(TestDataMixin, TestCase): |
Using the snippet: <|code_start|>#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class UtilsTest(TestCase):
def test_example_data(self):
"""
Running example data function, If nothing crash we are happy
"""
create_example_data()
def test_dictionary_key(self):
"""
Making sure that is safe to use a dt tz aware object in a different timezone to access a dictionary
"""
utc_dt = pytz.utc.localize(datetime.datetime(2015, 1, 1, 13, 0, 0))
<|code_end|>
, determine the next line of code. You have imports:
import datetime
import pytz
from django.core.urlresolvers import reverse
from django.test import TestCase
from radioco.apps.radioco.utils import create_example_data
from radioco.apps.radioco.test_utils import TestDataMixin, SPAIN_TZ
and context (class names, function names, or code) available:
# Path: radioco/apps/radioco/test_utils.py
# class TestDataMixin(object):
# @classmethod
# def setUpTestData(cls):
# create_test_data()
# cls.programme = Programme.objects.filter(name="Classic hits").get()
# cls.schedule = cls.programme.schedule_set.first()
# cls.calendar = cls.schedule.calendar
# cls.episode = cls.programme.episode_set.first()
# cls.another_calendar = Calendar.objects.create(name="Another")
#
# schedule, created = Schedule.objects.get_or_create(
# programme=cls.programme,
# calendar=cls.another_calendar,
# type='S',
# start_dt=pytz.utc.localize(datetime.datetime(2015, 1, 6, 16, 30, 0)))
#
# SPAIN_TZ = pytz.timezone('Europe/Madrid')
. Output only the next line. | spanish_dt = SPAIN_TZ.localize(datetime.datetime(2015, 1, 1, 13, 0, 0)) |
Given the code snippet: <|code_start|>
MOCK_CONTEXT = {'request': RequestFactory().get('')}
class TestApi(TestDataMixin):
def test_api(self):
response = self.client.get('/api/2/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
class TestSerializers(TestDataMixin, TestCase):
def test_programme(self):
<|code_end|>
, generate the next line using the imports in this file:
import datetime
import pytz
from django.test import TestCase, RequestFactory
from rest_framework import status
from radioco.apps.api import serializers
from radioco.apps.radioco.test_utils import TestDataMixin
from radioco.apps.schedules.models import Transmission
and context (functions, classes, or occasionally code) from other files:
# Path: radioco/apps/api/serializers.py
# class AbsoluteURLField(serializers.URLField):
# class DateTimeFieldTz(serializers.DateTimeField):
# class ProgrammeSerializer(serializers.ModelSerializer):
# class Meta:
# class RadiocomProgrammeSerializer(serializers.ModelSerializer):
# class Meta:
# class EpisodeSerializer(serializers.ModelSerializer):
# class Meta:
# class ScheduleSerializer(serializers.ModelSerializer):
# class Meta:
# class TransmissionSerializer(serializers.Serializer):
# class RadiocomTransmissionSerializer(serializers.Serializer):
# class TransmissionSerializerLight(serializers.Serializer): # WARNING: Hack to save changes
# class RadiocomConfigurationSerializer(serializers.ModelSerializer):
# class Meta:
# def __init__(self, method_name=None, source=None, reverse_url=None, **kwargs):
# def to_representation(self, value):
# def to_representation(self, date):
# def get_title(self, schedule):
# def validate(self, attrs):
# def validate(self, attrs):
# def get_station_photos(self, obj):
# def get_facebook_url(self, obj):
# def get_twitter_url(self, obj):
#
# Path: radioco/apps/radioco/test_utils.py
# class TestDataMixin(object):
# @classmethod
# def setUpTestData(cls):
# create_test_data()
# cls.programme = Programme.objects.filter(name="Classic hits").get()
# cls.schedule = cls.programme.schedule_set.first()
# cls.calendar = cls.schedule.calendar
# cls.episode = cls.programme.episode_set.first()
# cls.another_calendar = Calendar.objects.create(name="Another")
#
# schedule, created = Schedule.objects.get_or_create(
# programme=cls.programme,
# calendar=cls.another_calendar,
# type='S',
# start_dt=pytz.utc.localize(datetime.datetime(2015, 1, 6, 16, 30, 0)))
#
# Path: radioco/apps/schedules/models.py
# class Transmission(object):
# """
# Temporal object generated according to recurrence rules or schedule information
# It contains concrete dates
# """
# def __init__(self, schedule, date, episode=None):
# self.schedule = schedule
# self.programme = schedule.programme
# self.start = date
# self.episode = episode
#
# @property
# def name(self):
# return self.programme.name
#
# @property
# def slug(self):
# return self.programme.slug
#
# @property
# def end(self):
# return self.start + self.programme.runtime
#
# @property
# def programme_url(self):
# return reverse('programmes:detail', args=[self.programme.slug])
#
# @property
# def episode_url(self):
# if not self.episode:
# return None
# return reverse(
# 'programmes:episode_detail',
# args=(self.slug, self.episode.season, self.episode.number_in_season)
# )
#
# @classmethod
# def at(cls, at):
# schedules = Schedule.objects.filter(
# calendar__is_active=True, effective_start_dt__lte=at
# ).filter(
# Q(effective_end_dt__gt=at) |
# Q(effective_end_dt__isnull=True)
# ).select_related('programme')
# for schedule in schedules:
# date = schedule.date_before(at)
# if date and date <= at < date + schedule.runtime:
# # Get episode
# try:
# episode = Episode.objects.get(issue_date=date)
# except Episode.DoesNotExist:
# episode = None
# # yield transmission
# yield cls(schedule, date, episode)
#
# @classmethod
# def between(cls, after, before, schedules=None):
# """
# Return a tuple of Schedule and Transmissions sorted by date
# """
# if schedules is None:
# schedules = Schedule.objects.filter(calendar__is_active=True)
#
# schedules = schedules.filter(
# effective_start_dt__lt=before
# ).filter(
# Q(effective_end_dt__gt=after) |
# Q(effective_end_dt__isnull=True)
# ).select_related('programme')
#
# # Querying episodes episodes in that period of time
# episodes = Episode.objects.filter(
# issue_date__lt=before, issue_date__gte=after
# )
# episodes = {_episode.issue_date: _episode for _episode in episodes}
#
# transmission_dates = [
# map(partial(_return_tuple, item2=schedule), schedule.dates_between(after, before))
# for schedule in schedules
# ]
# sorted_transmission_dates = heapq.merge(*transmission_dates)
# for sorted_transmission_date, schedule in sorted_transmission_dates:
# # Adding episodes matching by date, we don't care about if this info is not correct
# yield cls(schedule, sorted_transmission_date, episodes.get(sorted_transmission_date))
. Output only the next line. | serializer = serializers.ProgrammeSerializer( |
Next line prediction: <|code_start|> self.assertEqual(
serializer.data['photo_url'], "http://testserver/media/defaults/example/radio_5.jpg"
)
def test_episode(self):
serializer = serializers.EpisodeSerializer(self.episode)
self.assertListEqual(
list(serializer.data.keys()),
['title', 'programme', 'summary', 'issue_date', 'season', 'number_in_season'])
def test_episode_programme(self):
serializer = serializers.EpisodeSerializer(self.episode)
self.assertEqual(serializer.data['programme'], 'classic-hits')
def test_schedule(self):
serializer = serializers.ScheduleSerializer(self.schedule)
schedule_id = self.schedule.id
calendar_id = self.calendar.id
self.assertDictEqual(serializer.data, {
'title': 'Classic hits',
'source': None,
'start': '2015-01-01T14:00:00Z',
'calendar': calendar_id,
'runtime': datetime.timedelta(minutes=60),
'type': 'L',
'id': schedule_id,
'programme': 'classic-hits'})
def test_transmission(self):
serializer = serializers.TransmissionSerializer(
<|code_end|>
. Use current file imports:
(import datetime
import pytz
from django.test import TestCase, RequestFactory
from rest_framework import status
from radioco.apps.api import serializers
from radioco.apps.radioco.test_utils import TestDataMixin
from radioco.apps.schedules.models import Transmission)
and context including class names, function names, or small code snippets from other files:
# Path: radioco/apps/api/serializers.py
# class AbsoluteURLField(serializers.URLField):
# class DateTimeFieldTz(serializers.DateTimeField):
# class ProgrammeSerializer(serializers.ModelSerializer):
# class Meta:
# class RadiocomProgrammeSerializer(serializers.ModelSerializer):
# class Meta:
# class EpisodeSerializer(serializers.ModelSerializer):
# class Meta:
# class ScheduleSerializer(serializers.ModelSerializer):
# class Meta:
# class TransmissionSerializer(serializers.Serializer):
# class RadiocomTransmissionSerializer(serializers.Serializer):
# class TransmissionSerializerLight(serializers.Serializer): # WARNING: Hack to save changes
# class RadiocomConfigurationSerializer(serializers.ModelSerializer):
# class Meta:
# def __init__(self, method_name=None, source=None, reverse_url=None, **kwargs):
# def to_representation(self, value):
# def to_representation(self, date):
# def get_title(self, schedule):
# def validate(self, attrs):
# def validate(self, attrs):
# def get_station_photos(self, obj):
# def get_facebook_url(self, obj):
# def get_twitter_url(self, obj):
#
# Path: radioco/apps/radioco/test_utils.py
# class TestDataMixin(object):
# @classmethod
# def setUpTestData(cls):
# create_test_data()
# cls.programme = Programme.objects.filter(name="Classic hits").get()
# cls.schedule = cls.programme.schedule_set.first()
# cls.calendar = cls.schedule.calendar
# cls.episode = cls.programme.episode_set.first()
# cls.another_calendar = Calendar.objects.create(name="Another")
#
# schedule, created = Schedule.objects.get_or_create(
# programme=cls.programme,
# calendar=cls.another_calendar,
# type='S',
# start_dt=pytz.utc.localize(datetime.datetime(2015, 1, 6, 16, 30, 0)))
#
# Path: radioco/apps/schedules/models.py
# class Transmission(object):
# """
# Temporal object generated according to recurrence rules or schedule information
# It contains concrete dates
# """
# def __init__(self, schedule, date, episode=None):
# self.schedule = schedule
# self.programme = schedule.programme
# self.start = date
# self.episode = episode
#
# @property
# def name(self):
# return self.programme.name
#
# @property
# def slug(self):
# return self.programme.slug
#
# @property
# def end(self):
# return self.start + self.programme.runtime
#
# @property
# def programme_url(self):
# return reverse('programmes:detail', args=[self.programme.slug])
#
# @property
# def episode_url(self):
# if not self.episode:
# return None
# return reverse(
# 'programmes:episode_detail',
# args=(self.slug, self.episode.season, self.episode.number_in_season)
# )
#
# @classmethod
# def at(cls, at):
# schedules = Schedule.objects.filter(
# calendar__is_active=True, effective_start_dt__lte=at
# ).filter(
# Q(effective_end_dt__gt=at) |
# Q(effective_end_dt__isnull=True)
# ).select_related('programme')
# for schedule in schedules:
# date = schedule.date_before(at)
# if date and date <= at < date + schedule.runtime:
# # Get episode
# try:
# episode = Episode.objects.get(issue_date=date)
# except Episode.DoesNotExist:
# episode = None
# # yield transmission
# yield cls(schedule, date, episode)
#
# @classmethod
# def between(cls, after, before, schedules=None):
# """
# Return a tuple of Schedule and Transmissions sorted by date
# """
# if schedules is None:
# schedules = Schedule.objects.filter(calendar__is_active=True)
#
# schedules = schedules.filter(
# effective_start_dt__lt=before
# ).filter(
# Q(effective_end_dt__gt=after) |
# Q(effective_end_dt__isnull=True)
# ).select_related('programme')
#
# # Querying episodes episodes in that period of time
# episodes = Episode.objects.filter(
# issue_date__lt=before, issue_date__gte=after
# )
# episodes = {_episode.issue_date: _episode for _episode in episodes}
#
# transmission_dates = [
# map(partial(_return_tuple, item2=schedule), schedule.dates_between(after, before))
# for schedule in schedules
# ]
# sorted_transmission_dates = heapq.merge(*transmission_dates)
# for sorted_transmission_date, schedule in sorted_transmission_dates:
# # Adding episodes matching by date, we don't care about if this info is not correct
# yield cls(schedule, sorted_transmission_date, episodes.get(sorted_transmission_date))
. Output only the next line. | Transmission(self.schedule, pytz.utc.localize(datetime.datetime(2015, 1, 6, 14, 0, 0))), |
Using the snippet: <|code_start|>
class ApiClient:
"""Handles the communication with the Bunq API
Can send HTTP requests and verify the response
"""
__version_api = 1
__agent_name = "complete-bunq-api-test"
__agent_version = '0.1.0'
_uri_production = "https://api.bunq.com/v%d" % __version_api
_uri_sandbox = "https://sandbox.public.api.bunq.com/v%d" % __version_api
__variables = ['installation_id', 'installation_token', 'api_key',
'server_token', 'server_pubkey', 'session_token']
def __init__(self, privkey, use_sandbox=True, **kwargs):
self.privkey = privkey
self._uri = self._uri_sandbox if use_sandbox else self._uri_production
self._handle_kwargs(kwargs)
<|code_end|>
, determine the next line of code. You have imports:
import base64
import copy
import json
import uuid
import requests
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
from apiwrapper.endpoints.controller import Controller as EndpointController
and context (class names, function names, or code) available:
# Path: apiwrapper/endpoints/controller.py
# class Controller:
#
# def __init__(self, api_client):
# self.annual_overview = AnnualOverview(api_client)
# self.attachment_public = AttachmentPublic(api_client)
# self.attachment_tab = AttachmentTab(api_client)
# self.avatar = Avatar(api_client)
# self.card = Card(api_client)
# self.cash_register = CashRegister(api_client)
# self.cash_register_tab = CashRegisterTab(api_client)
# self.certificate_pinned = CertificatePinned(api_client)
# self.chat_conversation = ChatConversation(api_client)
# self.credential_password_ip = CredentialPasswordIp(api_client)
# self.customer_statement = CustomerStatement(api_client)
# self.device = Device(api_client)
# self.device_server = DeviceServer(api_client)
# self.draft_payment = DraftPayment(api_client)
# self.draft_invite = DraftShareInviteBank(api_client)
# self.installation = Installation(api_client)
# self.invoice = Invoice(api_client)
# self.master_card_action = MasterCardAction(api_client)
# self.monetary_account = MonetaryAccount(api_client)
# self.payment = Payment(api_client)
# self.permitted_ip = PermittedIp(api_client)
# self.request_inquiry = RequestInquiry(api_client)
# self.schedule = Schedule(api_client)
# self.scheduled_payment = ScheduledPayment(api_client)
# self.server_public_key = ServerPublicKey(api_client)
# self.session_server = SessionServer(api_client)
# self.share_inquiry = ShareInviteBankInquiry(api_client)
# self.share_response = ShareInviteBankResponse(api_client)
# self.tab_attachment = TabAttachment(api_client)
# self.tab_usage = TabUsage(api_client)
# self.user = User(api_client)
. Output only the next line. | self.__endpoint_controller = EndpointController(self) |
Predict the next line after this snippet: <|code_start|>
class Controller:
def __init__(self, api_client):
self.annual_overview = AnnualOverview(api_client)
self.attachment_public = AttachmentPublic(api_client)
self.attachment_tab = AttachmentTab(api_client)
self.avatar = Avatar(api_client)
self.card = Card(api_client)
self.cash_register = CashRegister(api_client)
self.cash_register_tab = CashRegisterTab(api_client)
self.certificate_pinned = CertificatePinned(api_client)
self.chat_conversation = ChatConversation(api_client)
self.credential_password_ip = CredentialPasswordIp(api_client)
<|code_end|>
using the current file's imports:
from apiwrapper.endpoints.annual_overview import AnnualOverview
from apiwrapper.endpoints.attachment_public import AttachmentPublic
from apiwrapper.endpoints.attachment_tab import AttachmentTab
from apiwrapper.endpoints.avatar import Avatar
from apiwrapper.endpoints.card import Card
from apiwrapper.endpoints.cash_register import CashRegister
from apiwrapper.endpoints.cash_register_tab import CashRegisterTab
from apiwrapper.endpoints.certificate_pinned import CertificatePinned
from apiwrapper.endpoints.chat_conversation import ChatConversation
from apiwrapper.endpoints.credential_password_ip import CredentialPasswordIp
from apiwrapper.endpoints.customer_statement import CustomerStatement
from apiwrapper.endpoints.device import Device
from apiwrapper.endpoints.device_server import DeviceServer
from apiwrapper.endpoints.draft_payment import DraftPayment
from apiwrapper.endpoints.draft_share_invite_bank import DraftShareInviteBank
from apiwrapper.endpoints.installation import Installation
from apiwrapper.endpoints.invoice import Invoice
from apiwrapper.endpoints.master_card_action import MasterCardAction
from apiwrapper.endpoints.monetary_account import MonetaryAccount
from apiwrapper.endpoints.payment import Payment
from apiwrapper.endpoints.permitted_ip import PermittedIp
from apiwrapper.endpoints.request_inquiry import RequestInquiry
from apiwrapper.endpoints.schedule import Schedule
from apiwrapper.endpoints.scheduled_payment import ScheduledPayment
from apiwrapper.endpoints.server_public_key import ServerPublicKey
from apiwrapper.endpoints.session_server import SessionServer
from apiwrapper.endpoints.share_invite_bank_inquiry import \
ShareInviteBankInquiry
from apiwrapper.endpoints.share_invite_bank_response import \
ShareInviteBankResponse
from apiwrapper.endpoints.tab_attachment import TabAttachment
from apiwrapper.endpoints.tab_usage import TabUsage
from apiwrapper.endpoints.user import User
and any relevant context from other files:
# Path: apiwrapper/endpoints/customer_statement.py
# class CustomerStatement(Endpoint):
#
# __endpoint_customer_statement = "customer-statement"
# __endpoint_customer_statement_content = "content"
#
# @classmethod
# def _get_base_endpoint(cls, user_id, account_id, statement_id=None):
# endpoint = MonetaryAccount._get_base_endpoint(user_id, account_id)
# endpoint += "/%s" % cls.__endpoint_customer_statement
# if statement_id is not None:
# endpoint += "/%d" % statement_id
# return endpoint
#
# def get_all_customer_statements_for_account(self, user_id, account_id):
# endpoint = self._get_base_endpoint(user_id, account_id)
#
# return self._make_get_request(endpoint)
#
# def get_customer_statement_by_id(self, user_id, account_id, statement_id):
# endpoint = self._get_base_endpoint(user_id, account_id, statement_id)
#
# return self._make_get_request(endpoint)
#
# def get_content_of_customer_statement(self, user_id, account_id,
# statement_id):
# endpoint = self._get_base_endpoint(user_id, account_id, statement_id)
# endpoint += "/%s" % self.__endpoint_customer_statement_content
#
# return self._make_get_request(endpoint)
. Output only the next line. | self.customer_statement = CustomerStatement(api_client) |
Here is a snippet: <|code_start|>
class CustomerStatementTest(EndpointTest):
__base_endpoint_url = "/user/%d/monetary-account/%d/customer-statement"
@property
def _base_endpoint(self):
return self.__base_endpoint_url % (self.random_id, self.random_id)
def setUp(self):
<|code_end|>
. Write the next line using the current file imports:
from apiwrapper.endpoints.customer_statement import CustomerStatement
from tests.endpoints.test_endpoint import EndpointTest
and context from other files:
# Path: apiwrapper/endpoints/customer_statement.py
# class CustomerStatement(Endpoint):
#
# __endpoint_customer_statement = "customer-statement"
# __endpoint_customer_statement_content = "content"
#
# @classmethod
# def _get_base_endpoint(cls, user_id, account_id, statement_id=None):
# endpoint = MonetaryAccount._get_base_endpoint(user_id, account_id)
# endpoint += "/%s" % cls.__endpoint_customer_statement
# if statement_id is not None:
# endpoint += "/%d" % statement_id
# return endpoint
#
# def get_all_customer_statements_for_account(self, user_id, account_id):
# endpoint = self._get_base_endpoint(user_id, account_id)
#
# return self._make_get_request(endpoint)
#
# def get_customer_statement_by_id(self, user_id, account_id, statement_id):
# endpoint = self._get_base_endpoint(user_id, account_id, statement_id)
#
# return self._make_get_request(endpoint)
#
# def get_content_of_customer_statement(self, user_id, account_id,
# statement_id):
# endpoint = self._get_base_endpoint(user_id, account_id, statement_id)
# endpoint += "/%s" % self.__endpoint_customer_statement_content
#
# return self._make_get_request(endpoint)
#
# Path: tests/endpoints/test_endpoint.py
# class EndpointTest(TestCase):
# @staticmethod
# def create_random_privkey():
# private_key = rsa.generate_private_key(
# public_exponent=65537,
# key_size=2048,
# backend=default_backend()
# )
#
# return private_key.private_bytes(
# encoding=serialization.Encoding.PEM,
# format=serialization.PrivateFormat.PKCS8,
# encryption_algorithm=serialization.NoEncryption()
# ).decode()
#
# random_privkey = create_random_privkey.__func__()
#
# def setUp(self, testing_class):
# self.api_client = ApiClient(self.random_privkey, str(uuid.uuid4()))
# self.test_class = testing_class(self.api_client)
# self.test_class._make_get_request = MagicMock(
# side_effect=self.side_effect_get)
# self.test_class._make_post_request = MagicMock(
# side_effect=self.side_effect_post)
# self.random_id = random.randint(1, 10000)
# self.random_uuid = uuid.uuid4()
#
# def side_effect_get(self, endpoint):
# return endpoint
#
# def side_effect_post(self, endpoint, payload):
# return endpoint, payload
#
# def assert_parameters(self, should, was):
# assert (was == should), \
# "Should be: %s but was: %s" % (should, was)
#
# @property
# def _base_endpoint(self):
# return None
, which may include functions, classes, or code. Output only the next line. | super().setUp(CustomerStatement) |
Given the following code snippet before the placeholder: <|code_start|>
A = -0.19435
B = 1000.41
C = 522463
T = 0.005
f = lambda z: z**2 + A*z + B*exp(-T*z) + C
df = lambda z: 2*z + A - B*T*exp(-T*z)
<|code_end|>
, predict the next line using imports from the current file:
from numpy import exp
from cxroots import Rectangle
and context including class names, function names, and sometimes code from other files:
# Path: cxroots/contours/Rectangle.py
# class Rectangle(Contour):
# """
# A positively oriented rectangle in the complex plane.
#
# Parameters
# ----------
# xRange : tuple
# Tuple of length two giving the range of the rectangle along the
# real axis.
# yRange : tuple
# Tuple of length two giving the range of the rectangle along the
# imaginary axis.
#
# Examples
# --------
# .. plot::
# :include-source:
#
# from cxroots import Rectangle
# rect = Rectangle(xRange=(-2, 2), yRange=(-1, 1))
# rect.show()
# """
#
# def __init__(self, xRange, yRange):
# self.xRange = xRange
# self.yRange = yRange
# self.axisName = ("x", "y")
#
# self.z1 = z1 = self.xRange[0] + 1j * self.yRange[0]
# self.z2 = z2 = self.xRange[1] + 1j * self.yRange[0]
# self.z3 = z3 = self.xRange[1] + 1j * self.yRange[1]
# self.z4 = z4 = self.xRange[0] + 1j * self.yRange[1]
#
# segments = [
# ComplexLine(z1, z2),
# ComplexLine(z2, z3),
# ComplexLine(z3, z4),
# ComplexLine(z4, z1),
# ]
# super(Rectangle, self).__init__(segments)
#
# def __str__(self):
# return "Rectangle: vertices = {z1.real:.3f}{z1.imag:+.3f}i, {z2.real:.3f}{z2.imag:+.3f}i, {z3.real:.3f}{z3.imag:+.3f}i, {z4.real:.3f}{z4.imag:+.3f}i".format(
# z1=self.z1, z2=self.z2, z3=self.z3, z4=self.z4
# )
#
# @property
# def centralPoint(self):
# # get the central point within the contour
# x = (self.xRange[0] + self.xRange[1]) / 2
# y = (self.yRange[0] + self.yRange[1]) / 2
# return x + 1j * y
#
# @property
# def area(self):
# return (self.xRange[1] - self.xRange[0]) * (self.yRange[1] - self.yRange[0])
#
# def contains(self, z):
# """ Returns True if the point z lies within the contour, False if otherwise """
# return (
# self.xRange[0] < z.real < self.xRange[1]
# and self.yRange[0] < z.imag < self.yRange[1]
# )
#
# def subdivide(self, axis, divisionFactor=0.5):
# """
# Subdivide the contour
#
# Parameters
# ----------
# axis : str, can be either 'x' or 'y'
# The axis along which the line subdividing the contour is a
# constant.
# divisionFactor : float in range (0,1), optional
# Determines the point along 'axis' at which the line dividing
# the contour is placed.
#
# Returns
# -------
# box1 : Rectangle
# If axis is 'x' then box1 has the same yRange and minimum value of xRange as the
# original Rectangle but the maximum xRange is determined by the divisionFactor.
# If axis is 'y' then box1 has the same xRange and minimum value of yRange as the
# original Rectangle but the maximum yRange is determined by the divisionFactor.
# box2 : Rectangle
# If axis is 'x' then box2 has the same yRange and maximum value of xRange as the
# original Rectangle but the minimum xRange is equal to the maximum xRange of box1.
# If axis is 'x' then box2 has the same xRange and maximum value of yRange as the
# original Rectangle but the minimum yRange is equal to the maximum yRange of box1.
# """
# if axis == "x" or self.axisName[axis] == "x":
# midpoint = self.xRange[0] + divisionFactor * (
# self.xRange[1] - self.xRange[0]
# )
# box1 = Rectangle([self.xRange[0], midpoint], self.yRange)
# box2 = Rectangle([midpoint, self.xRange[1]], self.yRange)
#
# box1.segments[3] = self.segments[3]
# box2.segments[1] = self.segments[1]
# box1.segments[1]._reversePath = box2.segments[3]
# box2.segments[3]._reversePath = box1.segments[1]
#
# elif axis == "y" or self.axisName[axis] == "y":
# midpoint = self.yRange[0] + divisionFactor * (
# self.yRange[1] - self.yRange[0]
# )
# box1 = Rectangle(self.xRange, [self.yRange[0], midpoint])
# box2 = Rectangle(self.xRange, [midpoint, self.yRange[1]])
#
# box1.segments[0] = self.segments[0]
# box2.segments[2] = self.segments[2]
# box1.segments[2]._reversePath = box2.segments[0]
# box2.segments[0]._reversePath = box1.segments[2]
#
# for box in [box1, box2]:
# box._createdBySubdivisionAxis = axis
# box._parentBox = self
# self._childBoxes = [box1, box2]
#
# return box1, box2
#
# def randomPoint(self):
# """Returns a random point inside the contour of the Rectangle."""
# x = np.random.uniform(*self.xRange)
# y = np.random.uniform(*self.yRange)
# return x + 1j * y
. Output only the next line. | rectangle = Rectangle([-15000,5000], [-15000,15000]) |
Using the snippet: <|code_start|>
def test_newton():
# result from keisan online calculator: http://keisan.casio.com/exec/system/1244946907
f = lambda x: cos(x) - x
df = lambda x: -sin(x) - 1
iterations = []
callback = lambda x, dx, y, iteration: iterations.append(x)
<|code_end|>
, determine the next line of code. You have imports:
import pytest
from cxroots.IterativeMethods import newton
from numpy import pi, cos, sin
and context (class names, function names, or code) available:
# Path: cxroots/IterativeMethods.py
# def newton(
# x0,
# f,
# df,
# steptol=1e-12,
# roottol=1e-12,
# maxIter=20,
# attemptBest=False,
# verbose=False,
# callback=None,
# ):
# """
# Find an approximation to a point xf such that f(xf)=0 for a
# scalar function f using Newton-Raphson iteration starting at
# the point x0.
#
# Parameters
# ----------
# x0 : float or complex
# Initial point for Newton iteration, should be as close as
# possible to a root of f
# f : function
# Function of a single variable which we seek to find a root of.
# df : function
# Function of a single variable, df(x), providing the
# derivative of the function f(x) at the point x
# steptol: float, optional
# The routine ends if the step size, dx, between sucessive
# iterations satisfies abs(dx) < steptol and attemptBest is False.
# roottol: float, optional
# The routine ends if abs(f(x)) < roottol and attemptBest is False.
# maxIter : int, optional
# The routine ends after maxIter iterations.
# attemptBest : bool, optional
# If True then routine ends if the error of the previous iteration,
# x0, was at least as good as the current iteration, x, in the
# sense that abs(f(x)) >= abs(f(x0)) and the previous iteration
# satisfied either abs(dx0) < steptol or abs(f(x0)) < roottol. In
# this case the previous iteration is returned as the approximation
# of the root.
# verbose : bool, optional
# Print x, dx and f(x) at each step of the iteration.
# callback : function, optional
# After each iteration callback(x, dx, f(x), iteration) will be
# called where 'x' is the current iteration of the estimated root,
# 'dx' is the step size between the previous and current 'x' and
# 'iteration' the number of iterations that have been taken. If
# the callback function evaluates to True then the routine will end.
#
# Returns
# -------
# complex
# The approximation to a root of f.
# float
# abs(f(x)) where x is the final approximation for the root of f.
# """
# x, y = x0, f(x0)
# dx0, y0 = inf, y
# for iteration in range(maxIter):
# dx = -y / df(x)
# x += dx
# y = f(x)
#
# if verbose:
# print("x", x, "f(x)", y, "dx", dx)
#
# if callback is not None and callback(x, dx, y, iteration + 1):
# break
#
# if not attemptBest and (abs(dx) < steptol or abs(y) < roottol):
# break
#
# if (
# attemptBest
# and (abs(dx0) < steptol or abs(y0) < roottol)
# and abs(y) > abs(y0)
# ):
# break
#
# if attemptBest:
# # store previous dx and y
# dx0, y0 = dx, y
#
# if verbose:
# print("Final approximation: x=", x, "|f(x)|=", abs(y))
#
# return x, abs(y)
. Output only the next line. | x, err = newton(pi / 4, f, df, callback=callback) |
Next line prediction: <|code_start|>
def test_muller():
f = lambda x: cos(x) - x ** 2 + 1j * x ** 3
iterations = []
callback = lambda x, dx, y, iteration: iterations.append(x)
<|code_end|>
. Use current file imports:
(import pytest
from cxroots.IterativeMethods import muller
from numpy import pi, cos)
and context including class names, function names, or small code snippets from other files:
# Path: cxroots/IterativeMethods.py
# def muller(
# x1,
# x2,
# x3,
# f,
# steptol=1e-12,
# roottol=1e-12,
# maxIter=20,
# attemptBest=False,
# verbose=False,
# callback=None,
# ):
# """
# A wrapper for mpmath's implementation of Muller's method.
#
# Parameters
# ----------
# x1 : float or complex
# An initial point for iteration, should be close to a root of f.
# x2 : float or complex
# An initial point for iteration, should be close to a root of f.
# Should not equal x1.
# x3 : float or complex
# An initial point for iteration, should be close to a root of f.
# Should not equal x1 or x2.
# f : function
# Function of a single variable which we seek to find a root of.
# steptol: float, optional
# The routine ends if the step size, dx, between sucessive
# iterations satisfies abs(dx) < steptol and attemptBest is False.
# roottol: float, optional
# The routine ends if abs(f(x)) < roottol and attemptBest is False.
# maxIter : int, optional
# The routine ends after maxIter iterations.
# attemptBest : bool, optional
# If True then routine ends if the error of the previous iteration,
# x0, was at least as good as the current iteration, x, in the
# sense that abs(f(x)) >= abs(f(x0)) and the previous iteration
# satisfied either abs(dx0) < steptol or abs(f(x0)) < roottol. In
# this case the previous iteration is returned as the approximation
# of the root.
# verbose : bool, optional
# Print x, dx and f(x) at each step of the iteration.
# callback : function, optional
# After each iteration callback(x, dx, f(x), iteration) will be
# called where 'x' is the current iteration of the estimated root,
# 'dx' is the step size between the previous and current 'x' and
# 'iteration' the number of iterations that have been taken. If
# the callback function evaluates to True then the routine will end.
#
# Returns
# -------
# complex
# The approximation to a root of f.
# float
# abs(f(x)) where x is the final approximation for the root of f.
# """
# from mpmath import mp, mpmathify
# from mpmath.calculus.optimization import Muller
#
# # mpmath insists on functions accepting mpc
# f_mpmath = lambda z: mpmathify(f(complex(z)))
#
# mull = Muller(mp, f_mpmath, (x1, x2, x3), verbose=False)
# iteration = 0
# x0 = x3
#
# x, err = x0, abs(f(x0))
# err0, dx0 = inf, inf
# try:
# for x, dx in mull:
# err = abs(f_mpmath(x))
#
# if verbose:
# print(iteration, "x", x, "|f(x)|", err, "dx", dx)
#
# if callback is not None and callback(x, dx, err, iteration + 1):
# break
#
# if (
# not attemptBest
# and (abs(dx) < steptol or err < roottol)
# or iteration > maxIter
# ):
# break
#
# if attemptBest and (abs(dx0) < steptol or err0 < roottol) and err >= err0:
# # The previous iteration was a better appproximation the current one so
# # assume that that was as close to the root as we are going to get.
# x, err = x0, err0
# break
#
# iteration += 1
# x0 = x
#
# if attemptBest:
# # record previous error for comparison
# dx0, err0 = dx, err
#
# except ZeroDivisionError:
# # ZeroDivisionError comes up if the error is evaluated to be zero
# pass
#
# if verbose:
# print("Final approximation: x=", complex(x), "|f(x)|=", float(err))
#
# # cast mpc and mpf back to regular complex and float
# return complex(x), float(err)
. Output only the next line. | x, err = muller(0.5, pi / 4, 0.6, f) |
Given snippet: <|code_start|>
def test_secant():
# example from Table 2.5 of "Numerical Analysis" by Richard L. Burden, J. Douglas Faires
f = lambda x: cos(x) - x
df = lambda x: -sin(x) - 1
iterations = []
callback = lambda x, dx, y, iteration: iterations.append(x)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pytest
from cxroots.IterativeMethods import secant
from numpy import pi, cos, sin
and context:
# Path: cxroots/IterativeMethods.py
# def secant(x1, x2, f, steptol=1e-12, roottol=1e-12, maxIter=30, callback=None):
# """
# Find an approximation to a point xf such that f(xf)=0 for a
# scalar function f using the secant method. The method requires
# two initial points x1 and x2, ideally close to a root,
# and proceeds iteratively.
#
# Parameters
# ----------
# x1 : float or complex
# An initial point for iteration, should be close to a
# root of f.
# x2 : float or complex
# An initial point for iteration, should be close to a
# root of f. Should not equal x1.
# f : function
# Function of a single variable which we seek to find a root of.
# steptol: float, optional
# The routine ends if the step size, dx, between sucessive
# iterations satisfies abs(dx) < steptol and attemptBest is False.
# roottol: float, optional
# The routine ends if abs(f(x)) < roottol and attemptBest is False.
# maxIter : int, optional
# The routine ends after maxIter iterations.
# callback : function, optional
# After each iteration callback(x, dx, f(x), iteration) will be
# called where 'x' is the current iteration of the estimated root,
# 'dx' is the step size between the previous and current 'x' and
# 'iteration' the number of iterations that have been taken. If
# the callback function evaluates to True then the routine will end.
#
# Returns
# -------
# complex
# The approximation to a root of f.
# float
# abs(f(x)) where x is the final approximation for the root of f.
# """
# # As in "Numerical Recipies 3rd Edition" pick the bound with the
# # smallest function value as the most recent guess
# y1, y2 = f(x1), f(x2)
# if abs(y1) < abs(y2):
# x1, x2 = x2, x1
# y1, y2 = y2, y1
#
# for iteration in range(maxIter):
# dx = -(x2 - x1) * y2 / (y2 - y1)
# x1, x2 = x2, x2 + dx
# y1, y2 = y2, f(x2)
#
# if callback is not None and callback(x2, dx, y2, iteration + 1):
# break
#
# if abs(dx) < steptol or abs(y2) < roottol:
# break
#
# return x2, abs(y2)
which might include code, classes, or functions. Output only the next line. | x, err = secant(0.5, pi / 4, f, callback=callback) |
Predict the next line after this snippet: <|code_start|>
@pytest.mark.parametrize(
"symmetry",
[
pytest.param(lambda z: [z.conjugate()], id="right_symmetry"),
pytest.param(lambda z: [z + 1], id="wrong_symmetry"),
],
)
def test_guess_symmetry_1(symmetry):
<|code_end|>
using the current file's imports:
import pytest
import numpy as np
from numpy import exp, sin, cos
from cxroots import Circle
from cxroots.tests.ApproxEqual import roots_approx_equal
and any relevant context from other files:
# Path: cxroots/contours/Circle.py
# class Circle(Contour):
# """
# A positively oriented circle in the complex plane.
#
# Parameters
# ----------
# center : complex
# The center of the circle.
# radius : float
# The radius of the circle.
#
# Examples
# --------
# .. plot::
# :include-source:
#
# from cxroots import Circle
# circle = Circle(center=1, radius=0.5)
# circle.show()
# """
#
# def __init__(self, center, radius):
# self.center = center
# self.radius = radius
# self.axisName = "r"
#
# segments = [ComplexArc(center, radius, 0, 2 * pi)]
# super(Circle, self).__init__(segments)
#
# def __str__(self):
# return "Circle: center={center.real:.3f}{center.imag:+.3f}i, radius={radius:.3f}".format(
# center=self.center, radius=self.radius
# )
#
# def contains(self, z):
# """ Returns True if the point z lies within the contour, False if otherwise """
# return abs(z - self.center) < self.radius
#
# @property
# def centralPoint(self):
# return self.center
#
# @property
# def area(self):
# return pi * self.radius ** 2
#
# def subdivide(self, axis="r", divisionFactor=0.5):
# """
# Subdivide the contour
#
# Parameters
# ----------
# axis : str, can only be 'r' (argument kept for consistency with 'subdivisions' method in parent Contour class)
# The axis along which the line subdividing the contour is a constant.
# divisionFactor : float in range (0,1), optional
# Determines the point along 'axis' at which the line dividing the box is placed
#
# Returns
# -------
# box1 : Annulus
# With inner radius determined by the divisionFactor and outer radius equal to that of the original circle
# box2 : Circle
# With radius equal to the inner radius of box1
# """
# if axis == "r" or self.axisName[axis] == "r":
# box1 = Annulus(self.center, [self.radius * divisionFactor, self.radius])
# box2 = Circle(self.center, self.radius * divisionFactor)
# box1.segments[0] = self.segments[0]
# box1.segments[1]._reversePath = box2.segments[0]
# box2.segments[0]._reversePath = box1.segments[1]
#
# for box in [box1, box2]:
# box._createdBySubdivisionAxis = axis
# box._parentBox = self
# self._childBoxes = [box1, box2]
#
# return box1, box2
#
# def randomPoint(self):
# """ Returns a random point inside the Circle """
# r = np.random.uniform(0, self.radius)
# phi = np.random.uniform(0, 2 * pi)
# return r * exp(1j * phi) + self.center
#
# Path: cxroots/tests/ApproxEqual.py
# def roots_approx_equal(a, b, decimal=10):
# a_roots, a_multiplicities = a
# b_roots, b_multiplicities = b
#
# a_roots, b_roots = np.array(a_roots), np.array(b_roots)
# a_multiplicities, b_multiplicities = (
# np.array(a_multiplicities),
# np.array(b_multiplicities),
# )
#
# for ai, a_element in np.ndenumerate(a_roots):
# bi = np.argmin(np.abs(b_roots - a_element))
# b_roots[ai], b_roots[bi] = b_roots[bi], b_roots[ai]
# b_multiplicities[ai], b_multiplicities[bi] = (
# b_multiplicities[bi],
# b_multiplicities[ai],
# )
#
# np.testing.assert_almost_equal(a_roots, b_roots, decimal)
# np.testing.assert_almost_equal(a_multiplicities, b_multiplicities, decimal)
. Output only the next line. | C = Circle(0, 3) |
Given the following code snippet before the placeholder: <|code_start|>
@pytest.mark.parametrize(
"symmetry",
[
pytest.param(lambda z: [z.conjugate()], id="right_symmetry"),
pytest.param(lambda z: [z + 1], id="wrong_symmetry"),
],
)
def test_guess_symmetry_1(symmetry):
C = Circle(0, 3)
f = lambda z: z ** 4 + z ** 3 + z ** 2 + z
roots = [0, -1, 1j, -1j]
multiplicities = [1, 1, 1, 1]
<|code_end|>
, predict the next line using imports from the current file:
import pytest
import numpy as np
from numpy import exp, sin, cos
from cxroots import Circle
from cxroots.tests.ApproxEqual import roots_approx_equal
and context including class names, function names, and sometimes code from other files:
# Path: cxroots/contours/Circle.py
# class Circle(Contour):
# """
# A positively oriented circle in the complex plane.
#
# Parameters
# ----------
# center : complex
# The center of the circle.
# radius : float
# The radius of the circle.
#
# Examples
# --------
# .. plot::
# :include-source:
#
# from cxroots import Circle
# circle = Circle(center=1, radius=0.5)
# circle.show()
# """
#
# def __init__(self, center, radius):
# self.center = center
# self.radius = radius
# self.axisName = "r"
#
# segments = [ComplexArc(center, radius, 0, 2 * pi)]
# super(Circle, self).__init__(segments)
#
# def __str__(self):
# return "Circle: center={center.real:.3f}{center.imag:+.3f}i, radius={radius:.3f}".format(
# center=self.center, radius=self.radius
# )
#
# def contains(self, z):
# """ Returns True if the point z lies within the contour, False if otherwise """
# return abs(z - self.center) < self.radius
#
# @property
# def centralPoint(self):
# return self.center
#
# @property
# def area(self):
# return pi * self.radius ** 2
#
# def subdivide(self, axis="r", divisionFactor=0.5):
# """
# Subdivide the contour
#
# Parameters
# ----------
# axis : str, can only be 'r' (argument kept for consistency with 'subdivisions' method in parent Contour class)
# The axis along which the line subdividing the contour is a constant.
# divisionFactor : float in range (0,1), optional
# Determines the point along 'axis' at which the line dividing the box is placed
#
# Returns
# -------
# box1 : Annulus
# With inner radius determined by the divisionFactor and outer radius equal to that of the original circle
# box2 : Circle
# With radius equal to the inner radius of box1
# """
# if axis == "r" or self.axisName[axis] == "r":
# box1 = Annulus(self.center, [self.radius * divisionFactor, self.radius])
# box2 = Circle(self.center, self.radius * divisionFactor)
# box1.segments[0] = self.segments[0]
# box1.segments[1]._reversePath = box2.segments[0]
# box2.segments[0]._reversePath = box1.segments[1]
#
# for box in [box1, box2]:
# box._createdBySubdivisionAxis = axis
# box._parentBox = self
# self._childBoxes = [box1, box2]
#
# return box1, box2
#
# def randomPoint(self):
# """ Returns a random point inside the Circle """
# r = np.random.uniform(0, self.radius)
# phi = np.random.uniform(0, 2 * pi)
# return r * exp(1j * phi) + self.center
#
# Path: cxroots/tests/ApproxEqual.py
# def roots_approx_equal(a, b, decimal=10):
# a_roots, a_multiplicities = a
# b_roots, b_multiplicities = b
#
# a_roots, b_roots = np.array(a_roots), np.array(b_roots)
# a_multiplicities, b_multiplicities = (
# np.array(a_multiplicities),
# np.array(b_multiplicities),
# )
#
# for ai, a_element in np.ndenumerate(a_roots):
# bi = np.argmin(np.abs(b_roots - a_element))
# b_roots[ai], b_roots[bi] = b_roots[bi], b_roots[ai]
# b_multiplicities[ai], b_multiplicities[bi] = (
# b_multiplicities[bi],
# b_multiplicities[ai],
# )
#
# np.testing.assert_almost_equal(a_roots, b_roots, decimal)
# np.testing.assert_almost_equal(a_multiplicities, b_multiplicities, decimal)
. Output only the next line. | roots_approx_equal( |
Here is a snippet: <|code_start|>
# have a common seed for each testing process
today = date.today()
np.random.seed(today.year * today.month * today.day)
@pytest.mark.parametrize("a", uniform(-10, 10, size=3) + 1j * uniform(-10, 10, size=3))
@pytest.mark.parametrize("b", uniform(-10, 10, size=3) + 1j * uniform(-10, 10, size=3))
@pytest.mark.parametrize("P", uniform(-10, 10, size=3) + 1j * uniform(-10, 10, size=3))
def test_distance_line(a, b, P):
t = np.linspace(0, 1, 100001)
<|code_end|>
. Write the next line using the current file imports:
import pytest
import numpy as np
from numpy import pi
from numpy.random import uniform
from cxroots.Paths import ComplexArc, ComplexLine
from datetime import date
and context from other files:
# Path: cxroots/Paths.py
# class ComplexArc(ComplexPath):
# r"""
# A circular arc :math:`z` with center z0, radius R, initial angle t0
# and change of angle dt. The arc is parameterised by
#
# ..math::
#
# z(t) = R e^{i(t0 + t dt)} + z0, \quad 0\leq t \leq 1
#
# Parameters
# ----------
# z0 : complex
# R : float
# t0 : float
# dt : float
# """
#
# def __init__(self, z0, R, t0, dt):
# self.z0, self.R, self.t0, self.dt = z0, R, t0, dt
# self.dzdt = lambda t: 1j * self.dt * self.R * exp(1j * (self.t0 + t * self.dt))
# super(ComplexArc, self).__init__()
#
# def __str__(self):
# return "ComplexArc: z0=%.3f, R=%.3f, t0=%.3f, dt=%.3f" % (
# self.z0,
# self.R,
# self.t0,
# self.dt,
# )
#
# def __call__(self, t):
# r"""
# The function :math:`z(t) = R e^{i(t_0 + t dt)} + z_0`.
#
# Parameters
# ----------
# t : float
# A real number :math:`0\leq t \leq 1`.
#
# Returns
# -------
# complex
# A point on the arc in the complex plane.
# """
# return self.R * exp(1j * (self.t0 + t * self.dt)) + self.z0
#
# def distance(self, z):
# """
# Distance from the point z to the closest point on the arc.
#
# Parameters
# ----------
# z : complex
#
# Returns
# -------
# float
# The distance from z to the point on the arc which is closest
# to z.
# """
# theta = np.angle(z - self.z0) # np.angle maps to (-pi,pi]
# theta = (theta - self.t0) % (2 * pi) + self.t0 # put theta in [t0,t0+2pi)
#
# if (self.dt > 0 and self.t0 < theta < self.t0 + self.dt) or (
# self.dt < 0 and self.t0 + self.dt < theta - 2 * pi < self.t0
# ):
# # the closest point to z lies on the arc
# return abs(self.R * exp(1j * theta) + self.z0 - z)
# else:
# # the closest point to z is one of the endpoints
# return min(abs(self(0) - z), abs(self(1) - z))
#
# class ComplexLine(ComplexPath):
# r"""
# A straight line :math:`z` in the complex plane from a to b
# parameterised by
#
# ..math::
#
# z(t) = a + (b-a)t, \quad 0\leq t \leq 1
#
#
# Parameters
# ----------
# a : float
# b : float
# """
#
# def __init__(self, a, b):
# self.a, self.b = a, b
# self.dzdt = lambda t: self.b - self.a
# super(ComplexLine, self).__init__()
#
# def __str__(self):
# return "ComplexLine from %.3f+%.3fi to %.3f+%.3fi" % (
# self.a.real,
# self.a.imag,
# self.b.real,
# self.b.imag,
# )
#
# def __call__(self, t):
# r"""
# The function :math:`z(t) = a + (b-a)t`.
#
# Parameters
# ----------
# t : float
# A real number :math:`0\leq t \leq 1`.
#
# Returns
# -------
# complex
# A point on the line in the complex plane.
# """
# return self.a + t * (self.b - self.a)
#
# def distance(self, z):
# """
# Distance from the point z to the closest point on the line.
#
# Parameters
# ----------
# z : complex
#
# Returns
# -------
# float
# The distance from z to the point on the line which is
# closest to z.
# """
# # convert complex numbers to vectors
# A = np.array([self.a.real, self.a.imag])
# B = np.array([self.b.real, self.b.imag])
# Z = np.array([z.real, z.imag])
#
# # the projection of the point z onto the line a -> b is where
# # the parameter t is
# t = (Z - A).dot(B - A) / abs((B - A).dot(B - A))
#
# # but the line segment only has 0 <= t <= 1
# t = t.clip(0, 1)
#
# # so the point on the line segment closest to z is
# c = self(t)
# return abs(c - z)
, which may include functions, classes, or code. Output only the next line. | C = ComplexLine(a, b) |
Continue the code snippet: <|code_start|>
@pytest.mark.parametrize("useDerivative", [True, False])
def test_count_roots(useDerivative):
"""
Example from "Locating all the Zeros of an Analytic Function in one Complex Variable"
M.Dellnitz, O.Schutze, Q.Zheng, J. Compu. and App. Math. (2002), Vol.138, Issue 2
There should be 424 roots inside this contour
"""
<|code_end|>
. Use current file imports:
import pytest
import numpy as np
from numpy import cos, sin
from cxroots import Rectangle
from cxroots import Rectangle, findRoots
and context (classes, functions, or code) from other files:
# Path: cxroots/contours/Rectangle.py
# class Rectangle(Contour):
# """
# A positively oriented rectangle in the complex plane.
#
# Parameters
# ----------
# xRange : tuple
# Tuple of length two giving the range of the rectangle along the
# real axis.
# yRange : tuple
# Tuple of length two giving the range of the rectangle along the
# imaginary axis.
#
# Examples
# --------
# .. plot::
# :include-source:
#
# from cxroots import Rectangle
# rect = Rectangle(xRange=(-2, 2), yRange=(-1, 1))
# rect.show()
# """
#
# def __init__(self, xRange, yRange):
# self.xRange = xRange
# self.yRange = yRange
# self.axisName = ("x", "y")
#
# self.z1 = z1 = self.xRange[0] + 1j * self.yRange[0]
# self.z2 = z2 = self.xRange[1] + 1j * self.yRange[0]
# self.z3 = z3 = self.xRange[1] + 1j * self.yRange[1]
# self.z4 = z4 = self.xRange[0] + 1j * self.yRange[1]
#
# segments = [
# ComplexLine(z1, z2),
# ComplexLine(z2, z3),
# ComplexLine(z3, z4),
# ComplexLine(z4, z1),
# ]
# super(Rectangle, self).__init__(segments)
#
# def __str__(self):
# return "Rectangle: vertices = {z1.real:.3f}{z1.imag:+.3f}i, {z2.real:.3f}{z2.imag:+.3f}i, {z3.real:.3f}{z3.imag:+.3f}i, {z4.real:.3f}{z4.imag:+.3f}i".format(
# z1=self.z1, z2=self.z2, z3=self.z3, z4=self.z4
# )
#
# @property
# def centralPoint(self):
# # get the central point within the contour
# x = (self.xRange[0] + self.xRange[1]) / 2
# y = (self.yRange[0] + self.yRange[1]) / 2
# return x + 1j * y
#
# @property
# def area(self):
# return (self.xRange[1] - self.xRange[0]) * (self.yRange[1] - self.yRange[0])
#
# def contains(self, z):
# """ Returns True if the point z lies within the contour, False if otherwise """
# return (
# self.xRange[0] < z.real < self.xRange[1]
# and self.yRange[0] < z.imag < self.yRange[1]
# )
#
# def subdivide(self, axis, divisionFactor=0.5):
# """
# Subdivide the contour
#
# Parameters
# ----------
# axis : str, can be either 'x' or 'y'
# The axis along which the line subdividing the contour is a
# constant.
# divisionFactor : float in range (0,1), optional
# Determines the point along 'axis' at which the line dividing
# the contour is placed.
#
# Returns
# -------
# box1 : Rectangle
# If axis is 'x' then box1 has the same yRange and minimum value of xRange as the
# original Rectangle but the maximum xRange is determined by the divisionFactor.
# If axis is 'y' then box1 has the same xRange and minimum value of yRange as the
# original Rectangle but the maximum yRange is determined by the divisionFactor.
# box2 : Rectangle
# If axis is 'x' then box2 has the same yRange and maximum value of xRange as the
# original Rectangle but the minimum xRange is equal to the maximum xRange of box1.
# If axis is 'x' then box2 has the same xRange and maximum value of yRange as the
# original Rectangle but the minimum yRange is equal to the maximum yRange of box1.
# """
# if axis == "x" or self.axisName[axis] == "x":
# midpoint = self.xRange[0] + divisionFactor * (
# self.xRange[1] - self.xRange[0]
# )
# box1 = Rectangle([self.xRange[0], midpoint], self.yRange)
# box2 = Rectangle([midpoint, self.xRange[1]], self.yRange)
#
# box1.segments[3] = self.segments[3]
# box2.segments[1] = self.segments[1]
# box1.segments[1]._reversePath = box2.segments[3]
# box2.segments[3]._reversePath = box1.segments[1]
#
# elif axis == "y" or self.axisName[axis] == "y":
# midpoint = self.yRange[0] + divisionFactor * (
# self.yRange[1] - self.yRange[0]
# )
# box1 = Rectangle(self.xRange, [self.yRange[0], midpoint])
# box2 = Rectangle(self.xRange, [midpoint, self.yRange[1]])
#
# box1.segments[0] = self.segments[0]
# box2.segments[2] = self.segments[2]
# box1.segments[2]._reversePath = box2.segments[0]
# box2.segments[0]._reversePath = box1.segments[2]
#
# for box in [box1, box2]:
# box._createdBySubdivisionAxis = axis
# box._parentBox = self
# self._childBoxes = [box1, box2]
#
# return box1, box2
#
# def randomPoint(self):
# """Returns a random point inside the contour of the Rectangle."""
# x = np.random.uniform(*self.xRange)
# y = np.random.uniform(*self.yRange)
# return x + 1j * y
. Output only the next line. | C = Rectangle([-20.3, 20.7], [-20.3, 20.7]) |
Given snippet: <|code_start|> filt = pa.filters.FilterAP(3, mu=1.)
for k in range(N):
# measure input
x = measure_x()
# predict new value
y = filt.predict(x)
# do the important stuff with prediction output
pass
# measure output
d = measure_d(x)
# update filter
filt.adapt(d, x)
# log values
log_d[k] = d
log_y[k] = y
### show results
plt.figure(figsize=(15,9))
plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
plt.plot(log_d,"b", label="d - target")
plt.plot(log_y,"g", label="y - output");plt.legend()
plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
plt.plot(10*np.log10((log_d-log_y)**2),"r", label="e - error [dB]")
plt.legend(); plt.tight_layout(); plt.show()
Code Explanation
======================================
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
from padasip.filters.base_filter import AdaptiveFilterAP
and context:
# Path: padasip/filters/base_filter.py
# class AdaptiveFilterAP(AdaptiveFilter):
# """
# This class modifies the AdaptiveFilter class
# to allow AP filtering.
# """
# def __init__(self, *args, order=5, ifc=0.001, **kwargs):
# """
# **Kwargs:**
#
# * `order` : projection order (integer) - how many input vectors
# are in one input matrix
#
# * `ifc` : initial offset covariance (float) - regularization term
# to prevent problems with inverse matrix
#
# """
# super().__init__(*args, **kwargs)
# self.order = order
# self.x_mem = np.zeros((self.n, self.order))
# self.d_mem = np.zeros(order)
# self.ide_ifc = ifc * np.identity(self.order)
# self.ide = np.identity(self.order)
# self.y_mem = False
# self.e_mem = False
#
# def learning_rule(self, e_mem, x_mem):
# """
# This functions computes the increment of adaptive weights.
#
# **Args:**
#
# * `e_mem` : error of the adaptive filter (1d array)
#
# * `x_mem` : input matrix (2d array)
#
# **Returns**
#
# * increments of adaptive weights - result of adaptation
# """
# return np.zeros(len(x_mem))
#
# def adapt(self, d, x):
# """
# Adapt weights according one desired value and its input.
#
# **Args:**
#
# * `d` : desired value (float)
#
# * `x` : input array (1-dimensional array)
# """
# # create input matrix and target vector
# self.x_mem[:, 1:] = self.x_mem[:, :-1]
# self.x_mem[:, 0] = x
# self.d_mem[1:] = self.d_mem[:-1]
# self.d_mem[0] = d
# # estimate output and error
# self.y_mem = np.dot(self.x_mem.T, self.w)
# self.e_mem = self.d_mem - self.y_mem
# # update
# dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_ifc
# dw_part2 = np.linalg.solve(dw_part1, self.ide)
# dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
# self.w += self.mu * dw
#
# def run(self, d, x):
# """
# This function filters multiple samples in a row.
#
# **Args:**
#
# * `d` : desired value (1 dimensional array)
#
# * `x` : input matrix (2-dimensional array). Rows are samples,
# columns are input arrays.
#
# **Returns:**
#
# * `y` : output value (1 dimensional array).
# The size corresponds with the desired value.
#
# * `e` : filter error for every sample (1 dimensional array).
# The size corresponds with the desired value.
#
# * `w` : history of all weights (2 dimensional array).
# Every row is set of the weights for given sample.
#
# """
# # measure the data and check if the dimmension agree
# N = len(x)
# if not len(d) == N:
# raise ValueError('The length of vector d and matrix x must agree.')
# self.n = len(x[0])
# # prepare data
# try:
# x = np.array(x)
# d = np.array(d)
# except:
# raise ValueError('Impossible to convert x or d to a numpy array')
# # create empty arrays
# y = np.zeros(N)
# e = np.zeros(N)
# self.w_history = np.zeros((N, self.n))
# # adaptation loop
# for k in range(N):
# self.w_history[k, :] = self.w
# # create input matrix and target vector
# self.x_mem[:, 1:] = self.x_mem[:, :-1]
# self.x_mem[:, 0] = x[k]
# self.d_mem[1:] = self.d_mem[:-1]
# self.d_mem[0] = d[k]
# # estimate output and error
# self.y_mem = np.dot(self.x_mem.T, self.w)
# self.e_mem = self.d_mem - self.y_mem
# y[k] = self.y_mem[0]
# e[k] = self.e_mem[0]
# # update
# self.w += self.learning_rule(self.e_mem, self.x_mem)
# return y, e, self.w_history
which might include code, classes, or functions. Output only the next line. | class FilterAP(AdaptiveFilterAP): |
Here is a snippet: <|code_start|> retVal = regOpcode['r16'][self.RM]
elif typ == 'r/m32':
retVal = regOpcode['r32'][self.RM]
else:
raise RuntimeError("Invalid r/m type")
else:
raise RuntimeError("Invalid Mode")
return retVal
def GetDisplacementSize(self):
"We only know this at runtime with real values"
if self.Mode == 0 and self.RM == 5:
return 4
elif self.Mode == 1:
return 1
elif self.Mode == 2:
return 4
else:
return 0
class instruction:
def __init__(self,opstr,inststr,desc):
self.OpcodeString = opstr
self.InstructionString = inststr
self.Description = desc
self.Opcode = []
self.OpcodeSize = 0
self.OpcodeFlags = []
<|code_end|>
. Write the next line using the current file imports:
import logging
import struct
from pickle import decode_long, encode_long
from x86tokenizer import (tokenizeInstDef,tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET, RBRACKET,NUMBER,SYMBOL)
and context from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
, which may include functions, classes, or code. Output only the next line. | self.InstructionDef = tokenizeInstDef(self.InstructionString) |
Continue the code snippet: <|code_start|> else:
raise RuntimeError("Invalid Displacement size")
if self.Instruction.HasImmediate:
if self.Instruction.ImmediateSize == 1:
first,rest = rest[0],rest[1:]
self.Immediate = struct.unpack("<b",first)[0]
elif self.Instruction.ImmediateSize == 2:
first,rest = rest[:2],rest[2:]
self.Immediate = struct.unpack("<s",first)[0]
elif self.Instruction.ImmediateSize == 4:
first,rest = rest[:4],rest[4:]
self.Immediate = struct.unpack('<l',first)[0]
else:
raise RuntimeError("Invalid Immdediate size [%s]" % \
self.InstructionImmediateSize)
if rest:
raise RuntimeError("Couldn't unpack all data")
def DataText(self,data,size,skip=False):
retVal = ''
if skip:
return retVal
if size >= 1:
retVal += "%02X " % (data % 0xFF)
return retVal
def LoadConcreteValues(self, toks):
if type(toks) == type(""):
<|code_end|>
. Use current file imports:
import logging
import struct
from pickle import decode_long, encode_long
from x86tokenizer import (tokenizeInstDef,tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET, RBRACKET,NUMBER,SYMBOL)
and context (classes, functions, or code) from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
. Output only the next line. | toks = tokenizeInst(toks) |
Given snippet: <|code_start|> elif self.Instruction.ImmediateSize == 2:
first,rest = rest[:2],rest[2:]
self.Immediate = struct.unpack("<s",first)[0]
elif self.Instruction.ImmediateSize == 4:
first,rest = rest[:4],rest[4:]
self.Immediate = struct.unpack('<l',first)[0]
else:
raise RuntimeError("Invalid Immdediate size [%s]" % \
self.InstructionImmediateSize)
if rest:
raise RuntimeError("Couldn't unpack all data")
def DataText(self,data,size,skip=False):
retVal = ''
if skip:
return retVal
if size >= 1:
retVal += "%02X " % (data % 0xFF)
return retVal
def LoadConcreteValues(self, toks):
if type(toks) == type(""):
toks = tokenizeInst(toks)
logging.info("%s => %s" % (self.Instruction.InstructionString, toks))
tmpModRM = ModRM()
firstDef, restDef = (self.Instruction.InstructionDef[0],self.Instruction.InstructionDef[1:])
firstTok, restTok = toks[0],toks[1:]
while 1:
logging.info("TOK COMPARES: %s => %s" % (firstDef, firstTok))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
import struct
from pickle import decode_long, encode_long
from x86tokenizer import (tokenizeInstDef,tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET, RBRACKET,NUMBER,SYMBOL)
and context:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
which might include code, classes, or functions. Output only the next line. | if firstDef[0] in (OPCODE, COMMA, REGISTER): |
Here is a snippet: <|code_start|> elif self.Instruction.ImmediateSize == 2:
first,rest = rest[:2],rest[2:]
self.Immediate = struct.unpack("<s",first)[0]
elif self.Instruction.ImmediateSize == 4:
first,rest = rest[:4],rest[4:]
self.Immediate = struct.unpack('<l',first)[0]
else:
raise RuntimeError("Invalid Immdediate size [%s]" % \
self.InstructionImmediateSize)
if rest:
raise RuntimeError("Couldn't unpack all data")
def DataText(self,data,size,skip=False):
retVal = ''
if skip:
return retVal
if size >= 1:
retVal += "%02X " % (data % 0xFF)
return retVal
def LoadConcreteValues(self, toks):
if type(toks) == type(""):
toks = tokenizeInst(toks)
logging.info("%s => %s" % (self.Instruction.InstructionString, toks))
tmpModRM = ModRM()
firstDef, restDef = (self.Instruction.InstructionDef[0],self.Instruction.InstructionDef[1:])
firstTok, restTok = toks[0],toks[1:]
while 1:
logging.info("TOK COMPARES: %s => %s" % (firstDef, firstTok))
<|code_end|>
. Write the next line using the current file imports:
import logging
import struct
from pickle import decode_long, encode_long
from x86tokenizer import (tokenizeInstDef,tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET, RBRACKET,NUMBER,SYMBOL)
and context from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
, which may include functions, classes, or code. Output only the next line. | if firstDef[0] in (OPCODE, COMMA, REGISTER): |
Predict the next line after this snippet: <|code_start|> elif self.Instruction.ImmediateSize == 2:
first,rest = rest[:2],rest[2:]
self.Immediate = struct.unpack("<s",first)[0]
elif self.Instruction.ImmediateSize == 4:
first,rest = rest[:4],rest[4:]
self.Immediate = struct.unpack('<l',first)[0]
else:
raise RuntimeError("Invalid Immdediate size [%s]" % \
self.InstructionImmediateSize)
if rest:
raise RuntimeError("Couldn't unpack all data")
def DataText(self,data,size,skip=False):
retVal = ''
if skip:
return retVal
if size >= 1:
retVal += "%02X " % (data % 0xFF)
return retVal
def LoadConcreteValues(self, toks):
if type(toks) == type(""):
toks = tokenizeInst(toks)
logging.info("%s => %s" % (self.Instruction.InstructionString, toks))
tmpModRM = ModRM()
firstDef, restDef = (self.Instruction.InstructionDef[0],self.Instruction.InstructionDef[1:])
firstTok, restTok = toks[0],toks[1:]
while 1:
logging.info("TOK COMPARES: %s => %s" % (firstDef, firstTok))
<|code_end|>
using the current file's imports:
import logging
import struct
from pickle import decode_long, encode_long
from x86tokenizer import (tokenizeInstDef,tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET, RBRACKET,NUMBER,SYMBOL)
and any relevant context from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
. Output only the next line. | if firstDef[0] in (OPCODE, COMMA, REGISTER): |
Here is a snippet: <|code_start|> retVal += "%02X " % (data % 0xFF)
return retVal
def LoadConcreteValues(self, toks):
if type(toks) == type(""):
toks = tokenizeInst(toks)
logging.info("%s => %s" % (self.Instruction.InstructionString, toks))
tmpModRM = ModRM()
firstDef, restDef = (self.Instruction.InstructionDef[0],self.Instruction.InstructionDef[1:])
firstTok, restTok = toks[0],toks[1:]
while 1:
logging.info("TOK COMPARES: %s => %s" % (firstDef, firstTok))
if firstDef[0] in (OPCODE, COMMA, REGISTER):
#TODO: Can we handle this special case better?
# The special case is a m8/16/32 value that is constant
# and doesn't have an RM.
if firstDef[0] == REGISTER and firstDef[1][0] == '[' and \
firstTok[0] == LBRACKET:
firstTok, restTok = restTok[0],restTok[1:]
if firstTok[1] == firstDef[1][1:-1]:
firstTok, restTok = restTok[0],restTok[1:]
else:
raise x86instError("TOKEN MISMATCH '%s' '%s'" % \
(firstDef,firstTok))
elif firstDef[0] != firstTok[0]:
raise x86instError("These should be equal '%s' '%s'" % \
(firstDef, firstTok))
elif firstDef[0] == NUMBER:
if firstTok[0] != NUMBER or firstTok[1] != firstDef[1]:
raise x86instError("INVALID NUMBER '%s'" % repr(firstTok))
<|code_end|>
. Write the next line using the current file imports:
import logging
import struct
from pickle import decode_long, encode_long
from x86tokenizer import (tokenizeInstDef,tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET, RBRACKET,NUMBER,SYMBOL)
and context from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
, which may include functions, classes, or code. Output only the next line. | elif firstDef[0] == OPERAND: |
Predict the next line after this snippet: <|code_start|> self.Immediate = struct.unpack('<l',first)[0]
else:
raise RuntimeError("Invalid Immdediate size [%s]" % \
self.InstructionImmediateSize)
if rest:
raise RuntimeError("Couldn't unpack all data")
def DataText(self,data,size,skip=False):
retVal = ''
if skip:
return retVal
if size >= 1:
retVal += "%02X " % (data % 0xFF)
return retVal
def LoadConcreteValues(self, toks):
if type(toks) == type(""):
toks = tokenizeInst(toks)
logging.info("%s => %s" % (self.Instruction.InstructionString, toks))
tmpModRM = ModRM()
firstDef, restDef = (self.Instruction.InstructionDef[0],self.Instruction.InstructionDef[1:])
firstTok, restTok = toks[0],toks[1:]
while 1:
logging.info("TOK COMPARES: %s => %s" % (firstDef, firstTok))
if firstDef[0] in (OPCODE, COMMA, REGISTER):
#TODO: Can we handle this special case better?
# The special case is a m8/16/32 value that is constant
# and doesn't have an RM.
if firstDef[0] == REGISTER and firstDef[1][0] == '[' and \
<|code_end|>
using the current file's imports:
import logging
import struct
from pickle import decode_long, encode_long
from x86tokenizer import (tokenizeInstDef,tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET, RBRACKET,NUMBER,SYMBOL)
and any relevant context from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
. Output only the next line. | firstTok[0] == LBRACKET: |
Continue the code snippet: <|code_start|> if skip:
return retVal
if size >= 1:
retVal += "%02X " % (data % 0xFF)
return retVal
def LoadConcreteValues(self, toks):
if type(toks) == type(""):
toks = tokenizeInst(toks)
logging.info("%s => %s" % (self.Instruction.InstructionString, toks))
tmpModRM = ModRM()
firstDef, restDef = (self.Instruction.InstructionDef[0],self.Instruction.InstructionDef[1:])
firstTok, restTok = toks[0],toks[1:]
while 1:
logging.info("TOK COMPARES: %s => %s" % (firstDef, firstTok))
if firstDef[0] in (OPCODE, COMMA, REGISTER):
#TODO: Can we handle this special case better?
# The special case is a m8/16/32 value that is constant
# and doesn't have an RM.
if firstDef[0] == REGISTER and firstDef[1][0] == '[' and \
firstTok[0] == LBRACKET:
firstTok, restTok = restTok[0],restTok[1:]
if firstTok[1] == firstDef[1][1:-1]:
firstTok, restTok = restTok[0],restTok[1:]
else:
raise x86instError("TOKEN MISMATCH '%s' '%s'" % \
(firstDef,firstTok))
elif firstDef[0] != firstTok[0]:
raise x86instError("These should be equal '%s' '%s'" % \
(firstDef, firstTok))
<|code_end|>
. Use current file imports:
import logging
import struct
from pickle import decode_long, encode_long
from x86tokenizer import (tokenizeInstDef,tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET, RBRACKET,NUMBER,SYMBOL)
and context (classes, functions, or code) from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
. Output only the next line. | elif firstDef[0] == NUMBER: |
Given the code snippet: <|code_start|> # and doesn't have an RM.
if firstDef[0] == REGISTER and firstDef[1][0] == '[' and \
firstTok[0] == LBRACKET:
firstTok, restTok = restTok[0],restTok[1:]
if firstTok[1] == firstDef[1][1:-1]:
firstTok, restTok = restTok[0],restTok[1:]
else:
raise x86instError("TOKEN MISMATCH '%s' '%s'" % \
(firstDef,firstTok))
elif firstDef[0] != firstTok[0]:
raise x86instError("These should be equal '%s' '%s'" % \
(firstDef, firstTok))
elif firstDef[0] == NUMBER:
if firstTok[0] != NUMBER or firstTok[1] != firstDef[1]:
raise x86instError("INVALID NUMBER '%s'" % repr(firstTok))
elif firstDef[0] == OPERAND:
if firstDef[1] in ('r/m32','r/m16','r/m8'):
#figure out r/m val
if firstTok[0] == REGISTER:
#figure out r val
registerName = firstTok[1]
registerType = firstDef[1][0] + firstDef[1][3:]
registerVal = regOpcode[registerType].index(registerName)
if registerVal < 0:
raise x86instError("Couldn't resolve register '%s'" % registerName)
else:
tmpModRM.Mode = 3
tmpModRM.RM = registerVal
elif firstTok[0] == LBRACKET:
firstTok, restTok = restTok[0],restTok[1:]
<|code_end|>
, generate the next line using the imports in this file:
import logging
import struct
from pickle import decode_long, encode_long
from x86tokenizer import (tokenizeInstDef,tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET, RBRACKET,NUMBER,SYMBOL)
and context (functions, classes, or occasionally code) from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
. Output only the next line. | if firstTok[0] in (NUMBER,SYMBOL): |
Given the code snippet: <|code_start|> else: #try runtime resolution, currently windows specific
funcaddress = runtimeResolve(sym)
self.symbols[sym] = funcaddress
return funcaddress
def BindPythonFunctions(self,glb=None,bindFunction=excmem.BindFunctionAddress):
if glb is None:
glb = globals()
for proc in self.cp.CodeSymbols:
if proc[2] == PYTHON:
glb[proc[0]] = bindFunction(proc[1] + self.codeAddr)
def MakeMemory(self,glb=None):
if not glb:
glb = globals()
self.codeAddr = excmem.AllocateExecutableMemory(len(self.cp.Code))
self.dataAddr = excmem.AllocateExecutableMemory(len(self.cp.Data))
self.symbols = {}
for sym in self.cp.CodeSymbols:
self.symbols[sym[0]] = sym[1] + self.codeAddr
for sym in self.cp.DataSymbols:
self.symbols[sym[0]] = sym[1] + self.dataAddr
self.resolvedCode = self.cp.Code # nondestructive on cp
for patch in self.cp.CodePatchins:
if patch[2] == DIRECT:
resolvedAddr = self.LookupAddress(patch[0])
<|code_end|>
, generate the next line using the imports in this file:
from x86inst import RELATIVE,DIRECT
from x86PackUnpack import ulongToString
from x86asm import PYTHON
from sys import dllhandle
import pyasm.excmem as excmem
import logging, sys
import win32api, pywintypes
and context (functions, classes, or occasionally code) from other files:
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
#
# Path: x86PackUnpack.py
# def ulongToString(u): return struct.pack("<L", u)
#
# Path: x86asm.py
# class x86asmError(Exception): pass
# class labelRef:
# class label:
# class labelDict(dict):
# class constDict(dict):
# class data:
# class codePackage:
# class procedure:
# class assembler:
# def possibleDefault(*toks):
# def possibleImmediateOrRelative(*toks):
# def possibleRegister(*toks):
# def possibleIndirect(*toks):
# def getProperLookup(*toks):
# def findBestMatchTokens(toks):
# def findBestMatch(s):
# def printBestMatch(s):
# def __init__(self, name):
# def __init__(self, name,typ=0):
# def __setitem__(self,key,val):
# def __setitem__(self,key,val):
# def __init__(self,name,dat,size=0):
# def __init__(self):
# def __init__(self,name, typ=CDECL):
# def AddArg(self,name,bytes=4):
# def AddLocal(self,name,bytes=4):
# def LookupArg(self,name):
# def LookupLocal(self,name):
# def LookupVar(self, name):
# def EmitProcStartCode(self, a):
# def EmitProcEndCode(self, a):
# def __init__(self):
# def registerLabel(self,lbl):
# def freezeProc(self):
# def AddInstruction(self,inst):
# def AI(self,inst):
# def AddInstructionLabel(self,name,typ=0):
# def AIL(self,name):
# def AddData(self,name,dat):
# def ADStr(self,name,dat):
# def AddProcedure(self,name,typ=STDCALL):
# def AP(self,name,typ=STDCALL):
# def AddArgument(self,name,size=4):
# def AA(self,name,size=4):
# def AddLocal(self,name,size=4):
# def EndProc(self):
# def EP(self):
# def AddConstant(self,name,val):
# def AC(self,name,val):
# def getVarNameAndSize(t,s):
# def PROC(self,params):
# def ARG(self,params):
# def LOCAL(self,params):
# def ENDPROC(self,params):
# def CALL(self,params):
# def CHARS(self,params):
# def COMMENT(self,params):
# def dispatchDirective(self,s):
# def dispatchStatement(self,s):
# def DispatchString(self,s):
# def __call__(self,s):
# def pass1(self):
# def Compile(self):
# def _log_header(text):
# def codePackageFromFile(fil,constCallback=None):
# def manglePythonNames(cp):
# STDCALL, CDECL, PYTHON = range(1,4)
. Output only the next line. | elif patch[2] == RELATIVE: |
Here is a snippet: <|code_start|> if self.symbols.has_key(sym):
return self.symbols[sym]
else: #try runtime resolution, currently windows specific
funcaddress = runtimeResolve(sym)
self.symbols[sym] = funcaddress
return funcaddress
def BindPythonFunctions(self,glb=None,bindFunction=excmem.BindFunctionAddress):
if glb is None:
glb = globals()
for proc in self.cp.CodeSymbols:
if proc[2] == PYTHON:
glb[proc[0]] = bindFunction(proc[1] + self.codeAddr)
def MakeMemory(self,glb=None):
if not glb:
glb = globals()
self.codeAddr = excmem.AllocateExecutableMemory(len(self.cp.Code))
self.dataAddr = excmem.AllocateExecutableMemory(len(self.cp.Data))
self.symbols = {}
for sym in self.cp.CodeSymbols:
self.symbols[sym[0]] = sym[1] + self.codeAddr
for sym in self.cp.DataSymbols:
self.symbols[sym[0]] = sym[1] + self.dataAddr
self.resolvedCode = self.cp.Code # nondestructive on cp
for patch in self.cp.CodePatchins:
<|code_end|>
. Write the next line using the current file imports:
from x86inst import RELATIVE,DIRECT
from x86PackUnpack import ulongToString
from x86asm import PYTHON
from sys import dllhandle
import pyasm.excmem as excmem
import logging, sys
import win32api, pywintypes
and context from other files:
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
#
# Path: x86PackUnpack.py
# def ulongToString(u): return struct.pack("<L", u)
#
# Path: x86asm.py
# class x86asmError(Exception): pass
# class labelRef:
# class label:
# class labelDict(dict):
# class constDict(dict):
# class data:
# class codePackage:
# class procedure:
# class assembler:
# def possibleDefault(*toks):
# def possibleImmediateOrRelative(*toks):
# def possibleRegister(*toks):
# def possibleIndirect(*toks):
# def getProperLookup(*toks):
# def findBestMatchTokens(toks):
# def findBestMatch(s):
# def printBestMatch(s):
# def __init__(self, name):
# def __init__(self, name,typ=0):
# def __setitem__(self,key,val):
# def __setitem__(self,key,val):
# def __init__(self,name,dat,size=0):
# def __init__(self):
# def __init__(self,name, typ=CDECL):
# def AddArg(self,name,bytes=4):
# def AddLocal(self,name,bytes=4):
# def LookupArg(self,name):
# def LookupLocal(self,name):
# def LookupVar(self, name):
# def EmitProcStartCode(self, a):
# def EmitProcEndCode(self, a):
# def __init__(self):
# def registerLabel(self,lbl):
# def freezeProc(self):
# def AddInstruction(self,inst):
# def AI(self,inst):
# def AddInstructionLabel(self,name,typ=0):
# def AIL(self,name):
# def AddData(self,name,dat):
# def ADStr(self,name,dat):
# def AddProcedure(self,name,typ=STDCALL):
# def AP(self,name,typ=STDCALL):
# def AddArgument(self,name,size=4):
# def AA(self,name,size=4):
# def AddLocal(self,name,size=4):
# def EndProc(self):
# def EP(self):
# def AddConstant(self,name,val):
# def AC(self,name,val):
# def getVarNameAndSize(t,s):
# def PROC(self,params):
# def ARG(self,params):
# def LOCAL(self,params):
# def ENDPROC(self,params):
# def CALL(self,params):
# def CHARS(self,params):
# def COMMENT(self,params):
# def dispatchDirective(self,s):
# def dispatchStatement(self,s):
# def DispatchString(self,s):
# def __call__(self,s):
# def pass1(self):
# def Compile(self):
# def _log_header(text):
# def codePackageFromFile(fil,constCallback=None):
# def manglePythonNames(cp):
# STDCALL, CDECL, PYTHON = range(1,4)
, which may include functions, classes, or code. Output only the next line. | if patch[2] == DIRECT: |
Next line prediction: <|code_start|> for proc in self.cp.CodeSymbols:
if proc[2] == PYTHON:
glb[proc[0]] = bindFunction(proc[1] + self.codeAddr)
def MakeMemory(self,glb=None):
if not glb:
glb = globals()
self.codeAddr = excmem.AllocateExecutableMemory(len(self.cp.Code))
self.dataAddr = excmem.AllocateExecutableMemory(len(self.cp.Data))
self.symbols = {}
for sym in self.cp.CodeSymbols:
self.symbols[sym[0]] = sym[1] + self.codeAddr
for sym in self.cp.DataSymbols:
self.symbols[sym[0]] = sym[1] + self.dataAddr
self.resolvedCode = self.cp.Code # nondestructive on cp
for patch in self.cp.CodePatchins:
if patch[2] == DIRECT:
resolvedAddr = self.LookupAddress(patch[0])
elif patch[2] == RELATIVE:
#XXX
# I'm just assuming that the patchin is at the end of a function
# and the next instruction address is that +4
# Is this valid or do I need to calculate?
resolvedAddr = self.LookupAddress(patch[0]) - (self.codeAddr + patch[1] + 4)
else:
raise RuntimeError("Invalid patchin information")
<|code_end|>
. Use current file imports:
(from x86inst import RELATIVE,DIRECT
from x86PackUnpack import ulongToString
from x86asm import PYTHON
from sys import dllhandle
import pyasm.excmem as excmem
import logging, sys
import win32api, pywintypes)
and context including class names, function names, or small code snippets from other files:
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
#
# Path: x86PackUnpack.py
# def ulongToString(u): return struct.pack("<L", u)
#
# Path: x86asm.py
# class x86asmError(Exception): pass
# class labelRef:
# class label:
# class labelDict(dict):
# class constDict(dict):
# class data:
# class codePackage:
# class procedure:
# class assembler:
# def possibleDefault(*toks):
# def possibleImmediateOrRelative(*toks):
# def possibleRegister(*toks):
# def possibleIndirect(*toks):
# def getProperLookup(*toks):
# def findBestMatchTokens(toks):
# def findBestMatch(s):
# def printBestMatch(s):
# def __init__(self, name):
# def __init__(self, name,typ=0):
# def __setitem__(self,key,val):
# def __setitem__(self,key,val):
# def __init__(self,name,dat,size=0):
# def __init__(self):
# def __init__(self,name, typ=CDECL):
# def AddArg(self,name,bytes=4):
# def AddLocal(self,name,bytes=4):
# def LookupArg(self,name):
# def LookupLocal(self,name):
# def LookupVar(self, name):
# def EmitProcStartCode(self, a):
# def EmitProcEndCode(self, a):
# def __init__(self):
# def registerLabel(self,lbl):
# def freezeProc(self):
# def AddInstruction(self,inst):
# def AI(self,inst):
# def AddInstructionLabel(self,name,typ=0):
# def AIL(self,name):
# def AddData(self,name,dat):
# def ADStr(self,name,dat):
# def AddProcedure(self,name,typ=STDCALL):
# def AP(self,name,typ=STDCALL):
# def AddArgument(self,name,size=4):
# def AA(self,name,size=4):
# def AddLocal(self,name,size=4):
# def EndProc(self):
# def EP(self):
# def AddConstant(self,name,val):
# def AC(self,name,val):
# def getVarNameAndSize(t,s):
# def PROC(self,params):
# def ARG(self,params):
# def LOCAL(self,params):
# def ENDPROC(self,params):
# def CALL(self,params):
# def CHARS(self,params):
# def COMMENT(self,params):
# def dispatchDirective(self,s):
# def dispatchStatement(self,s):
# def DispatchString(self,s):
# def __call__(self,s):
# def pass1(self):
# def Compile(self):
# def _log_header(text):
# def codePackageFromFile(fil,constCallback=None):
# def manglePythonNames(cp):
# STDCALL, CDECL, PYTHON = range(1,4)
. Output only the next line. | self.resolvedCode = self.resolvedCode[:patch[1]] + ulongToString(resolvedAddr) \ |
Given the code snippet: <|code_start|> try:
addr = win32api.GetProcAddress(dllhandle,funcName)
except pywintypes.error:
raise RuntimeError("Unable to resolve external symbol '%s'" % funcName)
return addr
elif sys.platform in ('linux2'):
def runtimeResolve(funcName):
return excmem.GetSymbolAddress(funcName)
else:
raise RuntimeError("Don't know how to resolve external symbols for platform '%s'" % sys.platform)
class CpToMemory:
def __init__(self,cp):
self.cp = cp
self.symbols = {}
self.resolvedCode = ''
def LookupAddress(self,sym):
if self.symbols.has_key(sym):
return self.symbols[sym]
else: #try runtime resolution, currently windows specific
funcaddress = runtimeResolve(sym)
self.symbols[sym] = funcaddress
return funcaddress
def BindPythonFunctions(self,glb=None,bindFunction=excmem.BindFunctionAddress):
if glb is None:
glb = globals()
for proc in self.cp.CodeSymbols:
<|code_end|>
, generate the next line using the imports in this file:
from x86inst import RELATIVE,DIRECT
from x86PackUnpack import ulongToString
from x86asm import PYTHON
from sys import dllhandle
import pyasm.excmem as excmem
import logging, sys
import win32api, pywintypes
and context (functions, classes, or occasionally code) from other files:
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
#
# Path: x86PackUnpack.py
# def ulongToString(u): return struct.pack("<L", u)
#
# Path: x86asm.py
# class x86asmError(Exception): pass
# class labelRef:
# class label:
# class labelDict(dict):
# class constDict(dict):
# class data:
# class codePackage:
# class procedure:
# class assembler:
# def possibleDefault(*toks):
# def possibleImmediateOrRelative(*toks):
# def possibleRegister(*toks):
# def possibleIndirect(*toks):
# def getProperLookup(*toks):
# def findBestMatchTokens(toks):
# def findBestMatch(s):
# def printBestMatch(s):
# def __init__(self, name):
# def __init__(self, name,typ=0):
# def __setitem__(self,key,val):
# def __setitem__(self,key,val):
# def __init__(self,name,dat,size=0):
# def __init__(self):
# def __init__(self,name, typ=CDECL):
# def AddArg(self,name,bytes=4):
# def AddLocal(self,name,bytes=4):
# def LookupArg(self,name):
# def LookupLocal(self,name):
# def LookupVar(self, name):
# def EmitProcStartCode(self, a):
# def EmitProcEndCode(self, a):
# def __init__(self):
# def registerLabel(self,lbl):
# def freezeProc(self):
# def AddInstruction(self,inst):
# def AI(self,inst):
# def AddInstructionLabel(self,name,typ=0):
# def AIL(self,name):
# def AddData(self,name,dat):
# def ADStr(self,name,dat):
# def AddProcedure(self,name,typ=STDCALL):
# def AP(self,name,typ=STDCALL):
# def AddArgument(self,name,size=4):
# def AA(self,name,size=4):
# def AddLocal(self,name,size=4):
# def EndProc(self):
# def EP(self):
# def AddConstant(self,name,val):
# def AC(self,name,val):
# def getVarNameAndSize(t,s):
# def PROC(self,params):
# def ARG(self,params):
# def LOCAL(self,params):
# def ENDPROC(self,params):
# def CALL(self,params):
# def CHARS(self,params):
# def COMMENT(self,params):
# def dispatchDirective(self,s):
# def dispatchStatement(self,s):
# def DispatchString(self,s):
# def __call__(self,s):
# def pass1(self):
# def Compile(self):
# def _log_header(text):
# def codePackageFromFile(fil,constCallback=None):
# def manglePythonNames(cp):
# STDCALL, CDECL, PYTHON = range(1,4)
. Output only the next line. | if proc[2] == PYTHON: |
Predict the next line for this snippet: <|code_start|>class coffLineNumberList(list):
def DumpInfo(self):
if self:
print "LINE NUMBERS"
print "============"
print "Symbol\tLine Number"
for x in self:
x.DumpInfo()
class coffRelocationEntry:
def __init__(self,addr=0x0,sym=0x0,typ=0x0):
self.Address = addr
self.Symbol = sym
self.Type = typ
def InitFromFile(self,f):
self.Address = ulongFromFile(f)
self.Symbol = ulongFromFile(f)
self.Type = ushortFromFile(f)
def WriteToFile(self,f):
ulongToFile(f,self.Address)
ulongToFile(f, self.Symbol)
ushortToFile(f, self.Type)
def Sizeof(self):
return 10
def DumpInfo(self):
print "%02X\t%02X\t%10s" % (self.Address, self.Symbol,
<|code_end|>
with the help of current file imports:
import logging, sys
from coffConst import *
from x86PackUnpack import *
from coffSymbolEntries import attemptNameLookup, coffSymbolEntry
and context from other files:
# Path: coffSymbolEntries.py
# def attemptNameLookup(const,id):
# """ Doesn't necessarily belong here but this avoids circular imports"""
# return const.get(id, "UNDEF??[%0X]" % id)
#
# class coffSymbolEntry:
# def __init__(self,name="",value=0x0,sec=0x0,typ=0x0,cls=0x0,aux='',fullname=None):
# self.Name = name
# self.Value = value
# self.SectionNumber = sec
# self.Type = typ
# self.StorageClass = cls
# self.NumberAuxiliary = 0x0
# self.Auxiliaries = aux
# self.Location = 0
# if fullname:
# self.Fullname = fullname
# else:
# self.Fullname = name
#
# def InitFromFile(self,f):
# self.Name = stringFromFile(8,f)
# self.Value = ulongFromFile(f)
# self.SectionNumber = shortFromFile(f)
# self.Type = ushortFromFile(f)
# self.StorageClass = ucharFromFile(f)
# self.NumberAuxiliary = ucharFromFile(f)
# self.Auxiliaries = ''
#
# for i in range(self.NumberAuxiliary):
# aux = stringFromFile(18,f)
# self.Auxiliaries += aux
#
#
# def WriteToFile(self,f):
# stringToFile(f, 8, self.Name)
# ulongToFile(f, self.Value)
# shortToFile(f, self.SectionNumber)
# ushortToFile(f, self.Type)
# ucharToFile(f, self.StorageClass)
# ucharToFile(f, self.NumberAuxiliaries)
# stringToFile(f, len(self.Auxiliaries), self.Auxiliaries)
#
# def SetSizes(self):
# assert not len(self.Auxiliaries) % 18, "Invalid Aux length"
# self.NumberAuxiliaries = (len(self.Auxiliaries) // 18)
#
# def Rows(self):
# self.SetSizes()
# return self.NumberAuxiliaries + 1
#
# def DumpInfo(self):
# print "%20s\t%10s\t%10s\t%10s\t%10s\t" % (repr(self.Name),
# attemptNameLookup(SymbolValues.NAME,self.Value),
# repr(self.SectionNumber),
# attemptNameLookup(SymbolTypes.NAME,self.Type),
# attemptNameLookup(SymbolClass.NAME,self.StorageClass))
# tail = repr(self.Auxiliaries)
# head,tail = tail[:70],tail[70:]
# while head:
# print "\t%s" % head
# head,tail = tail[:70],tail[70:]
, which may contain function names, class names, or code. Output only the next line. | attemptNameLookup(RelocationTypes.NAME, self.Type)) |
Using the snippet: <|code_start|> sys.stdout.write(".")
else:
sys.stdout.write(char)
print
head,tail = tail[:16], tail[16:]
class coffSymbolList(list):
def __init__(self):
list.__init__(self)
self.currentLocation = 0
def append(self,item):
item.Location = self.currentLocation
self.currentLocation += item.Rows()
list.append(self,item)
def DumpInfo(self):
if self:
print "Symbol Entry Table"
print "=================="
print "%20s\t%10s\t%10s\t%10s\t%10s\t" % ("Name",
'Value','SectionNumber','Type','StorageClass')
for x in self:
x.DumpInfo()
def InitFromFile(self, f, count):
x = 0
while x < count:
<|code_end|>
, determine the next line of code. You have imports:
import logging, sys
from coffConst import *
from x86PackUnpack import *
from coffSymbolEntries import attemptNameLookup, coffSymbolEntry
and context (class names, function names, or code) available:
# Path: coffSymbolEntries.py
# def attemptNameLookup(const,id):
# """ Doesn't necessarily belong here but this avoids circular imports"""
# return const.get(id, "UNDEF??[%0X]" % id)
#
# class coffSymbolEntry:
# def __init__(self,name="",value=0x0,sec=0x0,typ=0x0,cls=0x0,aux='',fullname=None):
# self.Name = name
# self.Value = value
# self.SectionNumber = sec
# self.Type = typ
# self.StorageClass = cls
# self.NumberAuxiliary = 0x0
# self.Auxiliaries = aux
# self.Location = 0
# if fullname:
# self.Fullname = fullname
# else:
# self.Fullname = name
#
# def InitFromFile(self,f):
# self.Name = stringFromFile(8,f)
# self.Value = ulongFromFile(f)
# self.SectionNumber = shortFromFile(f)
# self.Type = ushortFromFile(f)
# self.StorageClass = ucharFromFile(f)
# self.NumberAuxiliary = ucharFromFile(f)
# self.Auxiliaries = ''
#
# for i in range(self.NumberAuxiliary):
# aux = stringFromFile(18,f)
# self.Auxiliaries += aux
#
#
# def WriteToFile(self,f):
# stringToFile(f, 8, self.Name)
# ulongToFile(f, self.Value)
# shortToFile(f, self.SectionNumber)
# ushortToFile(f, self.Type)
# ucharToFile(f, self.StorageClass)
# ucharToFile(f, self.NumberAuxiliaries)
# stringToFile(f, len(self.Auxiliaries), self.Auxiliaries)
#
# def SetSizes(self):
# assert not len(self.Auxiliaries) % 18, "Invalid Aux length"
# self.NumberAuxiliaries = (len(self.Auxiliaries) // 18)
#
# def Rows(self):
# self.SetSizes()
# return self.NumberAuxiliaries + 1
#
# def DumpInfo(self):
# print "%20s\t%10s\t%10s\t%10s\t%10s\t" % (repr(self.Name),
# attemptNameLookup(SymbolValues.NAME,self.Value),
# repr(self.SectionNumber),
# attemptNameLookup(SymbolTypes.NAME,self.Type),
# attemptNameLookup(SymbolClass.NAME,self.StorageClass))
# tail = repr(self.Auxiliaries)
# head,tail = tail[:70],tail[70:]
# while head:
# print "\t%s" % head
# head,tail = tail[:70],tail[70:]
. Output only the next line. | symbol = coffSymbolEntry() |
Predict the next line after this snippet: <|code_start|> for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
possibleLookups = {
REGISTER:possibleRegister,
OPCODE:possibleDefault,
COMMA:possibleDefault,
LBRACKET:possibleIndirect,
NUMBER:possibleImmediateOrRelative,
SYMBOL:possibleImmediateOrRelative,}
def getProperLookup(*toks):
return possibleLookups[toks[0][0]]
def findBestMatchTokens(toks):
retVal = None
for x in possibleDefault(*toks):
y = tuple(x)
if mnemonicDict.has_key(y):
retVal = mnemonicDict[y]
break
if retVal:
return retVal
else:
raise x86asmError("Unable to find match for " + `toks`)
def findBestMatch(s):
<|code_end|>
using the current file's imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and any relevant context from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | toks = tokenizeInst(s) |
Given the code snippet: <|code_start|> if len(first[1]) < 4 and num >= -127 and num <= 128:
immVals.insert(0,(OPERAND,'imm8'))
relVals.insert(0,(OPERAND,'rel8'))
#lookup constant value like INT 3
if first[0] == NUMBER:
vals.append(first)
vals.extend(immVals)
vals.extend(relVals)
if not rest:
for val in vals:
yield [val]
else:
possibleLookup = getProperLookup(*rest)
for val in vals:
for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
def possibleRegister(*toks):
"""
Registers may be hardcoded for superfast lookups, or an r or r/m value.
We could probably optimize better with a better understanding of the environment.
i.e. it doesn't make sense to move an r/m8 into an r32
"""
regName = toks[0][1]
<|code_end|>
, generate the next line using the imports in this file:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context (functions, classes, or occasionally code) from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | registerVals = [(REGISTER, '%s' % regName)] |
Given snippet: <|code_start|> if rest[0][0] == RBRACKET:
#Special case
possibleVals.append((REGISTER, '[%s]' % regName))
if regName in rb:
possibleVals.append((OPERAND, 'r/m8'))
elif regName in rw:
possibleVals.append((OPERAND,'r/m16'))
elif regName in rd:
possibleVals.append((OPERAND,'r/m32'))
else:
raise x86asmError("Invalid Register name '%s'" % regName)
while rest[0] != (RBRACKET, ']'):
rest = rest[1:]
rest = rest[1:]
if not rest:
for val in possibleVals:
yield [val]
else:
possibleLookup = getProperLookup(*rest)
for val in possibleVals:
for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
possibleLookups = {
REGISTER:possibleRegister,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
which might include code, classes, or functions. Output only the next line. | OPCODE:possibleDefault, |
Predict the next line for this snippet: <|code_start|> #Special case
possibleVals.append((REGISTER, '[%s]' % regName))
if regName in rb:
possibleVals.append((OPERAND, 'r/m8'))
elif regName in rw:
possibleVals.append((OPERAND,'r/m16'))
elif regName in rd:
possibleVals.append((OPERAND,'r/m32'))
else:
raise x86asmError("Invalid Register name '%s'" % regName)
while rest[0] != (RBRACKET, ']'):
rest = rest[1:]
rest = rest[1:]
if not rest:
for val in possibleVals:
yield [val]
else:
possibleLookup = getProperLookup(*rest)
for val in possibleVals:
for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
possibleLookups = {
REGISTER:possibleRegister,
OPCODE:possibleDefault,
<|code_end|>
with the help of current file imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
, which may contain function names, class names, or code. Output only the next line. | COMMA:possibleDefault, |
Using the snippet: <|code_start|>at runtime instead of just generating coff files.
I need to get the instruction tokenizer working for this to take off.
"""
class x86asmError(Exception): pass
###########################################################
## Find right instruction def based on concrete instruction
###########################################################
def possibleDefault(*toks):
"By default, a token will just yield itself."
first,rest = toks[0],toks[1:]
if not rest:
yield [first]
else:
i = 1
possibleLookup = getProperLookup(*rest)
for restMatches in possibleLookup(*rest):
i += 1
yldVal = [first]
yldVal.extend(restMatches)
yield yldVal
def possibleImmediateOrRelative(*toks):
# TODO: can we narrow down which one this should be?
<|code_end|>
, determine the next line of code. You have imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context (class names, function names, or code) available:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | immVals = [(OPERAND,'imm32'),(OPERAND,'imm16')] |
Using the snippet: <|code_start|> possibleVals.append((REGISTER, '[%s]' % regName))
if regName in rb:
possibleVals.append((OPERAND, 'r/m8'))
elif regName in rw:
possibleVals.append((OPERAND,'r/m16'))
elif regName in rd:
possibleVals.append((OPERAND,'r/m32'))
else:
raise x86asmError("Invalid Register name '%s'" % regName)
while rest[0] != (RBRACKET, ']'):
rest = rest[1:]
rest = rest[1:]
if not rest:
for val in possibleVals:
yield [val]
else:
possibleLookup = getProperLookup(*rest)
for val in possibleVals:
for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
possibleLookups = {
REGISTER:possibleRegister,
OPCODE:possibleDefault,
COMMA:possibleDefault,
<|code_end|>
, determine the next line of code. You have imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context (class names, function names, or code) available:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | LBRACKET:possibleIndirect, |
Predict the next line after this snippet: <|code_start|> else:
raise x86asmError("Invalid Register name '%s'" % regName)
first,rest = toks[0],toks[1:]
if not rest:
for val in registerVals:
yield [val]
else:
possibleLookup = getProperLookup(*rest)
for val in registerVals:
for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
def possibleIndirect(*toks):
"""
This is pretty much an r/m value
i.e. it doesn't make sense to move an r/m8 into an r32
"""
possibleVals = []
lbracket,operand,rest = toks[0],toks[1],toks[2:]
if operand[0] in (NUMBER,SYMBOL):
# TODO: CAN WE OPTIMIZE THIS?
possibleVals.append((OPERAND,'r/m32'))
possibleVals.append((OPERAND,'r/m16'))
possibleVals.append((OPERAND,'r/m8'))
elif operand[0] == REGISTER:
regName = operand[1]
<|code_end|>
using the current file's imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and any relevant context from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | if rest[0][0] == RBRACKET: |
Based on the snippet: <|code_start|>
class x86asmError(Exception): pass
###########################################################
## Find right instruction def based on concrete instruction
###########################################################
def possibleDefault(*toks):
"By default, a token will just yield itself."
first,rest = toks[0],toks[1:]
if not rest:
yield [first]
else:
i = 1
possibleLookup = getProperLookup(*rest)
for restMatches in possibleLookup(*rest):
i += 1
yldVal = [first]
yldVal.extend(restMatches)
yield yldVal
def possibleImmediateOrRelative(*toks):
# TODO: can we narrow down which one this should be?
immVals = [(OPERAND,'imm32'),(OPERAND,'imm16')]
relVals = [(OPERAND,'rel32'),(OPERAND,'rel16')]
first,rest = toks[0],toks[1:]
vals = []
#if it's 8 bit, try to grab smaller opcode
<|code_end|>
, predict the immediate next line with the help of imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context (classes, functions, sometimes code) from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | if first[0] == NUMBER: |
Given the code snippet: <|code_start|> registerVals.append((OPERAND, 'r/m8'))
elif regName in rw:
registerVals.append((OPERAND, 'r16'))
registerVals.append((OPERAND,'r/m16'))
elif regName in rd:
registerVals.append((OPERAND,'r32'))
registerVals.append((OPERAND,'r/m32'))
else:
raise x86asmError("Invalid Register name '%s'" % regName)
first,rest = toks[0],toks[1:]
if not rest:
for val in registerVals:
yield [val]
else:
possibleLookup = getProperLookup(*rest)
for val in registerVals:
for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
def possibleIndirect(*toks):
"""
This is pretty much an r/m value
i.e. it doesn't make sense to move an r/m8 into an r32
"""
possibleVals = []
lbracket,operand,rest = toks[0],toks[1],toks[2:]
<|code_end|>
, generate the next line using the imports in this file:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context (functions, classes, or occasionally code) from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | if operand[0] in (NUMBER,SYMBOL): |
Based on the snippet: <|code_start|> self.Data = []
self.Labels = {}
self.Constants = constDict()
self.CurrentProcedure = None
self.StartAddress = 0x0
self.DataStartAddress = 0x0
self.inlineStringNo = 1000
def registerLabel(self,lbl):
if self.Labels.has_key(lbl.Name):
raise x86asmError("Duplicate Label Registration [%s]" % lbl.Name)
self.Labels[lbl.Name] = lbl
#
# Write assmebly code
#
def freezeProc(self):
if self.CurrentProcedure and not self.CurrentProcedure.Frozen:
#initialize proc
self.CurrentProcedure.Frozen = 1
self.CurrentProcedure.EmitProcStartCode(self)
def AddInstruction(self,inst):
self.freezeProc()
instToks = tokenizeInst(inst)
instToksMinusLocals = ()
for tok in instToks:
<|code_end|>
, predict the immediate next line with the help of imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context (classes, functions, sometimes code) from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | if tok[0] == STRING: |
Given snippet: <|code_start|> """
a.AI("PUSH EBP")
a.AI("MOV EBP, ESP")
if self.LocalOffset:
a.AI("SUB ESP, %s" % self.LocalOffset)
def EmitProcEndCode(self, a):
"""
Restore settings and RETurn
TODO: Do we need to handle a Return value here?
"""
if self.LocalOffset:
a.AI("ADD ESP, %s" % self.LocalOffset)
#check for malformed stack
#a.AI("CMP EBP,ESP")
#a.AI("CALL __chkesp")
a.AI("MOV ESP, EBP")
a.AI("POP EBP")
if self.Type == STDCALL and self.ArgOffset - 8:
#HAD ARGS AND IS A STDCALL, CLEANUP
a.AI("RET %s" % (self.ArgOffset - 8))
else:
a.AI("RET")
#
# assembler directive re's
#
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
which might include code, classes, or functions. Output only the next line. | strRe = re.compile("\s*" + symbolRe + "\s*" + stringRe + "?$",re.DOTALL) |
Predict the next line for this snippet: <|code_start|> """
a.AI("PUSH EBP")
a.AI("MOV EBP, ESP")
if self.LocalOffset:
a.AI("SUB ESP, %s" % self.LocalOffset)
def EmitProcEndCode(self, a):
"""
Restore settings and RETurn
TODO: Do we need to handle a Return value here?
"""
if self.LocalOffset:
a.AI("ADD ESP, %s" % self.LocalOffset)
#check for malformed stack
#a.AI("CMP EBP,ESP")
#a.AI("CALL __chkesp")
a.AI("MOV ESP, EBP")
a.AI("POP EBP")
if self.Type == STDCALL and self.ArgOffset - 8:
#HAD ARGS AND IS A STDCALL, CLEANUP
a.AI("RET %s" % (self.ArgOffset - 8))
else:
a.AI("RET")
#
# assembler directive re's
#
<|code_end|>
with the help of current file imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
, which may contain function names, class names, or code. Output only the next line. | strRe = re.compile("\s*" + symbolRe + "\s*" + stringRe + "?$",re.DOTALL) |
Given the following code snippet before the placeholder: <|code_start|> while rest[0] != (RBRACKET, ']'):
rest = rest[1:]
rest = rest[1:]
if not rest:
for val in possibleVals:
yield [val]
else:
possibleLookup = getProperLookup(*rest)
for val in possibleVals:
for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
possibleLookups = {
REGISTER:possibleRegister,
OPCODE:possibleDefault,
COMMA:possibleDefault,
LBRACKET:possibleIndirect,
NUMBER:possibleImmediateOrRelative,
SYMBOL:possibleImmediateOrRelative,}
def getProperLookup(*toks):
return possibleLookups[toks[0][0]]
def findBestMatchTokens(toks):
retVal = None
for x in possibleDefault(*toks):
y = tuple(x)
<|code_end|>
, predict the next line using imports from the current file:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context including class names, function names, and sometimes code from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | if mnemonicDict.has_key(y): |
Using the snippet: <|code_start|> immVals.insert(0,(OPERAND,'imm8'))
relVals.insert(0,(OPERAND,'rel8'))
#lookup constant value like INT 3
if first[0] == NUMBER:
vals.append(first)
vals.extend(immVals)
vals.extend(relVals)
if not rest:
for val in vals:
yield [val]
else:
possibleLookup = getProperLookup(*rest)
for val in vals:
for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
def possibleRegister(*toks):
"""
Registers may be hardcoded for superfast lookups, or an r or r/m value.
We could probably optimize better with a better understanding of the environment.
i.e. it doesn't make sense to move an r/m8 into an r32
"""
regName = toks[0][1]
registerVals = [(REGISTER, '%s' % regName)]
<|code_end|>
, determine the next line of code. You have imports:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context (class names, function names, or code) available:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | if regName in rb: |
Given the code snippet: <|code_start|>
#lookup constant value like INT 3
if first[0] == NUMBER:
vals.append(first)
vals.extend(immVals)
vals.extend(relVals)
if not rest:
for val in vals:
yield [val]
else:
possibleLookup = getProperLookup(*rest)
for val in vals:
for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
def possibleRegister(*toks):
"""
Registers may be hardcoded for superfast lookups, or an r or r/m value.
We could probably optimize better with a better understanding of the environment.
i.e. it doesn't make sense to move an r/m8 into an r32
"""
regName = toks[0][1]
registerVals = [(REGISTER, '%s' % regName)]
if regName in rb:
registerVals.append((OPERAND,'r8'))
registerVals.append((OPERAND, 'r/m8'))
<|code_end|>
, generate the next line using the imports in this file:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context (functions, classes, or occasionally code) from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | elif regName in rw: |
Given the following code snippet before the placeholder: <|code_start|> vals.append(first)
vals.extend(immVals)
vals.extend(relVals)
if not rest:
for val in vals:
yield [val]
else:
possibleLookup = getProperLookup(*rest)
for val in vals:
for restMatches in possibleLookup(*rest):
yldVal = [val]
yldVal.extend(restMatches)
yield yldVal
def possibleRegister(*toks):
"""
Registers may be hardcoded for superfast lookups, or an r or r/m value.
We could probably optimize better with a better understanding of the environment.
i.e. it doesn't make sense to move an r/m8 into an r32
"""
regName = toks[0][1]
registerVals = [(REGISTER, '%s' % regName)]
if regName in rb:
registerVals.append((OPERAND,'r8'))
registerVals.append((OPERAND, 'r/m8'))
elif regName in rw:
registerVals.append((OPERAND, 'r16'))
registerVals.append((OPERAND,'r/m16'))
<|code_end|>
, predict the next line using imports from the current file:
from x86tokenizer import (tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING,
symbolRe,stringRe)
from x86inst import mnemonicDict, rb, rw, rd, instructionInstance
from tokenize import Number
from pyasm.loggers import x86asmLogger, x86sourceLogger, x86apiLogger
import types, re
and context including class names, function names, and sometimes code from other files:
# Path: x86tokenizer.py
# class tokenizeError(Exception):pass
# REGISTER,OPCODE,COMMA,OPERAND,LBRACKET,RBRACKET,NUMBER,SYMBOL,STRING = range(1,10)
# def tokenizeString(s,reToProcess):
# def tokenizeInstDef(s):
# def tokenizeInst(s):
#
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | elif regName in rd: |
Based on the snippet: <|code_start|> self.coff = c
def linkDirectiveSection(self):
sect = coffSection()
sect.Name = '.drectve'
sect.Flags = (SectionFlags.LNK_REMOVE |
SectionFlags.LNK_INFO |
SectionFlags.ALIGN_1BYTES)
sect.RawData = self.directives
sym = self.coff.Symbols.GetSymbol('.drectve')
sym.RebuildAuxiliaries(len(sect.RawData),0,0,crc32(sect.RawData),0,0)
return sect
def textSection(self):
sect = coffSection()
sect.Name = '.text\x00\x00\x00'
sect.Flags = (SectionFlags.CNT_CODE |
SectionFlags.LNK_COMDAT |
SectionFlags.MEM_EXECUTE |
SectionFlags.MEM_READ |
SectionFlags.ALIGN_16BYTES)
sect.RawData = self.cp.Code
for patchin in self.cp.CodePatchins:
# How do I tell what type it is?
addr = patchin[1]
if patchin[2] == DIRECT:
patchinType = RelocationTypes.I386_DIR32
<|code_end|>
, predict the immediate next line with the help of imports:
from pyasm.coff import (coffError, coffFile, coffSection, coffRelocationEntry,
coffSymbolEntry, coffLineNumberEntry)
from pyasm.coffConst import *
from pyasm.coffSymbolEntries import (coffSymbolFile, coffSectionDef, coffFunctionDef,
coffBf, coffLf, coffEf)
from binascii import crc32
from x86inst import RELATIVE, DIRECT
import logging, time
and context (classes, functions, sometimes code) from other files:
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | elif patchin[2] == RELATIVE: |
Predict the next line after this snippet: <|code_start|> c.MachineType = coffFile.I386MAGIC
self.coff = c
def linkDirectiveSection(self):
sect = coffSection()
sect.Name = '.drectve'
sect.Flags = (SectionFlags.LNK_REMOVE |
SectionFlags.LNK_INFO |
SectionFlags.ALIGN_1BYTES)
sect.RawData = self.directives
sym = self.coff.Symbols.GetSymbol('.drectve')
sym.RebuildAuxiliaries(len(sect.RawData),0,0,crc32(sect.RawData),0,0)
return sect
def textSection(self):
sect = coffSection()
sect.Name = '.text\x00\x00\x00'
sect.Flags = (SectionFlags.CNT_CODE |
SectionFlags.LNK_COMDAT |
SectionFlags.MEM_EXECUTE |
SectionFlags.MEM_READ |
SectionFlags.ALIGN_16BYTES)
sect.RawData = self.cp.Code
for patchin in self.cp.CodePatchins:
# How do I tell what type it is?
addr = patchin[1]
<|code_end|>
using the current file's imports:
from pyasm.coff import (coffError, coffFile, coffSection, coffRelocationEntry,
coffSymbolEntry, coffLineNumberEntry)
from pyasm.coffConst import *
from pyasm.coffSymbolEntries import (coffSymbolFile, coffSectionDef, coffFunctionDef,
coffBf, coffLf, coffEf)
from binascii import crc32
from x86inst import RELATIVE, DIRECT
import logging, time
and any relevant context from other files:
# Path: x86inst.py
# class OpcodeTooShort(Exception):pass
# class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# class x86instError(Exception):pass
# class OpcodeDict(dict):
# class MnemonicDict(dict):
# class ModRM:
# class instruction:
# class instructionInstance:
# def __getitem__(self,key):
# def __setitem__(self,key,value):
# def GetOp(self,opcode,modRM=None):
# def __setitem__(self,key,val):
# def longToBytes(long, bytes=4):
# def longToString(long, bytes=4):
# def longToBytesRepr(long,bytes=4):
# def __init__(self,byte=None):
# def LoadFromByte(self,byte):
# def SaveToByte(self):
# def HasSIB(self):
# def RegOpString(self,typ):
# def RMString(self,typ=None):
# def GetDisplacementSize(self):
# def __init__(self,opstr,inststr,desc):
# def loadRBWD(self,plus,reg):
# def setOpcodeAndFlags(self):
# def setHasFlags(self):
# def GetInstance(self):
# def __str__(self):
# def __init__(self,inst):
# def GetSymbolPatchins(self,modrm=None):
# def GetSuffixSize(self,modrm=None):
# def GetInstructionSize(self):
# def NextInstructionLoc(self):
# def LoadData(self, data):
# def DataText(self,data,size,skip=False):
# def LoadConcreteValues(self, toks):
# def OpDataAsString(self):
# def OpText(self):
# OS = self.OpcodeString
# IS = self.InstructionString
# ID = self.Description
# OS = OS.replace(plus,plus[1:])
# OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
# IS = IS.replace(reg, regOpcode[reg][i])
# RELATIVE,DIRECT = range(1,3)
. Output only the next line. | if patchin[2] == DIRECT: |
Predict the next line for this snippet: <|code_start|>
class MetricsTestCase(unittest.TestCase):
def test_displacement(self):
nodes = [
Node(1, 50),
Node(2, 50),
Node(804, 50),
Node(854, 50)
]
# should return 0 if the input is empty
<|code_end|>
with the help of current file imports:
import unittest
from labella import metrics
from labella.node import Node
and context from other files:
# Path: labella/metrics.py
# def toLayers(nodes):
# def denominator(layers):
# def denominatorWithoutStubs(layers):
# def displacement(nodes):
# def pathLength(nodes):
# def overflowSpace(nodes, minPos=None, maxPos=None):
# def overDensitySpace(nodes, density=None, layerWidth=None, nodeSpacing=0):
# def overlapCount(nodes, buf=0):
# def overlapSpace(nodes):
# def weightedAllocation(nodes):
# def weightedAllocatedSpace(nodes):
#
# Path: labella/node.py
# class Node(object):
# def __init__(self, idealPos, width, data=None):
# self.idealPos = idealPos
# self.currentPos = idealPos
# self.width = width
# self.data = data
# self.layerIndex = 0
# self.parent = None
# self.overlap = None
# self.overlapCount = 0
# self.child = None
# # for rendering
# self.x = None
# self.dx = None
# self.y = None
# self.dy = None
# # other
# self.w = 0
# self.h = 0
#
# def __repr__(self):
# s = (
# "Node(idealPos=%r, currentPos=%r, width=%r, "
# "layerIndex=%r, data=%r)"
# % (
# self.idealPos,
# self.currentPos,
# self.width,
# self.layerIndex,
# self.data,
# )
# )
# return s
#
# def __str__(self):
# return repr(self)
#
# def distanceFrom(self, node):
# halfWidth = self.width / 2
# nodeHalfWidth = node.width / 2
# maxval = max(
# self.currentPos - halfWidth, node.currentPos - nodeHalfWidth
# )
# minval = min(
# self.currentPos + halfWidth, node.currentPos + nodeHalfWidth
# )
# return maxval - minval
#
# def moveToIdealPosition(self):
# self.currentPos = self.idealPos
#
# def displacement(self):
# return self.idealPos - self.currentPos
#
# def overlapWithNode(self, node, buf=None):
# _buffer = buf if buf else 0
# return self.distanceFrom(node) - _buffer < 0
#
# def overlapWithPoint(self, pos):
# halfWidth = self.width / 2
# return (pos >= self.currentPos - halfWidth) and (
# pos <= self.currentPos + halfWidth
# )
#
# def positionBefore(self, node, buf=None):
# _buffer = buf if buf else 0
# return node.currentLeft() - self.width / 2 - _buffer
#
# def positionAfter(self, node, buf=None):
# _buffer = buf if buf else 0
# return node.currentRight() + self.width / 2 + _buffer
#
# def currentRight(self):
# return self.currentPos + self.width / 2
#
# def currentLeft(self):
# return self.currentPos - self.width / 2
#
# def idealRight(self):
# return self.idealPos + self.width / 2
#
# def idealLeft(self):
# return self.idealPos - self.width / 2
#
# def removeStub(self):
# if self.parent:
# self.parent.child = None
# self.parent = None
# return self
#
# def createStub(self, width=None):
# stub = Node(self.idealPos, width, self.data)
# stub.currentPos = self.currentPos
# stub.child = self
# self.parent = stub
# return stub
#
# def isStub(self):
# return not (not (self.child))
#
# def getPathToRoot(self):
# path = []
# current = self
# while current:
# path.append(current)
# current = current.parent
# return path
#
# def getPathFromRoot(self):
# return list(reversed(self.getPathToRoot()))
#
# def getPathToRootLength(self):
# length = 0
# current = self
# while current:
# targetPos = (
# current.parent.currentPos
# if current.parent
# else current.idealPos
# )
# length += abs(current.currentPos - targetPos)
# current = current.parent
# return length
#
# def getRoot(self):
# previous = self
# current = self
# while current:
# previous = current
# current = current.parent
# return previous
#
# def getLayerIndex(self):
# return self.layerIndex
#
# def clone(self):
# node = Node(self.idealPos, self.width, self.data)
# node.currentPos = self.currentPos
# node.layerIndex = self.layerIndex
# return node
, which may contain function names, class names, or code. Output only the next line. | self.assertEqual(metrics.displacement([]), 0) |
Predict the next line after this snippet: <|code_start|>
class MetricsTestCase(unittest.TestCase):
def test_displacement(self):
nodes = [
<|code_end|>
using the current file's imports:
import unittest
from labella import metrics
from labella.node import Node
and any relevant context from other files:
# Path: labella/metrics.py
# def toLayers(nodes):
# def denominator(layers):
# def denominatorWithoutStubs(layers):
# def displacement(nodes):
# def pathLength(nodes):
# def overflowSpace(nodes, minPos=None, maxPos=None):
# def overDensitySpace(nodes, density=None, layerWidth=None, nodeSpacing=0):
# def overlapCount(nodes, buf=0):
# def overlapSpace(nodes):
# def weightedAllocation(nodes):
# def weightedAllocatedSpace(nodes):
#
# Path: labella/node.py
# class Node(object):
# def __init__(self, idealPos, width, data=None):
# self.idealPos = idealPos
# self.currentPos = idealPos
# self.width = width
# self.data = data
# self.layerIndex = 0
# self.parent = None
# self.overlap = None
# self.overlapCount = 0
# self.child = None
# # for rendering
# self.x = None
# self.dx = None
# self.y = None
# self.dy = None
# # other
# self.w = 0
# self.h = 0
#
# def __repr__(self):
# s = (
# "Node(idealPos=%r, currentPos=%r, width=%r, "
# "layerIndex=%r, data=%r)"
# % (
# self.idealPos,
# self.currentPos,
# self.width,
# self.layerIndex,
# self.data,
# )
# )
# return s
#
# def __str__(self):
# return repr(self)
#
# def distanceFrom(self, node):
# halfWidth = self.width / 2
# nodeHalfWidth = node.width / 2
# maxval = max(
# self.currentPos - halfWidth, node.currentPos - nodeHalfWidth
# )
# minval = min(
# self.currentPos + halfWidth, node.currentPos + nodeHalfWidth
# )
# return maxval - minval
#
# def moveToIdealPosition(self):
# self.currentPos = self.idealPos
#
# def displacement(self):
# return self.idealPos - self.currentPos
#
# def overlapWithNode(self, node, buf=None):
# _buffer = buf if buf else 0
# return self.distanceFrom(node) - _buffer < 0
#
# def overlapWithPoint(self, pos):
# halfWidth = self.width / 2
# return (pos >= self.currentPos - halfWidth) and (
# pos <= self.currentPos + halfWidth
# )
#
# def positionBefore(self, node, buf=None):
# _buffer = buf if buf else 0
# return node.currentLeft() - self.width / 2 - _buffer
#
# def positionAfter(self, node, buf=None):
# _buffer = buf if buf else 0
# return node.currentRight() + self.width / 2 + _buffer
#
# def currentRight(self):
# return self.currentPos + self.width / 2
#
# def currentLeft(self):
# return self.currentPos - self.width / 2
#
# def idealRight(self):
# return self.idealPos + self.width / 2
#
# def idealLeft(self):
# return self.idealPos - self.width / 2
#
# def removeStub(self):
# if self.parent:
# self.parent.child = None
# self.parent = None
# return self
#
# def createStub(self, width=None):
# stub = Node(self.idealPos, width, self.data)
# stub.currentPos = self.currentPos
# stub.child = self
# self.parent = stub
# return stub
#
# def isStub(self):
# return not (not (self.child))
#
# def getPathToRoot(self):
# path = []
# current = self
# while current:
# path.append(current)
# current = current.parent
# return path
#
# def getPathFromRoot(self):
# return list(reversed(self.getPathToRoot()))
#
# def getPathToRootLength(self):
# length = 0
# current = self
# while current:
# targetPos = (
# current.parent.currentPos
# if current.parent
# else current.idealPos
# )
# length += abs(current.currentPos - targetPos)
# current = current.parent
# return length
#
# def getRoot(self):
# previous = self
# current = self
# while current:
# previous = current
# current = current.parent
# return previous
#
# def getLayerIndex(self):
# return self.layerIndex
#
# def clone(self):
# node = Node(self.idealPos, self.width, self.data)
# node.currentPos = self.currentPos
# node.layerIndex = self.layerIndex
# return node
. Output only the next line. | Node(1, 50), |
Next line prediction: <|code_start|>
"""
NOTE: To make writing the tests easier and in accordance with the original d3
tests they are copied from, the local function converts months from 0 based
indexing to Python's 1 based indexing.
"""
def local(year, month, day, hours=0, minutes=0, seconds=0, milliseconds=0):
# helper function copied from d3/test/time/time.js
date = datetime(year, month+1, day, hours, minutes, seconds, milliseconds
* 1000)
return date
class TimeScaleTestCase(unittest.TestCase):
def test_nice_1c(self):
# rounds using the specified time interval
<|code_end|>
. Use current file imports:
(import unittest
from datetime import datetime
from labella.scale import TimeScale
from labella.d3_time import d3_time)
and context including class names, function names, or small code snippets from other files:
# Path: labella/scale.py
# class TimeScale(object):
# def __init__(self, linear=None, methods=None, fmt=None):
# self._linear = LinearScale() if linear is None else linear
# self._methods = (
# d3_time_scaleLocalMethods if methods is None else methods
# )
# # self._format = d3_time_scaleLocalFormat if fmt is None else fmt
# self._format = mytimeformat if fmt is None else fmt
#
# def invert(self, x):
# return milli2dt(self._linear.invert(x))
#
# def domain(self, x=None):
# if x is None:
# return list(map(milli2dt, self._linear.domain()))
# num_domain = list(map(dt2milli, x))
# self._linear.domain(num_domain)
# return self
#
# def tickMethod(self, extent, count):
# span = extent[1] - extent[0]
# target = span / count
# i = d3_bisect(d3_time_scaleSteps, target)
# if i == len(d3_time_scaleSteps):
# return [
# self._methods[-1][0],
# d3_scale_linearTickRange(
# list(map(lambda d: d / 31536e6, extent)), count
# )[2],
# ]
# if not i:
# return [
# d3_time_scaleMilliseconds,
# d3_scale_linearTickRange(extent, count)[2],
# ]
# if target / d3_time_scaleSteps[i - 1] < d3_time_scaleSteps[i] / target:
# return self._methods[i - 1]
# return self._methods[i]
#
# def nice(self, interval=None, skip=0):
# domain = self.domain()
# extent = d3_scaleExtent(domain)
# extent = list(map(dt2milli, extent))
# if interval is None:
# method = self.tickMethod(extent, 10)
# elif str(interval).isnumeric():
# method = self.tickMethod(extent, interval)
# else:
# method = None
# if method:
# interval = method[0]
# skip = method[1]
#
# def skipped(date):
# return (not date is None) and (
# not len(
# interval.range(date, milli2dt(dt2milli(date) + 1), skip)
# )
# )
#
# if skip > 1:
# return self.domain(
# d3_scale_nice(
# domain,
# {
# "floor": lambda x: time_nice_floor(
# x, skipped, interval
# ),
# "ceil": lambda x: time_nice_ceil(x, skipped, interval),
# },
# )
# )
# else:
# return self.domain(d3_scale_nice(domain, interval))
#
# def ticks(self, interval=None, skip=None):
# extent = d3_scaleExtent(self.domain())
# extent = list(map(lambda x: x.timestamp() * 1000, extent))
# method = (
# self.tickMethod(extent, 10)
# if interval is None
# else self.tickMethod(extent, interval)
# )
# if method:
# interval = method[0]
# skip = method[1]
#
# if skip < 1:
# return interval.range(
# milli2dt(extent[0]), milli2dt(extent[1] + 1), 1
# )
# else:
# return interval.range(
# milli2dt(extent[0]), milli2dt(extent[1] + 1), skip
# )
#
# def tickFormat(self):
# return self._format
#
# def range(self, x=None):
# if x is None:
# return self._linear.range()
# self._linear.range(x)
# return self
#
# def rangeRound(self, x):
# pass
#
# def clamp(self, x=None):
# if x is None:
# return self._linear.clamp()
# self._linear.clamp(x)
# return self
#
# def interpolate(self, x=None):
# if x is None:
# return self._linear.interpolate()
# self._linear.interpolate(x)
# return self
#
# def copy(self):
# return TimeScale(self._linear.copy(), self._methods, self._format)
#
# def __call__(self, x):
# return self._linear(dt2milli(x))
#
# Path: labella/d3_time.py
# class d3_time_interval:
# def __init__(self, local, step, number):
# def round(self, date):
# def floor(self, date):
# def ceil(self, date):
# def offset(self, date, k):
# def range(self, t0, t1, dt):
# def __call__(self, date):
# def d3_time_hour_local(date):
# def d3_time_day_offset(date, offset):
# def day_of_year(date):
# def d3_time_week_local(date):
# def d3_time_week_number(date):
# def d3_time_month_local(date):
# def d3_time_month_offset(date, offset):
# def d3_time_year_local(date):
. Output only the next line. | x = TimeScale().domain([local(2009, 0, 1, 0, 12), local(2009, 0, 1, |
Given snippet: <|code_start|>
"""
NOTE: To make writing the tests easier and in accordance with the original d3
tests they are copied from, the local function converts months from 0 based
indexing to Python's 1 based indexing.
"""
def local(year, month, day, hours=0, minutes=0, seconds=0, milliseconds=0):
# helper function copied from d3/test/time/time.js
date = datetime(year, month+1, day, hours, minutes, seconds, milliseconds
* 1000)
return date
class TimeScaleTestCase(unittest.TestCase):
def test_nice_1c(self):
# rounds using the specified time interval
x = TimeScale().domain([local(2009, 0, 1, 0, 12), local(2009, 0, 1,
23, 48)])
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
from datetime import datetime
from labella.scale import TimeScale
from labella.d3_time import d3_time
and context:
# Path: labella/scale.py
# class TimeScale(object):
# def __init__(self, linear=None, methods=None, fmt=None):
# self._linear = LinearScale() if linear is None else linear
# self._methods = (
# d3_time_scaleLocalMethods if methods is None else methods
# )
# # self._format = d3_time_scaleLocalFormat if fmt is None else fmt
# self._format = mytimeformat if fmt is None else fmt
#
# def invert(self, x):
# return milli2dt(self._linear.invert(x))
#
# def domain(self, x=None):
# if x is None:
# return list(map(milli2dt, self._linear.domain()))
# num_domain = list(map(dt2milli, x))
# self._linear.domain(num_domain)
# return self
#
# def tickMethod(self, extent, count):
# span = extent[1] - extent[0]
# target = span / count
# i = d3_bisect(d3_time_scaleSteps, target)
# if i == len(d3_time_scaleSteps):
# return [
# self._methods[-1][0],
# d3_scale_linearTickRange(
# list(map(lambda d: d / 31536e6, extent)), count
# )[2],
# ]
# if not i:
# return [
# d3_time_scaleMilliseconds,
# d3_scale_linearTickRange(extent, count)[2],
# ]
# if target / d3_time_scaleSteps[i - 1] < d3_time_scaleSteps[i] / target:
# return self._methods[i - 1]
# return self._methods[i]
#
# def nice(self, interval=None, skip=0):
# domain = self.domain()
# extent = d3_scaleExtent(domain)
# extent = list(map(dt2milli, extent))
# if interval is None:
# method = self.tickMethod(extent, 10)
# elif str(interval).isnumeric():
# method = self.tickMethod(extent, interval)
# else:
# method = None
# if method:
# interval = method[0]
# skip = method[1]
#
# def skipped(date):
# return (not date is None) and (
# not len(
# interval.range(date, milli2dt(dt2milli(date) + 1), skip)
# )
# )
#
# if skip > 1:
# return self.domain(
# d3_scale_nice(
# domain,
# {
# "floor": lambda x: time_nice_floor(
# x, skipped, interval
# ),
# "ceil": lambda x: time_nice_ceil(x, skipped, interval),
# },
# )
# )
# else:
# return self.domain(d3_scale_nice(domain, interval))
#
# def ticks(self, interval=None, skip=None):
# extent = d3_scaleExtent(self.domain())
# extent = list(map(lambda x: x.timestamp() * 1000, extent))
# method = (
# self.tickMethod(extent, 10)
# if interval is None
# else self.tickMethod(extent, interval)
# )
# if method:
# interval = method[0]
# skip = method[1]
#
# if skip < 1:
# return interval.range(
# milli2dt(extent[0]), milli2dt(extent[1] + 1), 1
# )
# else:
# return interval.range(
# milli2dt(extent[0]), milli2dt(extent[1] + 1), skip
# )
#
# def tickFormat(self):
# return self._format
#
# def range(self, x=None):
# if x is None:
# return self._linear.range()
# self._linear.range(x)
# return self
#
# def rangeRound(self, x):
# pass
#
# def clamp(self, x=None):
# if x is None:
# return self._linear.clamp()
# self._linear.clamp(x)
# return self
#
# def interpolate(self, x=None):
# if x is None:
# return self._linear.interpolate()
# self._linear.interpolate(x)
# return self
#
# def copy(self):
# return TimeScale(self._linear.copy(), self._methods, self._format)
#
# def __call__(self, x):
# return self._linear(dt2milli(x))
#
# Path: labella/d3_time.py
# class d3_time_interval:
# def __init__(self, local, step, number):
# def round(self, date):
# def floor(self, date):
# def ceil(self, date):
# def offset(self, date, k):
# def range(self, t0, t1, dt):
# def __call__(self, date):
# def d3_time_hour_local(date):
# def d3_time_day_offset(date, offset):
# def day_of_year(date):
# def d3_time_week_local(date):
# def d3_time_week_number(date):
# def d3_time_month_local(date):
# def d3_time_month_offset(date, offset):
# def d3_time_year_local(date):
which might include code, classes, or functions. Output only the next line. | self.assertEqual(x.nice(d3_time['day']).domain(), [local(2009, 0, 1), |
Next line prediction: <|code_start|> return x
def ceil(self, x):
return x
d3_time_scaleMilliseconds = d3TimeScaleMilliseconds()
d3_time_scaleSteps = [
1e3, # 1-second
5e3, # 5-second
15e3, # 15-second
3e4, # 30-second
6e4, # 1-minute
3e5, # 5-minute
9e5, # 15-minute
18e5, # 30-minute
36e5, # 1-hour
108e5, # 3-hour
216e5, # 6-hour
432e5, # 12-hour
864e5, # 1-day
1728e5, # 2-day
6048e5, # 1-week
2592e6, # 1-month
7776e6, # 3-month
31536e6, # 1-year
]
d3_time_scaleLocalMethods = [
<|code_end|>
. Use current file imports:
(import math
from datetime import datetime
from labella.d3_time import d3_time)
and context including class names, function names, or small code snippets from other files:
# Path: labella/d3_time.py
# class d3_time_interval:
# def __init__(self, local, step, number):
# def round(self, date):
# def floor(self, date):
# def ceil(self, date):
# def offset(self, date, k):
# def range(self, t0, t1, dt):
# def __call__(self, date):
# def d3_time_hour_local(date):
# def d3_time_day_offset(date, offset):
# def day_of_year(date):
# def d3_time_week_local(date):
# def d3_time_week_number(date):
# def d3_time_month_local(date):
# def d3_time_month_offset(date, offset):
# def d3_time_year_local(date):
. Output only the next line. | [d3_time["second"], 1], |
Given snippet: <|code_start|>
def make_variables(variables):
vs = []
for var in variables:
if isinstance(var, int):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
from labella import vpsc
and context:
# Path: labella/vpsc.py
# class PositionStats(object):
# class Constraint(object):
# class Variable(object):
# class Block(object):
# class Blocks(object):
# class Solver(object):
# def __init__(self, scale):
# def addVariable(self, v):
# def getPosn(self):
# def __init__(self, left, right, gap, equality=None):
# def slack(self):
# def __repr__(self):
# def __str__(self):
# def __init__(self, desiredPosition, weight=None, scale=None):
# def dfdv(self):
# def position(self):
# def visitNeighbours(self, prev, f):
# def ff(c, _next):
# def __repr__(self):
# def __str__(self):
# def __init__(self, v):
# def addVariable(self, v):
# def updateWeightedPosition(self):
# def compute_lm(self, v, u, postAction):
# def f(c, _next):
# def populateSplitBlock(self, v, prev):
# def f(c, _next):
# def traverse(self, visit, acc, v, prev):
# def f(c, _next):
# def findMinLM(self):
# def f(c):
# def findMinLMBetween(self, lv, rv):
# def f(x):
# def f(c, _next):
# def findPath(self, v, prev, to, visit):
# def f(c, _next):
# def isActiveDirectedPathBetween(self, u, v):
# def split(cls, c):
# def createSplitBlock(cls, startVar):
# def splitBetween(self, vl, vr):
# def mergeAcross(self, b, c, dist):
# def cost(self):
# def __init__(self, vs):
# def cost(self):
# def insert(self, b):
# def remove(self, b):
# def merge(self, c):
# def forEach(self, f):
# def updateBlockPositions(self):
# def split(self, inactive):
# def __init__(self, vs, cs):
# def cost(self):
# def setStartingPositions(self, ps):
# def setDesiredPositions(self, ps):
# def mostViolated(self):
# def satisfy(self):
# def solve(self):
# LAGRANGIAN_TOLERANCE = -1e-4
# ZERO_UPPERBOUND = -1e-10
which might include code, classes, or functions. Output only the next line. | v = vpsc.Variable(var) |
Given snippet: <|code_start|>
def test_parse_invalid_timespan():
timespan_id = 'SomeInvalidTimespan'
with assert_raises(ValueError):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from nose.tools import assert_equals, assert_raises
from lib.timespan import parse_timespan
and context:
# Path: lib/timespan.py
# def parse_timespan(timespan_id):
# timespan_str = str(timespan_id).replace(' ', '')
#
# ts_id, ts_alias, ts_name, ts_description, ts_timespan = None, None, None, None, None
#
# # Year, for example '2017'
# if re.match('\d{4}$', timespan_str):
# ts_id = timespan_str
# ts_alias = timespan_str
# ts_name = timespan_str
# ts_description = 'Year {year}'.format(year=timespan_str)
# ts_timespan = '[{year}-01-01, {year}-12-31]'.format(year=timespan_str)
# # Range of years, for example '2017-2018'
# elif re.match('\d{4}-\d{4}$', timespan_str):
# year1, year2 = list(map(int, timespan_str.split('-')))
# if year1 > year2:
# raise ValueError("Invalid timespan '{timespan}' (invalid range of years)".format(timespan=timespan_id))
#
# ts_id = '{year1} - {year2}'.format(year1=year1, year2=year2)
# ts_alias = '{year1} - {year2}'.format(year1=year1, year2=year2)
# ts_name = '{year1} - {year2}'.format(year1=year1, year2=year2)
# ts_description = 'From {year1} to {year2}'.format(year1=year1, year2=year2)
# ts_timespan = '[{year1}-01-01, {year2}-12-31]'.format(year1=year1, year2=year2)
# # Year and month, for example '2017-03'
# elif re.match('\d{4}-\d{2}$', timespan_str):
# year, month = list(map(int, timespan_str.split('-')))
# if month < 1 or month > 12:
# raise ValueError("Invalid timespan '{timespan}' (invalid month)".format(timespan=timespan_id))
#
# last_day_of_month = calendar.monthrange(year, month)[1]
# month_name = calendar.month_name[month]
#
# ts_id = '{year}-{month:02d}'.format(year=year, month=month)
# ts_alias = '{year}-{month:02d}'.format(year=year, month=month)
# ts_name = '{year}-{month:02d}'.format(year=year, month=month)
# ts_description = '{month_name} {year}'.format(month_name=month_name, year=year)
# ts_timespan = '[{year}-{month:02d}-01, {year}-{month:02d}-{last_day}]'.format(year=year, month=month,
# last_day=last_day_of_month)
# # Year and quarter, for example '2017Q3'
# elif re.match('\d{4}Q\d{1}$', timespan_str):
# year, quarter = list(map(int, timespan_str.split('Q')))
# if quarter < 1 or quarter > 4:
# raise ValueError("Invalid timespan '{timespan}' (invalid quarter)".format(timespan=timespan_id))
#
# last_month = quarter * 3
# initial_mont = last_month - 2
# last_day = calendar.monthrange(year, last_month)[1]
#
# ts_id = '{year}Q{quarter}'.format(year=year, quarter=quarter)
# ts_alias = '{year}Q{quarter}'.format(year=year, quarter=quarter)
# ts_name = '{year}Q{quarter}'.format(year=year, quarter=quarter)
# ts_description = '{year}, {quarter} quarter'.format(year=year, quarter=QUARTERS[quarter])
# ts_timespan = '[{year}-{ini_month:02d}-01, {year}-{end_month:02d}-{last_day}]'.format(year=year,
# ini_month=initial_mont,
# end_month=last_month,
# last_day=last_day)
# # Day, month, year, for example '20171231'
# elif re.match('\d{8}$', timespan_str):
# date = parse(timespan_str)
#
# last_day_of_month = calendar.monthrange(date.year, date.month)[1]
#
# ts_id = date.strftime('%Y%m%d')
# ts_alias = date.strftime('%Y%m%d')
# ts_name = date.strftime('%Y%m%d')
# ts_description = date.strftime('%m/%d/%Y')
# ts_timespan = '[{date}, {date}]'.format(date=date.strftime('%Y-%m-%d'))
# else:
# raise ValueError('Unparseable timespan {timespan}'.format(timespan=timespan_id))
#
# return ts_id, ts_alias, ts_name, ts_description, ts_timespan
which might include code, classes, or functions. Output only the next line. | ts_id, ts_alias, ts_name, ts_description, ts_timespan = parse_timespan(timespan_id) |
Given the code snippet: <|code_start|>
class NYCTags(TagsTask):
def version(self):
return 1
def tags(self):
return [
<|code_end|>
, generate the next line using the imports in this file:
from tasks.meta import OBSTag
from tasks.base_tasks import TagsTask
and context (functions, classes, or occasionally code) from other files:
# Path: tasks/meta.py
# class OBSTag(Base):
# '''
# Tags permit arbitrary groupings of columns.
#
# They should only be created as part of a :meth:`~.tasks.TagsTask.tags`
# implementation.
#
# .. py:attribute:: id
#
# The unique identifier for this table. Is always qualified by
# the module name of the :class:`~.tasks.TagsTask` that created it, and
# should never be specified manually.
#
# .. py:attribute:: name
#
# The name of this tag. This is exposed in the API and user interfaces.
#
# .. py:attribute:: type
#
# The type of this tag. This is used to provide more information about
# what the tag means. Examples are ``section``, ``subsection``,
# ``license``, ``unit``, although any arbitrary type can be defined.
#
# .. py:attribute:: description
#
# Description of this tag. This may be exposed in the API and catalog.
#
# .. py:attribute:: columns
#
# An iterable of all the :class:`~.meta.OBSColumn`s tagged with this tag.
#
# .. py:attribute:: version
#
# A version control number, used to determine whether the tag and its
# metadata should be updated.
#
# '''
# __tablename__ = 'obs_tag'
#
# id = Column(Text, primary_key=True)
#
# name = Column(Text, nullable=False)
# type = Column(Text, nullable=False)
# description = Column(Text)
#
# columns = association_proxy('tag_column_tags', 'column')
#
# version = Column(Numeric, default=0, nullable=False)
#
# Path: tasks/base_tasks.py
# class TagsTask(Task):
# '''
# This will update-or-create :class:`OBSTag <tasks.meta.OBSTag>` objects
# int the database when run.
#
# The :meth:`~.TagsTask.tags` method must be overwritten.
#
# :meth:`~TagsTask.version` is used to control updates to the database.
# '''
#
# def tags(self):
# '''
# This method must be overwritten in subclasses.
#
# The return value must be an iterable of instances of
# :class:`OBSTag <tasks.meta.OBSTag>`.
# '''
# raise NotImplementedError('Must return iterable of OBSTags')
#
# def on_failure(self, ex):
# session_rollback(self, ex)
# super(TagsTask, self).on_failure(ex)
#
# def on_success(self):
# session_commit(self)
#
# def run(self):
# for _, tagtarget in self.output().items():
# tagtarget.update_or_create()
#
# def version(self):
# return 0
#
# def output(self):
# # if self.deps() and not all([d.complete() for d in self.deps()]):
# # raise Exception('Must run prerequisites first')
# output = {}
# for tag in self.tags():
# orig_id = tag.id
# tag.id = '.'.join([classpath(self), orig_id])
# if not tag.version:
# tag.version = self.version()
# output[orig_id] = TagTarget(tag, self)
# return output
. Output only the next line. | OBSTag(id='nyc', |
Here is a snippet: <|code_start|>
class SourceTags(TagsTask):
def tags(self):
return [
<|code_end|>
. Write the next line using the current file imports:
from tasks.meta import OBSTag
from tasks.base_tasks import TagsTask
and context from other files:
# Path: tasks/meta.py
# class OBSTag(Base):
# '''
# Tags permit arbitrary groupings of columns.
#
# They should only be created as part of a :meth:`~.tasks.TagsTask.tags`
# implementation.
#
# .. py:attribute:: id
#
# The unique identifier for this table. Is always qualified by
# the module name of the :class:`~.tasks.TagsTask` that created it, and
# should never be specified manually.
#
# .. py:attribute:: name
#
# The name of this tag. This is exposed in the API and user interfaces.
#
# .. py:attribute:: type
#
# The type of this tag. This is used to provide more information about
# what the tag means. Examples are ``section``, ``subsection``,
# ``license``, ``unit``, although any arbitrary type can be defined.
#
# .. py:attribute:: description
#
# Description of this tag. This may be exposed in the API and catalog.
#
# .. py:attribute:: columns
#
# An iterable of all the :class:`~.meta.OBSColumn`s tagged with this tag.
#
# .. py:attribute:: version
#
# A version control number, used to determine whether the tag and its
# metadata should be updated.
#
# '''
# __tablename__ = 'obs_tag'
#
# id = Column(Text, primary_key=True)
#
# name = Column(Text, nullable=False)
# type = Column(Text, nullable=False)
# description = Column(Text)
#
# columns = association_proxy('tag_column_tags', 'column')
#
# version = Column(Numeric, default=0, nullable=False)
#
# Path: tasks/base_tasks.py
# class TagsTask(Task):
# '''
# This will update-or-create :class:`OBSTag <tasks.meta.OBSTag>` objects
# int the database when run.
#
# The :meth:`~.TagsTask.tags` method must be overwritten.
#
# :meth:`~TagsTask.version` is used to control updates to the database.
# '''
#
# def tags(self):
# '''
# This method must be overwritten in subclasses.
#
# The return value must be an iterable of instances of
# :class:`OBSTag <tasks.meta.OBSTag>`.
# '''
# raise NotImplementedError('Must return iterable of OBSTags')
#
# def on_failure(self, ex):
# session_rollback(self, ex)
# super(TagsTask, self).on_failure(ex)
#
# def on_success(self):
# session_commit(self)
#
# def run(self):
# for _, tagtarget in self.output().items():
# tagtarget.update_or_create()
#
# def version(self):
# return 0
#
# def output(self):
# # if self.deps() and not all([d.complete() for d in self.deps()]):
# # raise Exception('Must run prerequisites first')
# output = {}
# for tag in self.tags():
# orig_id = tag.id
# tag.id = '.'.join([classpath(self), orig_id])
# if not tag.version:
# tag.version = self.version()
# output[orig_id] = TagTarget(tag, self)
# return output
, which may include functions, classes, or code. Output only the next line. | OBSTag(id='statcan-nhs-2011', |
Given the code snippet: <|code_start|>'''
Util functions for luigi bigmetadata tasks.
'''
OBSERVATORY_PREFIX = 'obs_'
OBSERVATORY_SCHEMA = 'observatory'
<|code_end|>
, generate the next line using the imports in this file:
import os
import time
import re
import subprocess
import shutil
import zipfile
import requests
from lib.logger import get_logger
from itertools import zip_longest
from slugify import slugify
and context (functions, classes, or occasionally code) from other files:
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
. Output only the next line. | LOGGER = get_logger(__name__) |
Given the code snippet: <|code_start|>
class Shoreline(TempTableTask):
def requires(self):
return LowerLayerSuperOutputAreas()
def version(self):
return 1
def run(self):
<|code_end|>
, generate the next line using the imports in this file:
from tasks.base_tasks import TempTableTask
from tasks.uk.gov import LowerLayerSuperOutputAreas
from tasks.meta import current_session
and context (functions, classes, or occasionally code) from other files:
# Path: tasks/base_tasks.py
# class TempTableTask(Task):
# '''
# A Task that generates a table that will not be referred to in metadata.
#
# This is useful for intermediate processing steps that can benefit from the
# session guarantees of the ETL, as well as automatic table naming.
#
# :param force: Optional Boolean, ``False`` by default. If ``True``, will
# overwrite output table even if it exists already.
# '''
#
# force = BoolParameter(default=False, significant=False)
#
# def on_failure(self, ex):
# session_rollback(self, ex)
# super(TempTableTask, self).on_failure(ex)
#
# def on_success(self):
# session_commit(self)
#
# def target_tablename(self):
# '''
# Can be overriden to expose a different table name
# :return:
# '''
# return unqualified_task_id(self.task_id)[:MAX_PG_IDENTIFIER_LENGTH]
#
# def run(self):
# '''
# Must be overriden by subclass. Should create and populate a table
# named from ``self.output().table``
#
# If this completes without exceptions, the :func:`~.util.current_session
# will be committed; if there is an exception, it will be rolled back.
# '''
# raise Exception('Must override `run`')
#
# def output(self):
# '''
# By default, returns a :class:`~.targets.TableTarget` whose associated
# table lives in a special-purpose schema in Postgres derived using
# :func:`~.util.classpath`.
# '''
# return PostgresTarget(classpath(self), self.target_tablename())
#
# Path: tasks/uk/gov.py
# class LowerLayerSuperOutputAreas(TableTask):
#
# def requires(self):
# return {
# 'columns': LowerLayerSuperOutputAreasColumns(),
# 'lsoa-data': SimplifiedImportLowerLayerSuperOutputAreas(),
# 'dz-data': SimplifiedImportDataZones(),
# 'ni-data': SimplifiedImportNISuperOutputAreas(),
# }
#
# def version(self):
# return 1
#
# def table_timespan(self):
# return get_timespan('2011')
#
# # TODO: https://github.com/CartoDB/bigmetadata/issues/435
# def targets(self):
# return {
# OBSTable(id='.'.join([self.schema(), self.name()])): GEOM_REF,
# }
#
# def columns(self):
# input_ = self.input()
# cols = OrderedDict()
# cols.update(input_['columns'])
# return cols
#
# def populate(self):
# session = current_session()
#
# query = '''
# INSERT INTO {output}
# SELECT ST_MakeValid(wkb_geometry), lsoa11cd, lsoa11nm
# FROM {input_lsoa}
# UNION ALL
# SELECT ST_MakeValid(wkb_geometry), datazone, name
# FROM {input_dz}
# UNION ALL
# SELECT ST_MakeValid(wkb_geometry), soa_code, soa_label
# FROM {input_ni}
# '''.format(output=self.output().table,
# input_lsoa=self.input()['lsoa-data'].table,
# input_dz=self.input()['dz-data'].table,
# input_ni=self.input()['ni-data'].table,)
#
# session.execute(query)
#
# @staticmethod
# def geoid_column():
# return LowerLayerSuperOutputAreasColumns.geoid_column()
#
# @staticmethod
# def geoname_column():
# return LowerLayerSuperOutputAreasColumns.geoname_column()
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
. Output only the next line. | session = current_session() |
Given snippet: <|code_start|>
LOGGER = get_logger(__name__)
GEOGRAPHY_LEVELS = {
'GEO_STE': 'au.geo.STE', # State/Territory
'GEO_SA4': 'au.geo.SA4', # Statistical Area Level 4
'GEO_SA3': 'au.geo.SA3', # Statistical Area Level 3
'GEO_SA2': 'au.geo.SA2', # Statistical Area Level 2
'GEO_SA1': 'au.geo.SA1', # Statistical Area Level 1
'GEO_MB': 'au.geo.MB', # Mesh blocks
'GEO_POA': 'au.geo.POA', # Postal Areas
}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
return {
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.au.data import XCPAllGeographiesAllTables
from tasks.meta import current_session
from lib.logger import get_logger
and context:
# Path: tasks/au/data.py
# class XCPAllGeographiesAllTables(WrapperTask):
# year = IntParameter()
#
# def requires(self):
# for resolution in GEOGRAPHIES[self.year]:
# yield XCPAllTables(resolution=resolution, year=self.year)
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
which might include code, classes, or functions. Output only the next line. | 'bcp': XCPAllGeographiesAllTables(year=2011) |
Continue the code snippet: <|code_start|>
GEOGRAPHY_LEVELS = {
'GEO_STE': 'au.geo.STE', # State/Territory
'GEO_SA4': 'au.geo.SA4', # Statistical Area Level 4
'GEO_SA3': 'au.geo.SA3', # Statistical Area Level 3
'GEO_SA2': 'au.geo.SA2', # Statistical Area Level 2
'GEO_SA1': 'au.geo.SA1', # Statistical Area Level 1
'GEO_MB': 'au.geo.MB', # Mesh blocks
'GEO_POA': 'au.geo.POA', # Postal Areas
}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
return {
'bcp': XCPAllGeographiesAllTables(year=2011)
}
def _get_config_data(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
with (open('{}/{}'.format(dir_path, 'measurements.json'))) as f:
return json.load(f)
def run(self):
<|code_end|>
. Use current file imports:
import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.au.data import XCPAllGeographiesAllTables
from tasks.meta import current_session
from lib.logger import get_logger
and context (classes, functions, or code) from other files:
# Path: tasks/au/data.py
# class XCPAllGeographiesAllTables(WrapperTask):
# year = IntParameter()
#
# def requires(self):
# for resolution in GEOGRAPHIES[self.year]:
# yield XCPAllTables(resolution=resolution, year=self.year)
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
. Output only the next line. | session = current_session() |
Here is a snippet: <|code_start|>
REMOVE_FROM_DO = "removefromdo"
EXTRACT_NULLS_FROM_LOG = "nullsfromlog"
MERGE_NULLS_FILES = "mergenullsfiles"
def remove_from_do(id, exclusions_string = None):
MINIMUM_ID_LENGTH = 5
MAX_COLS_TO_PRINT = 50
exclusions = exclusions_string.split(',') if exclusions_string else []
if len(id) < MINIMUM_ID_LENGTH:
print("The identifier '{}' is too short (minimum {} characters)".format(id, MINIMUM_ID_LENGTH))
return
<|code_end|>
. Write the next line using the current file imports:
import argparse
import os
import json
from tasks.meta import CurrentSession
and context from other files:
# Path: tasks/meta.py
# class CurrentSession(object):
#
# def __init__(self, async_conn=False):
# self._session = None
# self._pid = None
# self._async_conn = False
# # https://docs.python.org/3/howto/logging.html#optimization
# self.debug_logging_enabled = LOGGER.isEnabledFor(DEBUG)
#
# def begin(self):
# if not self._session:
# self._session = sessionmaker(bind=get_engine())()
# self._pid = os.getpid()
#
# def get(self):
# # If we forked, there would be a PID mismatch and we need a new
# # connection
# if self._pid != os.getpid():
# self._session = None
# LOGGER.debug('FORKED: {} not {}'.format(self._pid, os.getpid()))
# if not self._session:
# self.begin()
# try:
# self._session.execute("SELECT 1")
# # Useful for debugging. This is called thousands of times
# if self.debug_logging_enabled:
# curframe = inspect.currentframe()
# calframe = inspect.getouterframes(curframe, 2)
# callers = ' <- '.join([c[3] for c in calframe[1:9]])
# # TODO: don't know why, `.debug` won't work at this point
# LOGGER.info('callers: {}'.format(callers))
# except:
# self._session = None
# self.begin()
# return self._session
#
# def commit(self):
# if self._pid != os.getpid():
# raise Exception('cannot commit forked connection')
# if not self._session:
# return
# try:
# self._session.commit()
# except:
# self._session.rollback()
# self._session.expunge_all()
# raise
# finally:
# self._session.close()
# self._session = None
#
# def rollback(self):
# if self._pid != os.getpid():
# raise Exception('cannot rollback forked connection')
# if not self._session:
# return
# try:
# self._session.rollback()
# except:
# raise
# finally:
# self._session.expunge_all()
# self._session.close()
# self._session = None
, which may include functions, classes, or code. Output only the next line. | session = CurrentSession().get() |
Using the snippet: <|code_start|>
class LicenseTags(TagsTask):
def version(self):
return 3
def tags(self):
return [
<|code_end|>
, determine the next line of code. You have imports:
from tasks.meta import OBSTag
from tasks.base_tasks import TagsTask
and context (class names, function names, or code) available:
# Path: tasks/meta.py
# class OBSTag(Base):
# '''
# Tags permit arbitrary groupings of columns.
#
# They should only be created as part of a :meth:`~.tasks.TagsTask.tags`
# implementation.
#
# .. py:attribute:: id
#
# The unique identifier for this table. Is always qualified by
# the module name of the :class:`~.tasks.TagsTask` that created it, and
# should never be specified manually.
#
# .. py:attribute:: name
#
# The name of this tag. This is exposed in the API and user interfaces.
#
# .. py:attribute:: type
#
# The type of this tag. This is used to provide more information about
# what the tag means. Examples are ``section``, ``subsection``,
# ``license``, ``unit``, although any arbitrary type can be defined.
#
# .. py:attribute:: description
#
# Description of this tag. This may be exposed in the API and catalog.
#
# .. py:attribute:: columns
#
# An iterable of all the :class:`~.meta.OBSColumn`s tagged with this tag.
#
# .. py:attribute:: version
#
# A version control number, used to determine whether the tag and its
# metadata should be updated.
#
# '''
# __tablename__ = 'obs_tag'
#
# id = Column(Text, primary_key=True)
#
# name = Column(Text, nullable=False)
# type = Column(Text, nullable=False)
# description = Column(Text)
#
# columns = association_proxy('tag_column_tags', 'column')
#
# version = Column(Numeric, default=0, nullable=False)
#
# Path: tasks/base_tasks.py
# class TagsTask(Task):
# '''
# This will update-or-create :class:`OBSTag <tasks.meta.OBSTag>` objects
# int the database when run.
#
# The :meth:`~.TagsTask.tags` method must be overwritten.
#
# :meth:`~TagsTask.version` is used to control updates to the database.
# '''
#
# def tags(self):
# '''
# This method must be overwritten in subclasses.
#
# The return value must be an iterable of instances of
# :class:`OBSTag <tasks.meta.OBSTag>`.
# '''
# raise NotImplementedError('Must return iterable of OBSTags')
#
# def on_failure(self, ex):
# session_rollback(self, ex)
# super(TagsTask, self).on_failure(ex)
#
# def on_success(self):
# session_commit(self)
#
# def run(self):
# for _, tagtarget in self.output().items():
# tagtarget.update_or_create()
#
# def version(self):
# return 0
#
# def output(self):
# # if self.deps() and not all([d.complete() for d in self.deps()]):
# # raise Exception('Must run prerequisites first')
# output = {}
# for tag in self.tags():
# orig_id = tag.id
# tag.id = '.'.join([classpath(self), orig_id])
# if not tag.version:
# tag.version = self.version()
# output[orig_id] = TagTarget(tag, self)
# return output
. Output only the next line. | OBSTag(id='cc-by-4', |
Given the following code snippet before the placeholder: <|code_start|>
SCHEMA = 'schema'
TABLE_ID = 'tableid'
SIMPLIFICATION = 'simplification'
GEOM_FIELD = 'geomfield'
FACTOR = 'factor'
MAX_MEMORY = 'maxmemory'
SKIP_FAILURES = 'skipfailures'
SIMPLIFICATION_MAPSHAPER = 'mapshaper'
SIMPLIFICATION_POSTGIS = 'postgis'
DEFAULT_SIMPLIFICATION = SIMPLIFICATION_MAPSHAPER
DEFAULT_MAXMEMORY = '8192'
DEFAULT_SKIPFAILURES = 'no'
<|code_end|>
, predict the next line using imports from the current file:
from luigi import WrapperTask, Parameter
from lib.logger import get_logger
from .simplification import SimplifyGeometriesMapshaper, \
SimplifyGeometriesPostGIS, SIMPLIFIED_SUFFIX
import json
import os
and context including class names, function names, and sometimes code from other files:
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
#
# Path: tasks/simplification.py
# class SimplifyGeometriesMapshaper(Task):
# schema = Parameter()
# table_input = Parameter()
# table_output = Parameter(default='')
# geomfield = Parameter(default=DEFAULT_GEOMFIELD)
# retainfactor = Parameter(default=DEFAULT_P_RETAIN_FACTOR_MAPSHAPER)
# skipfailures = Parameter(default=SKIPFAILURES_NO)
# maxmemory = Parameter(default=DEFAULT_MAX_MEMORY)
#
# def __init__(self, *args, **kwargs):
# super(SimplifyGeometriesMapshaper, self).__init__(*args, **kwargs)
#
# def requires(self):
# return SimplifyShapefile(schema=self.schema, table_input=self.table_input, table_output=self.table_output,
# geomfield=self.geomfield, retainfactor=self.retainfactor,
# skipfailures=self.skipfailures, maxmemory=self.maxmemory)
#
# def run(self):
# cmd = 'PG_USE_COPY=yes ' \
# 'ogr2ogr -f PostgreSQL "PG:dbname=$PGDATABASE active_schema={schema}" ' \
# '-t_srs "EPSG:4326" -nlt MultiPolygon -nln {table} ' \
# '-lco OVERWRITE=yes -lco PRECISION=no -lco GEOMETRY_NAME={geomfield} ' \
# '-lco SCHEMA={schema} {shp_path} '.format(
# schema=self.output().schema,
# table=self.output().tablename,
# geomfield=self.geomfield,
# shp_path=os.path.join(self.input().path, shp_filename(self.table_output)))
# shell(cmd)
#
# session = CurrentSession().get()
# session.execute('UPDATE "{schema}".{table} '
# 'SET {geomfield}=ST_CollectionExtract(ST_MakeValid({geomfield}), 3)'.format(
# schema=self.output().schema, table=self.output().tablename, geomfield=self.geomfield))
# session.commit()
#
# def output(self):
# return PostgresTarget(self.schema, self.table_output)
#
# class SimplifyGeometriesPostGIS(Task):
# schema = Parameter()
# table_input = Parameter()
# table_output = Parameter()
# geomfield = Parameter(default=DEFAULT_GEOMFIELD)
# retainfactor = Parameter(default=DEFAULT_P_RETAIN_FACTOR_POSTGIS)
#
# def __init__(self, *args, **kwargs):
# super(SimplifyGeometriesPostGIS, self).__init__(*args, **kwargs)
#
# def run(self):
# session = CurrentSession().get()
#
# columns = session.execute("SELECT column_name "
# "FROM information_schema.columns "
# "WHERE table_schema = '{schema}' "
# "AND table_name = '{table}'".format(
# schema=self.schema, table=self.table_input.lower())).fetchall()
#
# factor = simplification_factor(self.schema, self.table_input, self.geomfield, self.retainfactor)
#
# simplified_geomfield = 'ST_CollectionExtract(ST_MakeValid(ST_SimplifyVW({geomfield}, {factor})), 3) ' \
# '{geomfield}'.format(geomfield=self.geomfield, factor=factor)
#
# session.execute('CREATE TABLE "{schema}".{table_output} '
# 'AS SELECT {fields} '
# 'FROM "{schema}".{table_in} '.format(
# schema=self.output().schema, table_in=self.table_input,
# table_output=self.output().tablename,
# fields=', '.join([x[0] if x[0] != self.geomfield else simplified_geomfield
# for x in columns])))
# session.commit()
# session.execute('CREATE INDEX {table_output}_{geomfield}_geo ON '
# '"{schema}".{table_output} USING GIST ({geomfield})'.format(
# table_output=self.output().tablename, geomfield=self.geomfield, schema=self.output().schema))
#
# def output(self):
# return PostgresTarget(self.schema, self.table_output)
#
# SIMPLIFIED_SUFFIX = '_simpl'
. Output only the next line. | LOGGER = get_logger(__name__) |
Next line prediction: <|code_start|>LOGGER = get_logger(__name__)
def get_simplification_params(table_id):
with open(os.path.join(os.path.dirname(__file__), 'simplifications.json')) as file:
return json.load(file).get(table_id)
class Simplify(WrapperTask):
schema = Parameter()
table = Parameter()
table_id = Parameter(default='')
suffix = Parameter(default=SIMPLIFIED_SUFFIX)
def __init__(self, *args, **kwargs):
super(Simplify, self).__init__(*args, **kwargs)
self.table_key = self.table
if self.table_id:
self.table_key = self.table_id
def requires(self):
params = get_simplification_params(self.table_key.lower())
if not params:
LOGGER.error("Simplification not found. Edit 'simplifications.json' and add an entry for '{}'".format(self.table_key.lower()))
simplification = params.get(SIMPLIFICATION, DEFAULT_SIMPLIFICATION)
LOGGER.info("Simplifying %s.%s using %s", self.schema, self.table, simplification)
table_output = '{tablename}{suffix}'.format(tablename=self.table, suffix=self.suffix)
if simplification == SIMPLIFICATION_MAPSHAPER:
<|code_end|>
. Use current file imports:
(from luigi import WrapperTask, Parameter
from lib.logger import get_logger
from .simplification import SimplifyGeometriesMapshaper, \
SimplifyGeometriesPostGIS, SIMPLIFIED_SUFFIX
import json
import os)
and context including class names, function names, or small code snippets from other files:
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
#
# Path: tasks/simplification.py
# class SimplifyGeometriesMapshaper(Task):
# schema = Parameter()
# table_input = Parameter()
# table_output = Parameter(default='')
# geomfield = Parameter(default=DEFAULT_GEOMFIELD)
# retainfactor = Parameter(default=DEFAULT_P_RETAIN_FACTOR_MAPSHAPER)
# skipfailures = Parameter(default=SKIPFAILURES_NO)
# maxmemory = Parameter(default=DEFAULT_MAX_MEMORY)
#
# def __init__(self, *args, **kwargs):
# super(SimplifyGeometriesMapshaper, self).__init__(*args, **kwargs)
#
# def requires(self):
# return SimplifyShapefile(schema=self.schema, table_input=self.table_input, table_output=self.table_output,
# geomfield=self.geomfield, retainfactor=self.retainfactor,
# skipfailures=self.skipfailures, maxmemory=self.maxmemory)
#
# def run(self):
# cmd = 'PG_USE_COPY=yes ' \
# 'ogr2ogr -f PostgreSQL "PG:dbname=$PGDATABASE active_schema={schema}" ' \
# '-t_srs "EPSG:4326" -nlt MultiPolygon -nln {table} ' \
# '-lco OVERWRITE=yes -lco PRECISION=no -lco GEOMETRY_NAME={geomfield} ' \
# '-lco SCHEMA={schema} {shp_path} '.format(
# schema=self.output().schema,
# table=self.output().tablename,
# geomfield=self.geomfield,
# shp_path=os.path.join(self.input().path, shp_filename(self.table_output)))
# shell(cmd)
#
# session = CurrentSession().get()
# session.execute('UPDATE "{schema}".{table} '
# 'SET {geomfield}=ST_CollectionExtract(ST_MakeValid({geomfield}), 3)'.format(
# schema=self.output().schema, table=self.output().tablename, geomfield=self.geomfield))
# session.commit()
#
# def output(self):
# return PostgresTarget(self.schema, self.table_output)
#
# class SimplifyGeometriesPostGIS(Task):
# schema = Parameter()
# table_input = Parameter()
# table_output = Parameter()
# geomfield = Parameter(default=DEFAULT_GEOMFIELD)
# retainfactor = Parameter(default=DEFAULT_P_RETAIN_FACTOR_POSTGIS)
#
# def __init__(self, *args, **kwargs):
# super(SimplifyGeometriesPostGIS, self).__init__(*args, **kwargs)
#
# def run(self):
# session = CurrentSession().get()
#
# columns = session.execute("SELECT column_name "
# "FROM information_schema.columns "
# "WHERE table_schema = '{schema}' "
# "AND table_name = '{table}'".format(
# schema=self.schema, table=self.table_input.lower())).fetchall()
#
# factor = simplification_factor(self.schema, self.table_input, self.geomfield, self.retainfactor)
#
# simplified_geomfield = 'ST_CollectionExtract(ST_MakeValid(ST_SimplifyVW({geomfield}, {factor})), 3) ' \
# '{geomfield}'.format(geomfield=self.geomfield, factor=factor)
#
# session.execute('CREATE TABLE "{schema}".{table_output} '
# 'AS SELECT {fields} '
# 'FROM "{schema}".{table_in} '.format(
# schema=self.output().schema, table_in=self.table_input,
# table_output=self.output().tablename,
# fields=', '.join([x[0] if x[0] != self.geomfield else simplified_geomfield
# for x in columns])))
# session.commit()
# session.execute('CREATE INDEX {table_output}_{geomfield}_geo ON '
# '"{schema}".{table_output} USING GIST ({geomfield})'.format(
# table_output=self.output().tablename, geomfield=self.geomfield, schema=self.output().schema))
#
# def output(self):
# return PostgresTarget(self.schema, self.table_output)
#
# SIMPLIFIED_SUFFIX = '_simpl'
. Output only the next line. | return SimplifyGeometriesMapshaper(schema=self.schema, |
Next line prediction: <|code_start|>
SCHEMA = 'schema'
TABLE_ID = 'tableid'
SIMPLIFICATION = 'simplification'
GEOM_FIELD = 'geomfield'
FACTOR = 'factor'
MAX_MEMORY = 'maxmemory'
SKIP_FAILURES = 'skipfailures'
SIMPLIFICATION_MAPSHAPER = 'mapshaper'
SIMPLIFICATION_POSTGIS = 'postgis'
DEFAULT_SIMPLIFICATION = SIMPLIFICATION_MAPSHAPER
DEFAULT_MAXMEMORY = '8192'
DEFAULT_SKIPFAILURES = 'no'
LOGGER = get_logger(__name__)
def get_simplification_params(table_id):
with open(os.path.join(os.path.dirname(__file__), 'simplifications.json')) as file:
return json.load(file).get(table_id)
class Simplify(WrapperTask):
schema = Parameter()
table = Parameter()
table_id = Parameter(default='')
<|code_end|>
. Use current file imports:
(from luigi import WrapperTask, Parameter
from lib.logger import get_logger
from .simplification import SimplifyGeometriesMapshaper, \
SimplifyGeometriesPostGIS, SIMPLIFIED_SUFFIX
import json
import os)
and context including class names, function names, or small code snippets from other files:
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
#
# Path: tasks/simplification.py
# class SimplifyGeometriesMapshaper(Task):
# schema = Parameter()
# table_input = Parameter()
# table_output = Parameter(default='')
# geomfield = Parameter(default=DEFAULT_GEOMFIELD)
# retainfactor = Parameter(default=DEFAULT_P_RETAIN_FACTOR_MAPSHAPER)
# skipfailures = Parameter(default=SKIPFAILURES_NO)
# maxmemory = Parameter(default=DEFAULT_MAX_MEMORY)
#
# def __init__(self, *args, **kwargs):
# super(SimplifyGeometriesMapshaper, self).__init__(*args, **kwargs)
#
# def requires(self):
# return SimplifyShapefile(schema=self.schema, table_input=self.table_input, table_output=self.table_output,
# geomfield=self.geomfield, retainfactor=self.retainfactor,
# skipfailures=self.skipfailures, maxmemory=self.maxmemory)
#
# def run(self):
# cmd = 'PG_USE_COPY=yes ' \
# 'ogr2ogr -f PostgreSQL "PG:dbname=$PGDATABASE active_schema={schema}" ' \
# '-t_srs "EPSG:4326" -nlt MultiPolygon -nln {table} ' \
# '-lco OVERWRITE=yes -lco PRECISION=no -lco GEOMETRY_NAME={geomfield} ' \
# '-lco SCHEMA={schema} {shp_path} '.format(
# schema=self.output().schema,
# table=self.output().tablename,
# geomfield=self.geomfield,
# shp_path=os.path.join(self.input().path, shp_filename(self.table_output)))
# shell(cmd)
#
# session = CurrentSession().get()
# session.execute('UPDATE "{schema}".{table} '
# 'SET {geomfield}=ST_CollectionExtract(ST_MakeValid({geomfield}), 3)'.format(
# schema=self.output().schema, table=self.output().tablename, geomfield=self.geomfield))
# session.commit()
#
# def output(self):
# return PostgresTarget(self.schema, self.table_output)
#
# class SimplifyGeometriesPostGIS(Task):
# schema = Parameter()
# table_input = Parameter()
# table_output = Parameter()
# geomfield = Parameter(default=DEFAULT_GEOMFIELD)
# retainfactor = Parameter(default=DEFAULT_P_RETAIN_FACTOR_POSTGIS)
#
# def __init__(self, *args, **kwargs):
# super(SimplifyGeometriesPostGIS, self).__init__(*args, **kwargs)
#
# def run(self):
# session = CurrentSession().get()
#
# columns = session.execute("SELECT column_name "
# "FROM information_schema.columns "
# "WHERE table_schema = '{schema}' "
# "AND table_name = '{table}'".format(
# schema=self.schema, table=self.table_input.lower())).fetchall()
#
# factor = simplification_factor(self.schema, self.table_input, self.geomfield, self.retainfactor)
#
# simplified_geomfield = 'ST_CollectionExtract(ST_MakeValid(ST_SimplifyVW({geomfield}, {factor})), 3) ' \
# '{geomfield}'.format(geomfield=self.geomfield, factor=factor)
#
# session.execute('CREATE TABLE "{schema}".{table_output} '
# 'AS SELECT {fields} '
# 'FROM "{schema}".{table_in} '.format(
# schema=self.output().schema, table_in=self.table_input,
# table_output=self.output().tablename,
# fields=', '.join([x[0] if x[0] != self.geomfield else simplified_geomfield
# for x in columns])))
# session.commit()
# session.execute('CREATE INDEX {table_output}_{geomfield}_geo ON '
# '"{schema}".{table_output} USING GIST ({geomfield})'.format(
# table_output=self.output().tablename, geomfield=self.geomfield, schema=self.output().schema))
#
# def output(self):
# return PostgresTarget(self.schema, self.table_output)
#
# SIMPLIFIED_SUFFIX = '_simpl'
. Output only the next line. | suffix = Parameter(default=SIMPLIFIED_SUFFIX) |
Predict the next line for this snippet: <|code_start|>
class DirectoryTarget(Target):
'''
Similar to :class:`LocalTarget <luigi.LocalTarget>` but automatically creates and destroys temp directories
'''
def __init__(self, task):
<|code_end|>
with the help of current file imports:
import os
import shutil
from luigi import Target, LocalTarget
from tasks.util import classpath
and context from other files:
# Path: tasks/util.py
# def classpath(obj):
# '''
# Returns the path to this object's class, relevant to current working dir.
# Excludes the first element of the path. If there is only one element,
# returns ``tmp``.
#
# >>> classpath(object)
# 'tmp'
# >>> from tasks.base_tasks import ColumnsTask
# >>> classpath(ColumnsTask())
# 'util'
# >>> from tasks.es.ine import FiveYearPopulation
# >>> classpath(FiveYearPopulation())
# 'es.ine'
# '''
# classpath_ = '.'.join(obj.__module__.split('.')[1:])
# return classpath_ if classpath_ else 'tmp'
, which may contain function names, class names, or code. Output only the next line. | self.path = os.path.join('tmp', classpath(task), task.task_id) |
Using the snippet: <|code_start|>'''
Define special segments for the census
'''
class SegmentTags(TagsTask):
def tags(self):
return [
<|code_end|>
, determine the next line of code. You have imports:
from tasks.meta import OBSTag
from tasks.base_tasks import TagsTask
and context (class names, function names, or code) available:
# Path: tasks/meta.py
# class OBSTag(Base):
# '''
# Tags permit arbitrary groupings of columns.
#
# They should only be created as part of a :meth:`~.tasks.TagsTask.tags`
# implementation.
#
# .. py:attribute:: id
#
# The unique identifier for this table. Is always qualified by
# the module name of the :class:`~.tasks.TagsTask` that created it, and
# should never be specified manually.
#
# .. py:attribute:: name
#
# The name of this tag. This is exposed in the API and user interfaces.
#
# .. py:attribute:: type
#
# The type of this tag. This is used to provide more information about
# what the tag means. Examples are ``section``, ``subsection``,
# ``license``, ``unit``, although any arbitrary type can be defined.
#
# .. py:attribute:: description
#
# Description of this tag. This may be exposed in the API and catalog.
#
# .. py:attribute:: columns
#
# An iterable of all the :class:`~.meta.OBSColumn`s tagged with this tag.
#
# .. py:attribute:: version
#
# A version control number, used to determine whether the tag and its
# metadata should be updated.
#
# '''
# __tablename__ = 'obs_tag'
#
# id = Column(Text, primary_key=True)
#
# name = Column(Text, nullable=False)
# type = Column(Text, nullable=False)
# description = Column(Text)
#
# columns = association_proxy('tag_column_tags', 'column')
#
# version = Column(Numeric, default=0, nullable=False)
#
# Path: tasks/base_tasks.py
# class TagsTask(Task):
# '''
# This will update-or-create :class:`OBSTag <tasks.meta.OBSTag>` objects
# int the database when run.
#
# The :meth:`~.TagsTask.tags` method must be overwritten.
#
# :meth:`~TagsTask.version` is used to control updates to the database.
# '''
#
# def tags(self):
# '''
# This method must be overwritten in subclasses.
#
# The return value must be an iterable of instances of
# :class:`OBSTag <tasks.meta.OBSTag>`.
# '''
# raise NotImplementedError('Must return iterable of OBSTags')
#
# def on_failure(self, ex):
# session_rollback(self, ex)
# super(TagsTask, self).on_failure(ex)
#
# def on_success(self):
# session_commit(self)
#
# def run(self):
# for _, tagtarget in self.output().items():
# tagtarget.update_or_create()
#
# def version(self):
# return 0
#
# def output(self):
# # if self.deps() and not all([d.complete() for d in self.deps()]):
# # raise Exception('Must run prerequisites first')
# output = {}
# for tag in self.tags():
# orig_id = tag.id
# tag.id = '.'.join([classpath(self), orig_id])
# if not tag.version:
# tag.version = self.version()
# output[orig_id] = TagTarget(tag, self)
# return output
. Output only the next line. | OBSTag(id='middle_aged_men', |
Given the following code snippet before the placeholder: <|code_start|> 'GEO_PA': 'uk.datashare.pa_geo', # Postcode Areas
'GEO_PD': 'uk.odl.pd_geo', # Postcode Districts
'GEO_PS': 'uk.odl.ps_geo', # Postcode Sectors
}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
requirements = {}
if self.geography == 'GEO_PA':
requirements['data'] = CensusPostcodeAreas()
elif self.geography == 'GEO_PD':
requirements['data'] = CensusPostcodeDistricts()
elif self.geography == 'GEO_PS':
requirements['data'] = CensusPostcodeSectors()
return requirements
def _get_config_data(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
with (open('{}/{}'.format(dir_path, 'measurements.json'))) as f:
return json.load(f)
def run(self):
<|code_end|>
, predict the next line using imports from the current file:
import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.meta import current_session
from tasks.uk.census.wrapper import CensusPostcodeAreas, CensusPostcodeDistricts, CensusPostcodeSectors
from lib.logger import get_logger
and context including class names, function names, and sometimes code from other files:
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: tasks/uk/census/wrapper.py
# class CensusPostcodeAreas(InterpolationTask):
# def table_timespan(self):
# return get_timespan('2011')
#
# def columns(self):
# cols = OrderedDict()
# input_ = self.input()
# cols['pa_id'] = input_['target_geom_columns']['pa_id']
# cols.update(input_['source_data_columns'])
# return cols
#
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeAreasColumns(),
# 'target_geom': PostcodeAreas(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# def get_interpolation_parameters(self):
# params = {
# 'source_data_geoid': 'geographycode',
# 'source_geom_geoid': 'oa_sa',
# 'target_data_geoid': 'pa_id',
# 'target_geom_geoid': 'pa_id',
# 'source_geom_geomfield': 'the_geom',
# 'target_geom_geomfield': 'the_geom',
# }
#
# return params
#
# class CensusPostcodeDistricts(CensusPostcodeEntitiesFromOAs):
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeDistrictsColumns(),
# 'target_geom': PostcodeDistricts(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# class CensusPostcodeSectors(CensusPostcodeEntitiesFromOAs):
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeSectorsColumns(),
# 'target_geom': PostcodeSectors(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
. Output only the next line. | session = current_session() |
Given the following code snippet before the placeholder: <|code_start|>
LOGGER = get_logger(__name__)
GEOGRAPHY_LEVELS = {
'GEO_PA': 'uk.datashare.pa_geo', # Postcode Areas
'GEO_PD': 'uk.odl.pd_geo', # Postcode Districts
'GEO_PS': 'uk.odl.ps_geo', # Postcode Sectors
}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
requirements = {}
if self.geography == 'GEO_PA':
<|code_end|>
, predict the next line using imports from the current file:
import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.meta import current_session
from tasks.uk.census.wrapper import CensusPostcodeAreas, CensusPostcodeDistricts, CensusPostcodeSectors
from lib.logger import get_logger
and context including class names, function names, and sometimes code from other files:
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: tasks/uk/census/wrapper.py
# class CensusPostcodeAreas(InterpolationTask):
# def table_timespan(self):
# return get_timespan('2011')
#
# def columns(self):
# cols = OrderedDict()
# input_ = self.input()
# cols['pa_id'] = input_['target_geom_columns']['pa_id']
# cols.update(input_['source_data_columns'])
# return cols
#
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeAreasColumns(),
# 'target_geom': PostcodeAreas(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# def get_interpolation_parameters(self):
# params = {
# 'source_data_geoid': 'geographycode',
# 'source_geom_geoid': 'oa_sa',
# 'target_data_geoid': 'pa_id',
# 'target_geom_geoid': 'pa_id',
# 'source_geom_geomfield': 'the_geom',
# 'target_geom_geomfield': 'the_geom',
# }
#
# return params
#
# class CensusPostcodeDistricts(CensusPostcodeEntitiesFromOAs):
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeDistrictsColumns(),
# 'target_geom': PostcodeDistricts(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# class CensusPostcodeSectors(CensusPostcodeEntitiesFromOAs):
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeSectorsColumns(),
# 'target_geom': PostcodeSectors(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
. Output only the next line. | requirements['data'] = CensusPostcodeAreas() |
Continue the code snippet: <|code_start|>
LOGGER = get_logger(__name__)
GEOGRAPHY_LEVELS = {
'GEO_PA': 'uk.datashare.pa_geo', # Postcode Areas
'GEO_PD': 'uk.odl.pd_geo', # Postcode Districts
'GEO_PS': 'uk.odl.ps_geo', # Postcode Sectors
}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
requirements = {}
if self.geography == 'GEO_PA':
requirements['data'] = CensusPostcodeAreas()
elif self.geography == 'GEO_PD':
<|code_end|>
. Use current file imports:
import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.meta import current_session
from tasks.uk.census.wrapper import CensusPostcodeAreas, CensusPostcodeDistricts, CensusPostcodeSectors
from lib.logger import get_logger
and context (classes, functions, or code) from other files:
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: tasks/uk/census/wrapper.py
# class CensusPostcodeAreas(InterpolationTask):
# def table_timespan(self):
# return get_timespan('2011')
#
# def columns(self):
# cols = OrderedDict()
# input_ = self.input()
# cols['pa_id'] = input_['target_geom_columns']['pa_id']
# cols.update(input_['source_data_columns'])
# return cols
#
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeAreasColumns(),
# 'target_geom': PostcodeAreas(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# def get_interpolation_parameters(self):
# params = {
# 'source_data_geoid': 'geographycode',
# 'source_geom_geoid': 'oa_sa',
# 'target_data_geoid': 'pa_id',
# 'target_geom_geoid': 'pa_id',
# 'source_geom_geomfield': 'the_geom',
# 'target_geom_geomfield': 'the_geom',
# }
#
# return params
#
# class CensusPostcodeDistricts(CensusPostcodeEntitiesFromOAs):
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeDistrictsColumns(),
# 'target_geom': PostcodeDistricts(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# class CensusPostcodeSectors(CensusPostcodeEntitiesFromOAs):
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeSectorsColumns(),
# 'target_geom': PostcodeSectors(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
. Output only the next line. | requirements['data'] = CensusPostcodeDistricts() |
Based on the snippet: <|code_start|>
LOGGER = get_logger(__name__)
GEOGRAPHY_LEVELS = {
'GEO_PA': 'uk.datashare.pa_geo', # Postcode Areas
'GEO_PD': 'uk.odl.pd_geo', # Postcode Districts
'GEO_PS': 'uk.odl.ps_geo', # Postcode Sectors
}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
requirements = {}
if self.geography == 'GEO_PA':
requirements['data'] = CensusPostcodeAreas()
elif self.geography == 'GEO_PD':
requirements['data'] = CensusPostcodeDistricts()
elif self.geography == 'GEO_PS':
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.meta import current_session
from tasks.uk.census.wrapper import CensusPostcodeAreas, CensusPostcodeDistricts, CensusPostcodeSectors
from lib.logger import get_logger
and context (classes, functions, sometimes code) from other files:
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: tasks/uk/census/wrapper.py
# class CensusPostcodeAreas(InterpolationTask):
# def table_timespan(self):
# return get_timespan('2011')
#
# def columns(self):
# cols = OrderedDict()
# input_ = self.input()
# cols['pa_id'] = input_['target_geom_columns']['pa_id']
# cols.update(input_['source_data_columns'])
# return cols
#
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeAreasColumns(),
# 'target_geom': PostcodeAreas(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# def get_interpolation_parameters(self):
# params = {
# 'source_data_geoid': 'geographycode',
# 'source_geom_geoid': 'oa_sa',
# 'target_data_geoid': 'pa_id',
# 'target_geom_geoid': 'pa_id',
# 'source_geom_geomfield': 'the_geom',
# 'target_geom_geomfield': 'the_geom',
# }
#
# return params
#
# class CensusPostcodeDistricts(CensusPostcodeEntitiesFromOAs):
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeDistrictsColumns(),
# 'target_geom': PostcodeDistricts(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# class CensusPostcodeSectors(CensusPostcodeEntitiesFromOAs):
# def requires(self):
# deps = {
# 'source_geom_columns': OutputAreaColumns(),
# 'source_geom': OutputAreas(),
# 'source_data_columns': CensusColumns(),
# 'source_data': CensusOutputAreas(),
# 'target_geom_columns': PostcodeSectorsColumns(),
# 'target_geom': PostcodeSectors(),
# 'target_data_columns': CensusColumns(),
# }
#
# return deps
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
. Output only the next line. | requirements['data'] = CensusPostcodeSectors() |
Continue the code snippet: <|code_start|>
LOGGER = get_logger(__name__)
GEOGRAPHY_LEVELS = {
'GEO_PR': 'ca.statcan.geo.pr_', # Canada, provinces and territories
'GEO_CD': 'ca.statcan.geo.cd_', # Census divisions
'GEO_DA': 'ca.statcan.geo.da_', # Census dissemination areas
'GEO_FSA': 'ca.statcan.geo.fsa', # Forward Sortation Areas
}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
return {
'nhs': AllNHSTopics(),
<|code_end|>
. Use current file imports:
import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.ca.statcan.data import AllCensusTopics, AllNHSTopics
from tasks.meta import current_session
from lib.logger import get_logger
and context (classes, functions, or code) from other files:
# Path: tasks/ca/statcan/data.py
# class AllCensusTopics(BaseParams, WrapperTask):
# def requires(self):
# topic_range = list(range(1, 11)) # 1-10
#
# for resolution in (GEO_CT, GEO_PR, GEO_CD, GEO_CSD, GEO_CMA, GEO_DA, GEO_FSA):
# for count in topic_range:
# topic = 't{:03d}'.format(count)
# yield Census(resolution=resolution, survey=SURVEY_CEN, topic=topic)
#
# class AllNHSTopics(BaseParams, WrapperTask):
# def requires(self):
# topic_range = list(range(1, 30)) # 1-29
#
# for resolution in (GEO_CT, GEO_PR, GEO_CD, GEO_CSD, GEO_CMA, GEO_DA, GEO_FSA):
# for count in topic_range:
# topic = 't{:03d}'.format(count)
# yield NHS(resolution=resolution, survey=SURVEY_NHS, topic=topic)
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
. Output only the next line. | 'census': AllCensusTopics(), |
Given the code snippet: <|code_start|>
LOGGER = get_logger(__name__)
GEOGRAPHY_LEVELS = {
'GEO_PR': 'ca.statcan.geo.pr_', # Canada, provinces and territories
'GEO_CD': 'ca.statcan.geo.cd_', # Census divisions
'GEO_DA': 'ca.statcan.geo.da_', # Census dissemination areas
'GEO_FSA': 'ca.statcan.geo.fsa', # Forward Sortation Areas
}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
return {
<|code_end|>
, generate the next line using the imports in this file:
import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.ca.statcan.data import AllCensusTopics, AllNHSTopics
from tasks.meta import current_session
from lib.logger import get_logger
and context (functions, classes, or occasionally code) from other files:
# Path: tasks/ca/statcan/data.py
# class AllCensusTopics(BaseParams, WrapperTask):
# def requires(self):
# topic_range = list(range(1, 11)) # 1-10
#
# for resolution in (GEO_CT, GEO_PR, GEO_CD, GEO_CSD, GEO_CMA, GEO_DA, GEO_FSA):
# for count in topic_range:
# topic = 't{:03d}'.format(count)
# yield Census(resolution=resolution, survey=SURVEY_CEN, topic=topic)
#
# class AllNHSTopics(BaseParams, WrapperTask):
# def requires(self):
# topic_range = list(range(1, 30)) # 1-29
#
# for resolution in (GEO_CT, GEO_PR, GEO_CD, GEO_CSD, GEO_CMA, GEO_DA, GEO_FSA):
# for count in topic_range:
# topic = 't{:03d}'.format(count)
# yield NHS(resolution=resolution, survey=SURVEY_NHS, topic=topic)
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
. Output only the next line. | 'nhs': AllNHSTopics(), |
Predict the next line for this snippet: <|code_start|>
LOGGER = get_logger(__name__)
GEOGRAPHY_LEVELS = {
'GEO_PR': 'ca.statcan.geo.pr_', # Canada, provinces and territories
'GEO_CD': 'ca.statcan.geo.cd_', # Census divisions
'GEO_DA': 'ca.statcan.geo.da_', # Census dissemination areas
'GEO_FSA': 'ca.statcan.geo.fsa', # Forward Sortation Areas
}
class Measurements2CSV(Task):
geography = Parameter()
file_name = Parameter()
def __init__(self, *args, **kwargs):
super(Measurements2CSV, self).__init__(*args, **kwargs)
def requires(self):
return {
'nhs': AllNHSTopics(),
'census': AllCensusTopics(),
}
def _get_config_data(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
with (open('{}/{}'.format(dir_path, 'measurements.json'))) as f:
return json.load(f)
def run(self):
<|code_end|>
with the help of current file imports:
import os
import json
import csv
from luigi import Task, WrapperTask, Parameter
from luigi.local_target import LocalTarget
from tasks.ca.statcan.data import AllCensusTopics, AllNHSTopics
from tasks.meta import current_session
from lib.logger import get_logger
and context from other files:
# Path: tasks/ca/statcan/data.py
# class AllCensusTopics(BaseParams, WrapperTask):
# def requires(self):
# topic_range = list(range(1, 11)) # 1-10
#
# for resolution in (GEO_CT, GEO_PR, GEO_CD, GEO_CSD, GEO_CMA, GEO_DA, GEO_FSA):
# for count in topic_range:
# topic = 't{:03d}'.format(count)
# yield Census(resolution=resolution, survey=SURVEY_CEN, topic=topic)
#
# class AllNHSTopics(BaseParams, WrapperTask):
# def requires(self):
# topic_range = list(range(1, 30)) # 1-29
#
# for resolution in (GEO_CT, GEO_PR, GEO_CD, GEO_CSD, GEO_CMA, GEO_DA, GEO_FSA):
# for count in topic_range:
# topic = 't{:03d}'.format(count)
# yield NHS(resolution=resolution, survey=SURVEY_NHS, topic=topic)
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: lib/logger.py
# def get_logger(name):
# '''
# Obtain a logger outputing to stderr with specified name. Defaults to INFO
# log level.
# '''
# logger = logging.getLogger(name)
# if LUIGI_LOG_ARG in sys.argv:
# index = sys.argv.index(LUIGI_LOG_ARG) + 1
# if len(sys.argv) > index:
# logger.setLevel(sys.argv[index])
#
# return logger
, which may contain function names, class names, or code. Output only the next line. | session = current_session() |
Using the snippet: <|code_start|> def get_url(self):
return self.URL
class ImportScotland(TempTableTask):
table = Parameter()
def requires(self):
return DownloadScotlandLocal()
@staticmethod
def id_to_column(colid):
return sanitize_identifier(colid)
def run(self):
infile = os.path.join(self.input().path, self.table + '.csv')
cols = OrderedDict({'geographycode': 'TEXT PRIMARY KEY'})
with open(infile) as csvfile:
reader = csv.reader(csvfile)
header = next(reader)
for c in header[1:]:
cols[self.id_to_column(c)] = 'NUMERIC'
with open(infile) as csvfile:
copy_from_csv(
current_session(),
self.output().table,
cols,
<|code_end|>
, determine the next line of code. You have imports:
from collections import OrderedDict
from luigi import Parameter
from lib.csv_stream import CSVNormalizerStream
from lib.copy import copy_from_csv
from tasks.base_tasks import RepoFileUnzipTask, TempTableTask
from tasks.meta import current_session
from tasks.util import copyfile
from .metadata import sanitize_identifier
import csv
import os
and context (class names, function names, or code) available:
# Path: lib/csv_stream.py
# class CSVNormalizerStream(io.IOBase):
# '''
# Filter for applying a function to each line of a CSV file, compatible with iostreams
# '''
# def __init__(self, infile, func):
# '''
# :param infile: A stream that reads a CSV file. e.g: a file
# :param func: A function to apply to each CSV row. It takes an array of fields and returns an array of fields.
# '''
# self._csvreader = csv.reader(infile)
# self._buffer = ''
# self._func = func
#
# def read(self, nbytes):
# try:
# while len(self._buffer) < nbytes:
# self.getline()
# out, self._buffer = self._buffer[:nbytes], self._buffer[nbytes:]
# return out
# except StopIteration:
# out = self._buffer
# self._buffer = ''
# return out
#
# def getline(self):
# line = next(self._csvreader)
# clean_line = self._func(line)
# self._buffer += (','.join(clean_line) + ' \n')
#
# Path: lib/copy.py
# def copy_from_csv(session, table_name, columns, csv_stream):
# '''
# Creates a table, loading the data from a .csv file.
#
# :param session: A SQL Alchemy session
# :param table_name: Output table name
# :param columns: Dictionary of columns, keys are named, values are types.
# :param csv_stream: A stream that reads a CSV file. e.g: a file or a
# :class:`CSVNormalizerStream <lib.csv_stream.CSVNormalizerStream>`
# '''
# with session.connection().connection.cursor() as cursor:
# cursor.execute('CREATE TABLE {output} ({cols})'.format(
# output=table_name,
# cols=', '.join(['{name} {type}'.format(name=k, type=v) for k, v in columns.items()])
# ))
#
# cursor.copy_expert(
# 'COPY {table} ({cols}) FROM stdin WITH (FORMAT CSV, HEADER)'.format(
# cols=', '.join(columns.keys()),
# table=table_name),
# csv_stream)
#
# Path: tasks/base_tasks.py
# class RepoFileUnzipTask(RepoFileUncompressTask):
# '''
# Download a zip file to location {output}.zip and unzip it to the folder
# {output}.
# '''
# compressed_extension = 'zip'
#
# def uncompress(self):
# uncompress_file(self.output().path)
#
# class TempTableTask(Task):
# '''
# A Task that generates a table that will not be referred to in metadata.
#
# This is useful for intermediate processing steps that can benefit from the
# session guarantees of the ETL, as well as automatic table naming.
#
# :param force: Optional Boolean, ``False`` by default. If ``True``, will
# overwrite output table even if it exists already.
# '''
#
# force = BoolParameter(default=False, significant=False)
#
# def on_failure(self, ex):
# session_rollback(self, ex)
# super(TempTableTask, self).on_failure(ex)
#
# def on_success(self):
# session_commit(self)
#
# def target_tablename(self):
# '''
# Can be overriden to expose a different table name
# :return:
# '''
# return unqualified_task_id(self.task_id)[:MAX_PG_IDENTIFIER_LENGTH]
#
# def run(self):
# '''
# Must be overriden by subclass. Should create and populate a table
# named from ``self.output().table``
#
# If this completes without exceptions, the :func:`~.util.current_session
# will be committed; if there is an exception, it will be rolled back.
# '''
# raise Exception('Must override `run`')
#
# def output(self):
# '''
# By default, returns a :class:`~.targets.TableTarget` whose associated
# table lives in a special-purpose schema in Postgres derived using
# :func:`~.util.classpath`.
# '''
# return PostgresTarget(classpath(self), self.target_tablename())
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: tasks/util.py
# def copyfile(src, dst):
# # From http://luigi.readthedocs.io/en/stable/_modules/luigi/local_target.html#LocalTarget.makedirs
# normpath = os.path.normpath(dst)
# parentfolder = os.path.dirname(normpath)
# if parentfolder:
# try:
# os.makedirs(parentfolder, exist_ok=True)
# except OSError as e:
# LOGGER.error('Error making dirs for copyfile: %s', e)
# pass
#
# shutil.copyfile(src, dst)
#
# Path: tasks/uk/census/metadata.py
# def sanitize_identifier(colid):
# return DISALLOWED_CHARACTERS_RE.sub('_', '_'.join(colid.split(';')[0].split(':')[-2:]))
. Output only the next line. | CSVNormalizerStream(csvfile, lambda row: ['0' if f == '-' else f.replace(',', '') for f in row]) |
Given the following code snippet before the placeholder: <|code_start|># http://www.scotlandscensus.gov.uk/ods-web/data-warehouse.html#bulkdatatab
class DownloadScotlandLocal(RepoFileUnzipTask):
URL = 'http://www.scotlandscensus.gov.uk/ods-web/download/getDownloadFile.html?downloadFileIds=Output%20Area%20blk'
def get_url(self):
return self.URL
<|code_end|>
, predict the next line using imports from the current file:
from collections import OrderedDict
from luigi import Parameter
from lib.csv_stream import CSVNormalizerStream
from lib.copy import copy_from_csv
from tasks.base_tasks import RepoFileUnzipTask, TempTableTask
from tasks.meta import current_session
from tasks.util import copyfile
from .metadata import sanitize_identifier
import csv
import os
and context including class names, function names, and sometimes code from other files:
# Path: lib/csv_stream.py
# class CSVNormalizerStream(io.IOBase):
# '''
# Filter for applying a function to each line of a CSV file, compatible with iostreams
# '''
# def __init__(self, infile, func):
# '''
# :param infile: A stream that reads a CSV file. e.g: a file
# :param func: A function to apply to each CSV row. It takes an array of fields and returns an array of fields.
# '''
# self._csvreader = csv.reader(infile)
# self._buffer = ''
# self._func = func
#
# def read(self, nbytes):
# try:
# while len(self._buffer) < nbytes:
# self.getline()
# out, self._buffer = self._buffer[:nbytes], self._buffer[nbytes:]
# return out
# except StopIteration:
# out = self._buffer
# self._buffer = ''
# return out
#
# def getline(self):
# line = next(self._csvreader)
# clean_line = self._func(line)
# self._buffer += (','.join(clean_line) + ' \n')
#
# Path: lib/copy.py
# def copy_from_csv(session, table_name, columns, csv_stream):
# '''
# Creates a table, loading the data from a .csv file.
#
# :param session: A SQL Alchemy session
# :param table_name: Output table name
# :param columns: Dictionary of columns, keys are named, values are types.
# :param csv_stream: A stream that reads a CSV file. e.g: a file or a
# :class:`CSVNormalizerStream <lib.csv_stream.CSVNormalizerStream>`
# '''
# with session.connection().connection.cursor() as cursor:
# cursor.execute('CREATE TABLE {output} ({cols})'.format(
# output=table_name,
# cols=', '.join(['{name} {type}'.format(name=k, type=v) for k, v in columns.items()])
# ))
#
# cursor.copy_expert(
# 'COPY {table} ({cols}) FROM stdin WITH (FORMAT CSV, HEADER)'.format(
# cols=', '.join(columns.keys()),
# table=table_name),
# csv_stream)
#
# Path: tasks/base_tasks.py
# class RepoFileUnzipTask(RepoFileUncompressTask):
# '''
# Download a zip file to location {output}.zip and unzip it to the folder
# {output}.
# '''
# compressed_extension = 'zip'
#
# def uncompress(self):
# uncompress_file(self.output().path)
#
# class TempTableTask(Task):
# '''
# A Task that generates a table that will not be referred to in metadata.
#
# This is useful for intermediate processing steps that can benefit from the
# session guarantees of the ETL, as well as automatic table naming.
#
# :param force: Optional Boolean, ``False`` by default. If ``True``, will
# overwrite output table even if it exists already.
# '''
#
# force = BoolParameter(default=False, significant=False)
#
# def on_failure(self, ex):
# session_rollback(self, ex)
# super(TempTableTask, self).on_failure(ex)
#
# def on_success(self):
# session_commit(self)
#
# def target_tablename(self):
# '''
# Can be overriden to expose a different table name
# :return:
# '''
# return unqualified_task_id(self.task_id)[:MAX_PG_IDENTIFIER_LENGTH]
#
# def run(self):
# '''
# Must be overriden by subclass. Should create and populate a table
# named from ``self.output().table``
#
# If this completes without exceptions, the :func:`~.util.current_session
# will be committed; if there is an exception, it will be rolled back.
# '''
# raise Exception('Must override `run`')
#
# def output(self):
# '''
# By default, returns a :class:`~.targets.TableTarget` whose associated
# table lives in a special-purpose schema in Postgres derived using
# :func:`~.util.classpath`.
# '''
# return PostgresTarget(classpath(self), self.target_tablename())
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: tasks/util.py
# def copyfile(src, dst):
# # From http://luigi.readthedocs.io/en/stable/_modules/luigi/local_target.html#LocalTarget.makedirs
# normpath = os.path.normpath(dst)
# parentfolder = os.path.dirname(normpath)
# if parentfolder:
# try:
# os.makedirs(parentfolder, exist_ok=True)
# except OSError as e:
# LOGGER.error('Error making dirs for copyfile: %s', e)
# pass
#
# shutil.copyfile(src, dst)
#
# Path: tasks/uk/census/metadata.py
# def sanitize_identifier(colid):
# return DISALLOWED_CHARACTERS_RE.sub('_', '_'.join(colid.split(';')[0].split(':')[-2:]))
. Output only the next line. | class ImportScotland(TempTableTask): |
Given snippet: <|code_start|># http://www.scotlandscensus.gov.uk/ods-web/data-warehouse.html#bulkdatatab
class DownloadScotlandLocal(RepoFileUnzipTask):
URL = 'http://www.scotlandscensus.gov.uk/ods-web/download/getDownloadFile.html?downloadFileIds=Output%20Area%20blk'
def get_url(self):
return self.URL
class ImportScotland(TempTableTask):
table = Parameter()
def requires(self):
return DownloadScotlandLocal()
@staticmethod
def id_to_column(colid):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from collections import OrderedDict
from luigi import Parameter
from lib.csv_stream import CSVNormalizerStream
from lib.copy import copy_from_csv
from tasks.base_tasks import RepoFileUnzipTask, TempTableTask
from tasks.meta import current_session
from tasks.util import copyfile
from .metadata import sanitize_identifier
import csv
import os
and context:
# Path: lib/csv_stream.py
# class CSVNormalizerStream(io.IOBase):
# '''
# Filter for applying a function to each line of a CSV file, compatible with iostreams
# '''
# def __init__(self, infile, func):
# '''
# :param infile: A stream that reads a CSV file. e.g: a file
# :param func: A function to apply to each CSV row. It takes an array of fields and returns an array of fields.
# '''
# self._csvreader = csv.reader(infile)
# self._buffer = ''
# self._func = func
#
# def read(self, nbytes):
# try:
# while len(self._buffer) < nbytes:
# self.getline()
# out, self._buffer = self._buffer[:nbytes], self._buffer[nbytes:]
# return out
# except StopIteration:
# out = self._buffer
# self._buffer = ''
# return out
#
# def getline(self):
# line = next(self._csvreader)
# clean_line = self._func(line)
# self._buffer += (','.join(clean_line) + ' \n')
#
# Path: lib/copy.py
# def copy_from_csv(session, table_name, columns, csv_stream):
# '''
# Creates a table, loading the data from a .csv file.
#
# :param session: A SQL Alchemy session
# :param table_name: Output table name
# :param columns: Dictionary of columns, keys are named, values are types.
# :param csv_stream: A stream that reads a CSV file. e.g: a file or a
# :class:`CSVNormalizerStream <lib.csv_stream.CSVNormalizerStream>`
# '''
# with session.connection().connection.cursor() as cursor:
# cursor.execute('CREATE TABLE {output} ({cols})'.format(
# output=table_name,
# cols=', '.join(['{name} {type}'.format(name=k, type=v) for k, v in columns.items()])
# ))
#
# cursor.copy_expert(
# 'COPY {table} ({cols}) FROM stdin WITH (FORMAT CSV, HEADER)'.format(
# cols=', '.join(columns.keys()),
# table=table_name),
# csv_stream)
#
# Path: tasks/base_tasks.py
# class RepoFileUnzipTask(RepoFileUncompressTask):
# '''
# Download a zip file to location {output}.zip and unzip it to the folder
# {output}.
# '''
# compressed_extension = 'zip'
#
# def uncompress(self):
# uncompress_file(self.output().path)
#
# class TempTableTask(Task):
# '''
# A Task that generates a table that will not be referred to in metadata.
#
# This is useful for intermediate processing steps that can benefit from the
# session guarantees of the ETL, as well as automatic table naming.
#
# :param force: Optional Boolean, ``False`` by default. If ``True``, will
# overwrite output table even if it exists already.
# '''
#
# force = BoolParameter(default=False, significant=False)
#
# def on_failure(self, ex):
# session_rollback(self, ex)
# super(TempTableTask, self).on_failure(ex)
#
# def on_success(self):
# session_commit(self)
#
# def target_tablename(self):
# '''
# Can be overriden to expose a different table name
# :return:
# '''
# return unqualified_task_id(self.task_id)[:MAX_PG_IDENTIFIER_LENGTH]
#
# def run(self):
# '''
# Must be overriden by subclass. Should create and populate a table
# named from ``self.output().table``
#
# If this completes without exceptions, the :func:`~.util.current_session
# will be committed; if there is an exception, it will be rolled back.
# '''
# raise Exception('Must override `run`')
#
# def output(self):
# '''
# By default, returns a :class:`~.targets.TableTarget` whose associated
# table lives in a special-purpose schema in Postgres derived using
# :func:`~.util.classpath`.
# '''
# return PostgresTarget(classpath(self), self.target_tablename())
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
#
# Path: tasks/util.py
# def copyfile(src, dst):
# # From http://luigi.readthedocs.io/en/stable/_modules/luigi/local_target.html#LocalTarget.makedirs
# normpath = os.path.normpath(dst)
# parentfolder = os.path.dirname(normpath)
# if parentfolder:
# try:
# os.makedirs(parentfolder, exist_ok=True)
# except OSError as e:
# LOGGER.error('Error making dirs for copyfile: %s', e)
# pass
#
# shutil.copyfile(src, dst)
#
# Path: tasks/uk/census/metadata.py
# def sanitize_identifier(colid):
# return DISALLOWED_CHARACTERS_RE.sub('_', '_'.join(colid.split(';')[0].split(':')[-2:]))
which might include code, classes, or functions. Output only the next line. | return sanitize_identifier(colid) |
Given snippet: <|code_start|>
QUARTERS = {1: 'first',
2: 'second',
3: 'third',
4: 'fourth'}
def get_timespan(timespan_id):
ts_id, ts_alias, ts_name, ts_description, ts_timespan = parse_timespan(timespan_id)
session = current_session()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
import calendar
from dateutil.parser import parse
from tasks.meta import OBSTimespan, current_session
and context:
# Path: tasks/meta.py
# class OBSTimespan(UniqueMixin, Base):
# '''
# Describes a timespan table in our database.
# '''
# __tablename__ = 'obs_timespan'
#
# id = Column(Text, primary_key=True) # fully-qualified id
# alias = Column(Text) # alias for backwards compatibility
# name = Column(Text) # human-readable name
# description = Column(Text) # human-readable description
# timespan = Column(DATERANGE)
# weight = Column(Integer, default=0)
#
# @classmethod
# def unique_hash(cls, id):
# return id
#
# @classmethod
# def unique_filter(cls, query, id):
# return query.filter(OBSTimespan.id == id)
#
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
which might include code, classes, or functions. Output only the next line. | return OBSTimespan.as_unique(session, ts_id, id=ts_id, alias=ts_alias, name=ts_name, |
Given the code snippet: <|code_start|>
QUARTERS = {1: 'first',
2: 'second',
3: 'third',
4: 'fourth'}
def get_timespan(timespan_id):
ts_id, ts_alias, ts_name, ts_description, ts_timespan = parse_timespan(timespan_id)
<|code_end|>
, generate the next line using the imports in this file:
import re
import calendar
from dateutil.parser import parse
from tasks.meta import OBSTimespan, current_session
and context (functions, classes, or occasionally code) from other files:
# Path: tasks/meta.py
# class OBSTimespan(UniqueMixin, Base):
# '''
# Describes a timespan table in our database.
# '''
# __tablename__ = 'obs_timespan'
#
# id = Column(Text, primary_key=True) # fully-qualified id
# alias = Column(Text) # alias for backwards compatibility
# name = Column(Text) # human-readable name
# description = Column(Text) # human-readable description
# timespan = Column(DATERANGE)
# weight = Column(Integer, default=0)
#
# @classmethod
# def unique_hash(cls, id):
# return id
#
# @classmethod
# def unique_filter(cls, query, id):
# return query.filter(OBSTimespan.id == id)
#
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
. Output only the next line. | session = current_session() |
Given snippet: <|code_start|>
@with_setup(setup, teardown)
def test_empty_obs_meta_to_local():
'''
OBSMetaToLocal should work even if tables are empty. Should result in
creation of blank obs_meta, obs_meta_numer, obs_meta_denom, obs_meta_geom,
obs_meta_timespan tables.
'''
imp.reload(tasks.carto)
task = tasks.carto.OBSMetaToLocal()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from collections import OrderedDict
from luigi import Parameter
from nose.tools import (assert_equals, with_setup, assert_raises, assert_in,
assert_is_none, assert_true, assert_false)
from tests.util import runtask, setup, teardown, FakeTask
from tasks.meta import current_session
import tasks.carto
import imp
and context:
# Path: tests/util.py
# def runtask(task, superclasses=None):
# '''
# Run deps of tasks then the task, faking session management
#
# superclasses is a list of classes that we will be willing to run as
# pre-reqs, other pre-reqs will be ignored. Can be useful when testing to
# only run metadata classes, for example.
# '''
# from lib.logger import get_logger
# LOGGER = get_logger(__name__)
# if task.complete():
# return
# for dep in task.deps():
# if superclasses:
# for klass in superclasses:
# if isinstance(dep, klass):
# runtask(dep, superclasses=superclasses)
# assert dep.complete() is True, 'dependency {} not complete for class {}'.format(dep, klass)
# else:
# runtask(dep)
# assert dep.complete() is True, 'dependency {} not complete'.format(dep)
# try:
# before = time()
# for klass, cb_dict in task._event_callbacks.items():
# if isinstance(task, klass):
# start_callbacks = cb_dict.get('event.core.start', [])
# for scb in start_callbacks:
# scb(task)
# task.run()
# task.on_success()
# after = time()
# LOGGER.warn('runtask timing %s: %s', task, round(after - before, 2))
# except Exception as exc:
# task.on_failure(exc)
# raise
#
# def setup():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.execute('CREATE SCHEMA observatory')
# session.commit()
# Base.metadata.create_all()
# session.close()
#
# def teardown():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# Base.metadata.drop_all()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.commit()
# session.close()
#
# class FakeTask(object):
#
# task_id = 'fake'
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
which might include code, classes, or functions. Output only the next line. | runtask(task) |
Given snippet: <|code_start|>
@with_setup(setup, teardown)
def test_empty_obs_meta_to_local():
'''
OBSMetaToLocal should work even if tables are empty. Should result in
creation of blank obs_meta, obs_meta_numer, obs_meta_denom, obs_meta_geom,
obs_meta_timespan tables.
'''
imp.reload(tasks.carto)
task = tasks.carto.OBSMetaToLocal()
runtask(task)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from collections import OrderedDict
from luigi import Parameter
from nose.tools import (assert_equals, with_setup, assert_raises, assert_in,
assert_is_none, assert_true, assert_false)
from tests.util import runtask, setup, teardown, FakeTask
from tasks.meta import current_session
import tasks.carto
import imp
and context:
# Path: tests/util.py
# def runtask(task, superclasses=None):
# '''
# Run deps of tasks then the task, faking session management
#
# superclasses is a list of classes that we will be willing to run as
# pre-reqs, other pre-reqs will be ignored. Can be useful when testing to
# only run metadata classes, for example.
# '''
# from lib.logger import get_logger
# LOGGER = get_logger(__name__)
# if task.complete():
# return
# for dep in task.deps():
# if superclasses:
# for klass in superclasses:
# if isinstance(dep, klass):
# runtask(dep, superclasses=superclasses)
# assert dep.complete() is True, 'dependency {} not complete for class {}'.format(dep, klass)
# else:
# runtask(dep)
# assert dep.complete() is True, 'dependency {} not complete'.format(dep)
# try:
# before = time()
# for klass, cb_dict in task._event_callbacks.items():
# if isinstance(task, klass):
# start_callbacks = cb_dict.get('event.core.start', [])
# for scb in start_callbacks:
# scb(task)
# task.run()
# task.on_success()
# after = time()
# LOGGER.warn('runtask timing %s: %s', task, round(after - before, 2))
# except Exception as exc:
# task.on_failure(exc)
# raise
#
# def setup():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.execute('CREATE SCHEMA observatory')
# session.commit()
# Base.metadata.create_all()
# session.close()
#
# def teardown():
# from tasks.meta import current_session, Base
# if Base.metadata.bind.url.database != 'test':
# raise Exception('Can only run tests on database "test"')
# session = current_session()
# session.rollback()
# Base.metadata.drop_all()
# session.execute('DROP SCHEMA IF EXISTS observatory CASCADE')
# session.commit()
# session.close()
#
# class FakeTask(object):
#
# task_id = 'fake'
#
# Path: tasks/meta.py
# def current_session():
# '''
# Returns the session relevant to the currently operating :class:`Task`, if
# any. Outside the context of a :class:`Task`, this can still be used for
# manual session management.
# '''
# return _current_session.get()
which might include code, classes, or functions. Output only the next line. | session = current_session() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.