| Instruction (string, length 362–7.83k) | output_code (string, length 1–945) |
|---|---|
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
class ProfileForm(forms.ModelForm):
first_name = forms.CharField(label=_('First name'), required=False)
last_name = forms.CharField(label=_('Last name'), required=False)
email = forms.EmailField(label=_('E-mail'))
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
<|code_end|>
, determine the next line of code. You have imports:
from django import forms
from django.utils.translation import ugettext_lazy as _
from userprofiles.settings import up_settings
from userprofiles.utils import UserProfile
and context (class names, function names, or code) available:
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
. Output only the next line. | if not up_settings.PROFILE_ALLOW_EMAIL_CHANGE: |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
class ChangeEmailForm(forms.Form):
new_email = forms.EmailField(label=_('New e-mail address'), required=True)
def clean_new_email(self):
new_email = self.cleaned_data['new_email']
user_emails = User.objects.filter(email__iexact=new_email).count()
<|code_end|>
, predict the immediate next line with the help of imports:
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from userprofiles.contrib.emailverification.models import EmailVerification
and context (classes, functions, sometimes code) from other files:
# Path: userprofiles/contrib/emailverification/models.py
# class EmailVerification(models.Model):
# user = models.ForeignKey(User, verbose_name=_('User'), blank=False)
# old_email = models.EmailField(_('Old e-mail address'))
# new_email = models.EmailField(_('New e-mail address'))
#
# token = models.CharField(_('Token'), max_length=40, default=generate_token)
# code = models.CharField(_('Code'), max_length=40, default=generate_token)
#
# is_approved = models.BooleanField(_('Approved'), default=False)
# is_expired = models.BooleanField(_('Expired'), default=False)
#
# expiration_date = models.DateTimeField(_('Expiration date'),
# default=generate_confirm_expire_date)
#
# def __unicode__(self):
# return '%s - %s/%s' % (self.user, self.old_email, self.new_email)
#
# def save(self, *args, **kwargs):
# if self.is_approved:
# EmailVerification.objects.filter(
# user=self.user, is_approved=False).update(is_expired=True)
#
# self.is_expired = True
#
# if self.user.email == self.old_email:
# self.user.email = self.new_email
# self.user.save()
# return super(EmailVerification, self).save(*args, **kwargs)
#
# class Meta:
# app_label = 'userprofiles'
# verbose_name = _('E-mail verification')
# verbose_name_plural = _('E-mail verifications')
. Output only the next line. | verification_emails = EmailVerification.objects.filter( |
Continue the code snippet: <|code_start|>
class ModelsTests(TestCase):
def setUp(self):
self.data = {
'username': 'newuser',
'email': 'newuser@example.com',
'password': 'newuserpass',
}
def test_activate_user(self):
<|code_end|>
. Use current file imports:
from datetime import timedelta
from django.contrib.auth.models import User
from django.test import TestCase
from userprofiles.contrib.accountverification.models import AccountVerification
from userprofiles.settings import up_settings
and context (classes, functions, or code) from other files:
# Path: userprofiles/contrib/accountverification/models.py
# class AccountVerification(models.Model):
# ACTIVATED = 'ALREADY_ACTIVATED'
#
# user = models.ForeignKey(User, unique=True, verbose_name=_('User'))
# activation_key = models.CharField(_('Activation key'), max_length=40)
#
# objects = AccountVerificationManager()
#
# def __unicode__(self):
# return u'Account verification: %s' % self.user
#
# def activation_key_expired(self):
# expiration_date = timedelta(days=up_settings.ACCOUNT_VERIFICATION_DAYS)
# return (self.activation_key == self.ACTIVATED
# or (self.user.date_joined + expiration_date <= timezone.now()))
# activation_key_expired.boolean = True
#
# class Meta:
# app_label = 'userprofiles'
# verbose_name = _('Account verification')
# verbose_name_plural = _('Account verifications')
#
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
. Output only the next line. | user = AccountVerification.objects.create_inactive_user( |
Here is a snippet: <|code_start|>
class ModelsTests(TestCase):
def setUp(self):
self.data = {
'username': 'newuser',
'email': 'newuser@example.com',
'password': 'newuserpass',
}
def test_activate_user(self):
user = AccountVerification.objects.create_inactive_user(
self.data['username'], self.data['password'], self.data['email'])
user.date_joined = user.date_joined - timedelta(
<|code_end|>
. Write the next line using the current file imports:
from datetime import timedelta
from django.contrib.auth.models import User
from django.test import TestCase
from userprofiles.contrib.accountverification.models import AccountVerification
from userprofiles.settings import up_settings
and context from other files:
# Path: userprofiles/contrib/accountverification/models.py
# class AccountVerification(models.Model):
# ACTIVATED = 'ALREADY_ACTIVATED'
#
# user = models.ForeignKey(User, unique=True, verbose_name=_('User'))
# activation_key = models.CharField(_('Activation key'), max_length=40)
#
# objects = AccountVerificationManager()
#
# def __unicode__(self):
# return u'Account verification: %s' % self.user
#
# def activation_key_expired(self):
# expiration_date = timedelta(days=up_settings.ACCOUNT_VERIFICATION_DAYS)
# return (self.activation_key == self.ACTIVATED
# or (self.user.date_joined + expiration_date <= timezone.now()))
# activation_key_expired.boolean = True
#
# class Meta:
# app_label = 'userprofiles'
# verbose_name = _('Account verification')
# verbose_name_plural = _('Account verifications')
#
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
, which may include functions, classes, or code. Output only the next line. | days=up_settings.ACCOUNT_VERIFICATION_DAYS, seconds=1) |
Given the code snippet: <|code_start|> verification = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not verification.activation_key_expired():
user = verification.user
user.is_active = True
user.save()
verification.activation_key = self.model.ACTIVATED
verification.save()
return user
return False
def create_inactive_user(self, username, password, email):
new_user = User.objects.create_user(username, email, password)
new_user.is_active = False
new_user.save()
account_verification = self.create_verification(new_user)
current_site = Site.objects.get_current()
subject = ''.join(render_to_string(
'userprofiles/mails/activation_email_subject.html',
{'site': current_site}).splitlines())
message = render_to_string('userprofiles/mails/activation_email.html', {
'activation_key': account_verification.activation_key,
<|code_end|>
, generate the next line using the imports in this file:
from datetime import timedelta
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from userprofiles.settings import up_settings
import hashlib
import random
import re
import uuid
and context (functions, classes, or occasionally code) from other files:
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
. Output only the next line. | 'expiration_days': up_settings.ACCOUNT_VERIFICATION_DAYS, |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
class AccountVerificationAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'activation_key_expired')
search_fields = ('user__username', 'user__first_name', 'user__last_name')
<|code_end|>
, predict the next line using imports from the current file:
from django.contrib import admin
from userprofiles.contrib.accountverification.models import AccountVerification
and context including class names, function names, and sometimes code from other files:
# Path: userprofiles/contrib/accountverification/models.py
# class AccountVerification(models.Model):
# ACTIVATED = 'ALREADY_ACTIVATED'
#
# user = models.ForeignKey(User, unique=True, verbose_name=_('User'))
# activation_key = models.CharField(_('Activation key'), max_length=40)
#
# objects = AccountVerificationManager()
#
# def __unicode__(self):
# return u'Account verification: %s' % self.user
#
# def activation_key_expired(self):
# expiration_date = timedelta(days=up_settings.ACCOUNT_VERIFICATION_DAYS)
# return (self.activation_key == self.ACTIVATED
# or (self.user.date_joined + expiration_date <= timezone.now()))
# activation_key_expired.boolean = True
#
# class Meta:
# app_label = 'userprofiles'
# verbose_name = _('Account verification')
# verbose_name_plural = _('Account verifications')
. Output only the next line. | admin.site.register(AccountVerification, AccountVerificationAdmin) |
Predict the next line for this snippet: <|code_start|>
@override_settings(USE_ACCOUNT_VERIFICATION=True)
class ViewTests(TestCase):
def setUp(self):
self.data = {
'username': 'newuser',
'email': 'newuser@example.com',
'password': 'newuserpass',
}
def test_registration_activate(self):
<|code_end|>
with the help of current file imports:
import re
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from userprofiles.contrib.accountverification.models import AccountVerification
and context from other files:
# Path: userprofiles/contrib/accountverification/models.py
# class AccountVerification(models.Model):
# ACTIVATED = 'ALREADY_ACTIVATED'
#
# user = models.ForeignKey(User, unique=True, verbose_name=_('User'))
# activation_key = models.CharField(_('Activation key'), max_length=40)
#
# objects = AccountVerificationManager()
#
# def __unicode__(self):
# return u'Account verification: %s' % self.user
#
# def activation_key_expired(self):
# expiration_date = timedelta(days=up_settings.ACCOUNT_VERIFICATION_DAYS)
# return (self.activation_key == self.ACTIVATED
# or (self.user.date_joined + expiration_date <= timezone.now()))
# activation_key_expired.boolean = True
#
# class Meta:
# app_label = 'userprofiles'
# verbose_name = _('Account verification')
# verbose_name_plural = _('Account verifications')
, which may contain function names, class names, or code. Output only the next line. | AccountVerification.objects.create_inactive_user( |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
def generate_token():
return str(uuid.uuid4())
def generate_confirm_expire_date():
<|code_end|>
. Use current file imports:
from datetime import timedelta
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from userprofiles.settings import up_settings
import uuid
and context (classes, functions, or code) from other files:
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
. Output only the next line. | return timezone.now() + timedelta(days=up_settings.EMAIL_VERIFICATION_DAYS) |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
class EmailVerificationAdmin(admin.ModelAdmin):
list_display = ('user', 'old_email', 'new_email', 'expiration_date',
'is_approved', 'is_expired')
list_filter = ('is_approved', 'is_expired')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.contrib import admin
from userprofiles.contrib.emailverification.models import EmailVerification
and context:
# Path: userprofiles/contrib/emailverification/models.py
# class EmailVerification(models.Model):
# user = models.ForeignKey(User, verbose_name=_('User'), blank=False)
# old_email = models.EmailField(_('Old e-mail address'))
# new_email = models.EmailField(_('New e-mail address'))
#
# token = models.CharField(_('Token'), max_length=40, default=generate_token)
# code = models.CharField(_('Code'), max_length=40, default=generate_token)
#
# is_approved = models.BooleanField(_('Approved'), default=False)
# is_expired = models.BooleanField(_('Expired'), default=False)
#
# expiration_date = models.DateTimeField(_('Expiration date'),
# default=generate_confirm_expire_date)
#
# def __unicode__(self):
# return '%s - %s/%s' % (self.user, self.old_email, self.new_email)
#
# def save(self, *args, **kwargs):
# if self.is_approved:
# EmailVerification.objects.filter(
# user=self.user, is_approved=False).update(is_expired=True)
#
# self.is_expired = True
#
# if self.user.email == self.old_email:
# self.user.email = self.new_email
# self.user.save()
# return super(EmailVerification, self).save(*args, **kwargs)
#
# class Meta:
# app_label = 'userprofiles'
# verbose_name = _('E-mail verification')
# verbose_name_plural = _('E-mail verifications')
which might include code, classes, or functions. Output only the next line. | admin.site.register(EmailVerification, EmailVerificationAdmin) |
Based on the snippet: <|code_start|>
class ProfileRegistrationForm(RegistrationForm):
short_info = forms.CharField(widget=forms.Textarea)
def save_profile(self, new_user, *args, **kwargs):
<|code_end|>
, predict the immediate next line with the help of imports:
from django import forms
from .models import Profile
from userprofiles.forms import RegistrationForm
and context (classes, functions, sometimes code) from other files:
# Path: test_project/test_accounts/models.py
# class Profile(models.Model):
# user = models.OneToOneField(User)
#
# short_info = models.TextField(blank=True)
#
# Path: userprofiles/forms.py
# class RegistrationForm(forms.Form):
# username = forms.RegexField(label=_("Username"), max_length=30,
# regex=r'^[\w.-]+$', error_messages={'invalid': _(
# 'This value may contain only letters, numbers and ./-/_ characters.')})
#
# email = forms.EmailField(label=_('E-mail'))
# email_repeat = forms.EmailField(label=_('E-mail (repeat)'), required=True)
#
# password = forms.CharField(label=_('Password'),
# widget=forms.PasswordInput(render_value=False))
# password_repeat = forms.CharField(label=_('Password (repeat)'),
# widget=forms.PasswordInput(render_value=False))
#
# first_name = forms.CharField(label=_('First name'), required=False)
# last_name = forms.CharField(label=_('Last name'), required=False)
#
# def __init__(self, *args, **kwargs):
# super(RegistrationForm, self).__init__(*args, **kwargs)
#
# if not up_settings.DOUBLE_CHECK_EMAIL:
# del self.fields['email_repeat']
#
# if not up_settings.DOUBLE_CHECK_PASSWORD:
# del self.fields['password_repeat']
#
# if not up_settings.REGISTRATION_FULLNAME:
# del self.fields['first_name']
# del self.fields['last_name']
#
# if up_settings.EMAIL_ONLY:
# self.fields['username'].widget = forms.widgets.HiddenInput()
# self.fields['username'].required = False
#
# def _generate_username(self):
# """ Generate a unique username """
# while True:
# # Generate a UUID username, removing dashes and the last 2 chars
# # to make it fit into the 30 char User.username field. Gracefully
# # handle any unlikely, but possible duplicate usernames.
# username = str(uuid.uuid4())
# username = username.replace('-', '')
# username = username[:-2]
#
# try:
# User.objects.get(username=username)
# except User.DoesNotExist:
# return username
#
# def clean_username(self):
# if up_settings.EMAIL_ONLY:
# username = self._generate_username()
# else:
# username = self.cleaned_data['username']
# if User.objects.filter(username__iexact=username):
# raise forms.ValidationError(
# _(u'A user with that username already exists.'))
#
# return username
#
# def clean_email(self):
# if not up_settings.CHECK_UNIQUE_EMAIL:
# return self.cleaned_data['email']
#
# new_email = self.cleaned_data['email']
#
# emails = User.objects.filter(email__iexact=new_email).count()
#
# if up_settings.USE_EMAIL_VERIFICATION:
# from userprofiles.contrib.emailverification.models import EmailVerification
#
# emails += EmailVerification.objects.filter(
# new_email__iexact=new_email, is_expired=False).count()
#
# if emails > 0:
# raise forms.ValidationError(
# _(u'This email address is already in use. Please supply a different email address.'))
#
# return new_email
#
# def clean(self):
# if up_settings.DOUBLE_CHECK_EMAIL:
# if 'email' in self.cleaned_data and 'email_repeat' in self.cleaned_data:
# if self.cleaned_data['email'] != self.cleaned_data['email_repeat']:
# raise forms.ValidationError(_('The two email addresses do not match.'))
#
# if up_settings.DOUBLE_CHECK_PASSWORD:
# if 'password' in self.cleaned_data and 'password_repeat' in self.cleaned_data:
# if self.cleaned_data['password'] != self.cleaned_data['password_repeat']:
# raise forms.ValidationError(_('You must type the same password each time.'))
#
# return self.cleaned_data
#
# def save(self, *args, **kwargs):
# if up_settings.USE_ACCOUNT_VERIFICATION:
# from userprofiles.contrib.accountverification.models import AccountVerification
#
# new_user = AccountVerification.objects.create_inactive_user(
# username=self.cleaned_data['username'],
# password=self.cleaned_data['password'],
# email=self.cleaned_data['email'],
# )
# else:
# new_user = User.objects.create_user(
# username=self.cleaned_data['username'],
# password=self.cleaned_data['password'],
# email=self.cleaned_data['email']
# )
#
# if up_settings.REGISTRATION_FULLNAME:
# new_user.first_name = self.cleaned_data['first_name']
# new_user.last_name = self.cleaned_data['last_name']
#
# new_user.save()
#
# if hasattr(self, 'save_profile'):
# self.save_profile(new_user, *args, **kwargs)
#
# return new_user
. Output only the next line. | Profile.objects.create( |
Given the code snippet: <|code_start|>
class EmailChangeView(LoginRequiredMixin, FormView):
template_name = 'userprofiles/email_change.html'
form_class = ChangeEmailForm
def form_valid(self, form):
form.save(self.request.user)
return redirect('userprofiles_email_change_requested')
email_change = EmailChangeView.as_view()
class EmailChangeRequestedView(LoginRequiredMixin, TemplateView):
template_name = 'userprofiles/email_change_requested.html'
def get_context_data(self, **kwargs):
return {
'expiration_days': up_settings.EMAIL_VERIFICATION_DAYS
}
email_change_requested = EmailChangeRequestedView.as_view()
class EmailChangeApproveView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self, token, code):
try:
<|code_end|>
, generate the next line using the imports in this file:
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView, TemplateView, RedirectView
from userprofiles.contrib.emailverification.forms import ChangeEmailForm
from userprofiles.contrib.emailverification.models import EmailVerification
from userprofiles.mixins import LoginRequiredMixin
from userprofiles.settings import up_settings
and context (functions, classes, or occasionally code) from other files:
# Path: userprofiles/contrib/emailverification/forms.py
# class ChangeEmailForm(forms.Form):
# new_email = forms.EmailField(label=_('New e-mail address'), required=True)
#
# def clean_new_email(self):
# new_email = self.cleaned_data['new_email']
#
# user_emails = User.objects.filter(email__iexact=new_email).count()
# verification_emails = EmailVerification.objects.filter(
# new_email__iexact=new_email, is_expired=False).count()
# if user_emails + verification_emails > 0:
# raise forms.ValidationError(_(
# 'This email address is already in use. Please supply a different email address.'))
#
# return new_email
#
# def save(self, user):
# verification = EmailVerification.objects.create(user=user,
# old_email=user.email, new_email=self.cleaned_data['new_email'])
#
# context = {
# 'user': user,
# 'verification': verification,
# 'site': Site.objects.get_current(),
# }
#
# subject = ''.join(render_to_string(
# 'userprofiles/mails/emailverification_subject.html', context).splitlines())
# body = render_to_string('userprofiles/mails/emailverification.html', context)
#
# send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,
# [self.cleaned_data['new_email']])
#
# return verification
#
# Path: userprofiles/contrib/emailverification/models.py
# class EmailVerification(models.Model):
# user = models.ForeignKey(User, verbose_name=_('User'), blank=False)
# old_email = models.EmailField(_('Old e-mail address'))
# new_email = models.EmailField(_('New e-mail address'))
#
# token = models.CharField(_('Token'), max_length=40, default=generate_token)
# code = models.CharField(_('Code'), max_length=40, default=generate_token)
#
# is_approved = models.BooleanField(_('Approved'), default=False)
# is_expired = models.BooleanField(_('Expired'), default=False)
#
# expiration_date = models.DateTimeField(_('Expiration date'),
# default=generate_confirm_expire_date)
#
# def __unicode__(self):
# return '%s - %s/%s' % (self.user, self.old_email, self.new_email)
#
# def save(self, *args, **kwargs):
# if self.is_approved:
# EmailVerification.objects.filter(
# user=self.user, is_approved=False).update(is_expired=True)
#
# self.is_expired = True
#
# if self.user.email == self.old_email:
# self.user.email = self.new_email
# self.user.save()
# return super(EmailVerification, self).save(*args, **kwargs)
#
# class Meta:
# app_label = 'userprofiles'
# verbose_name = _('E-mail verification')
# verbose_name_plural = _('E-mail verifications')
#
# Path: userprofiles/mixins.py
# class LoginRequiredMixin(object):
# """Ensures that the user is authenticated in order to access the view.
# http://djangosnippets.org/snippets/2442/"""
#
# @method_decorator(login_required)
# def dispatch(self, *args, **kwargs):
# return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
#
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
. Output only the next line. | verification = EmailVerification.objects.get(token=token, code=code, |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
class EmailChangeView(LoginRequiredMixin, FormView):
template_name = 'userprofiles/email_change.html'
form_class = ChangeEmailForm
def form_valid(self, form):
form.save(self.request.user)
return redirect('userprofiles_email_change_requested')
email_change = EmailChangeView.as_view()
class EmailChangeRequestedView(LoginRequiredMixin, TemplateView):
template_name = 'userprofiles/email_change_requested.html'
def get_context_data(self, **kwargs):
return {
<|code_end|>
, predict the immediate next line with the help of imports:
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView, TemplateView, RedirectView
from userprofiles.contrib.emailverification.forms import ChangeEmailForm
from userprofiles.contrib.emailverification.models import EmailVerification
from userprofiles.mixins import LoginRequiredMixin
from userprofiles.settings import up_settings
and context (classes, functions, sometimes code) from other files:
# Path: userprofiles/contrib/emailverification/forms.py
# class ChangeEmailForm(forms.Form):
# new_email = forms.EmailField(label=_('New e-mail address'), required=True)
#
# def clean_new_email(self):
# new_email = self.cleaned_data['new_email']
#
# user_emails = User.objects.filter(email__iexact=new_email).count()
# verification_emails = EmailVerification.objects.filter(
# new_email__iexact=new_email, is_expired=False).count()
# if user_emails + verification_emails > 0:
# raise forms.ValidationError(_(
# 'This email address is already in use. Please supply a different email address.'))
#
# return new_email
#
# def save(self, user):
# verification = EmailVerification.objects.create(user=user,
# old_email=user.email, new_email=self.cleaned_data['new_email'])
#
# context = {
# 'user': user,
# 'verification': verification,
# 'site': Site.objects.get_current(),
# }
#
# subject = ''.join(render_to_string(
# 'userprofiles/mails/emailverification_subject.html', context).splitlines())
# body = render_to_string('userprofiles/mails/emailverification.html', context)
#
# send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,
# [self.cleaned_data['new_email']])
#
# return verification
#
# Path: userprofiles/contrib/emailverification/models.py
# class EmailVerification(models.Model):
# user = models.ForeignKey(User, verbose_name=_('User'), blank=False)
# old_email = models.EmailField(_('Old e-mail address'))
# new_email = models.EmailField(_('New e-mail address'))
#
# token = models.CharField(_('Token'), max_length=40, default=generate_token)
# code = models.CharField(_('Code'), max_length=40, default=generate_token)
#
# is_approved = models.BooleanField(_('Approved'), default=False)
# is_expired = models.BooleanField(_('Expired'), default=False)
#
# expiration_date = models.DateTimeField(_('Expiration date'),
# default=generate_confirm_expire_date)
#
# def __unicode__(self):
# return '%s - %s/%s' % (self.user, self.old_email, self.new_email)
#
# def save(self, *args, **kwargs):
# if self.is_approved:
# EmailVerification.objects.filter(
# user=self.user, is_approved=False).update(is_expired=True)
#
# self.is_expired = True
#
# if self.user.email == self.old_email:
# self.user.email = self.new_email
# self.user.save()
# return super(EmailVerification, self).save(*args, **kwargs)
#
# class Meta:
# app_label = 'userprofiles'
# verbose_name = _('E-mail verification')
# verbose_name_plural = _('E-mail verifications')
#
# Path: userprofiles/mixins.py
# class LoginRequiredMixin(object):
# """Ensures that the user is authenticated in order to access the view.
# http://djangosnippets.org/snippets/2442/"""
#
# @method_decorator(login_required)
# def dispatch(self, *args, **kwargs):
# return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
#
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
. Output only the next line. | 'expiration_days': up_settings.EMAIL_VERIFICATION_DAYS |
Here is a snippet: <|code_start|>
class SettingsTests(TestCase):
@override_settings(USERPROFILES_USE_ACCOUNT_VERIFICATION=True,
INSTALLED_APPS=list(set(settings.INSTALLED_APPS) - set(
['userprofiles.contrib.accountverification'])))
def test_account_verification(self):
<|code_end|>
. Write the next line using the current file imports:
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from userprofiles.settings import validate_settings
and context from other files:
# Path: userprofiles/settings.py
# def validate_settings():
# if (up_settings.USE_ACCOUNT_VERIFICATION and
# 'userprofiles.contrib.accountverification' not in settings.INSTALLED_APPS):
# raise ImproperlyConfigured('You need to add `userprofiles.contrib.accountverification` '
# 'to INSTALLED_APPS to use account verification.')
#
# # These settings together make no sense
# if up_settings.USE_ACCOUNT_VERIFICATION and up_settings.AUTO_LOGIN:
# raise ImproperlyConfigured("You cannot use autologin with account verification")
#
# if up_settings.USE_PROFILE and 'userprofiles.contrib.profiles' not in settings.INSTALLED_APPS:
# raise ImproperlyConfigured('You need to add `userprofiles.contrib.profiles` '
# 'to INSTALLED_APPS to use profiles.')
#
# if up_settings.PROFILE_ALLOW_EMAIL_CHANGE and up_settings.CHECK_UNIQUE_EMAIL:
# raise ImproperlyConfigured(
# 'USERPROFILES_PROFILE_ALLOW_EMAIL_CHANGE cannot be activated '
# 'when USERPROFILES_CHECK_UNIQUE_EMAIL is active.')
#
# if (up_settings.USE_EMAIL_VERIFICATION and
# 'userprofiles.contrib.emailverification' not in settings.INSTALLED_APPS):
# raise ImproperlyConfigured('You need to add `userprofiles.contrib.emailverification` '
# 'to INSTALLED_APPS to use emailverification.')
#
# if up_settings.PROFILE_ALLOW_EMAIL_CHANGE and up_settings.USE_EMAIL_VERIFICATION:
# raise ImproperlyConfigured(
# 'USERPROFILES_PROFILE_ALLOW_EMAIL_CHANGE cannot be activated '
# 'when USERPROFILES_USE_EMAIL_VERIFICATION is activated.')
#
# if ('test' not in sys.argv and not up_settings.USE_EMAIL_VERIFICATION and
# 'userprofiles.contrib.emailverification' in settings.INSTALLED_APPS):
# raise ImproperlyConfigured('You need to set USERPROFILES_USE_EMAIL_VERIFICATION '
# 'to use `userprofiles.contrib.emailverification`')
, which may include functions, classes, or code. Output only the next line. | self.assertRaises(ImproperlyConfigured, validate_settings) |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
class RegistrationForm(forms.Form):
username = forms.RegexField(label=_("Username"), max_length=30,
regex=r'^[\w.-]+$', error_messages={'invalid': _(
'This value may contain only letters, numbers and ./-/_ characters.')})
email = forms.EmailField(label=_('E-mail'))
email_repeat = forms.EmailField(label=_('E-mail (repeat)'), required=True)
password = forms.CharField(label=_('Password'),
widget=forms.PasswordInput(render_value=False))
password_repeat = forms.CharField(label=_('Password (repeat)'),
widget=forms.PasswordInput(render_value=False))
first_name = forms.CharField(label=_('First name'), required=False)
last_name = forms.CharField(label=_('Last name'), required=False)
def __init__(self, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
<|code_end|>
, generate the next line using the imports in this file:
import uuid
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from userprofiles.settings import up_settings
from userprofiles.contrib.emailverification.models import EmailVerification
from userprofiles.contrib.accountverification.models import AccountVerification
and context (functions, classes, or occasionally code) from other files:
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
. Output only the next line. | if not up_settings.DOUBLE_CHECK_EMAIL: |
Predict the next line for this snippet: <|code_start|>
class UtilsTests(TestCase):
def test_get_profile_module_disabled(self):
self.assertEqual(utils.get_profile_model(), None)
@override_settings(USERPROFILES_USE_PROFILE=True)
def test_get_profile_module_enabled(self):
settings.AUTH_PROFILE_MODULE = None
self.assertRaises(SiteProfileNotAvailable, utils.get_profile_model)
settings.AUTH_PROFILE_MODULE = 'test_project.test_accounts.Profile'
self.assertRaises(SiteProfileNotAvailable, utils.get_profile_model)
settings.AUTH_PROFILE_MODULE = 'test_accounts.InvalidProfile'
self.assertRaises(SiteProfileNotAvailable, utils.get_profile_model)
settings.AUTH_PROFILE_MODULE = 'test_accounts.Profile'
self.assertEqual(utils.get_profile_model(), get_model('test_accounts', 'Profile'))
def test_get_form_class(self):
self.assertEqual(utils.get_form_class('userprofiles.forms.RegistrationForm'),
<|code_end|>
with the help of current file imports:
from django.conf import settings
from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_model
from django.test import TestCase
from django.test.utils import override_settings
from userprofiles import forms
from userprofiles import utils
and context from other files:
# Path: userprofiles/forms.py
# class RegistrationForm(forms.Form):
# def __init__(self, *args, **kwargs):
# def _generate_username(self):
# def clean_username(self):
# def clean_email(self):
# def clean(self):
# def save(self, *args, **kwargs):
, which may contain function names, class names, or code. Output only the next line. | forms.RegistrationForm) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
class RegistrationActivateView(TemplateView):
template_name = 'userprofiles/registration_activate.html'
def get_context_data(self, **kwargs):
activation_key = kwargs['activation_key'].lower()
<|code_end|>
, predict the next line using imports from the current file:
from django.views.generic import TemplateView
from userprofiles.contrib.accountverification.models import AccountVerification
from userprofiles.settings import up_settings
and context including class names, function names, and sometimes code from other files:
# Path: userprofiles/contrib/accountverification/models.py
# class AccountVerification(models.Model):
# ACTIVATED = 'ALREADY_ACTIVATED'
#
# user = models.ForeignKey(User, unique=True, verbose_name=_('User'))
# activation_key = models.CharField(_('Activation key'), max_length=40)
#
# objects = AccountVerificationManager()
#
# def __unicode__(self):
# return u'Account verification: %s' % self.user
#
# def activation_key_expired(self):
# expiration_date = timedelta(days=up_settings.ACCOUNT_VERIFICATION_DAYS)
# return (self.activation_key == self.ACTIVATED
# or (self.user.date_joined + expiration_date <= timezone.now()))
# activation_key_expired.boolean = True
#
# class Meta:
# app_label = 'userprofiles'
# verbose_name = _('Account verification')
# verbose_name_plural = _('Account verifications')
#
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
. Output only the next line. | account = AccountVerification.objects.activate_user(activation_key) |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
class RegistrationActivateView(TemplateView):
template_name = 'userprofiles/registration_activate.html'
def get_context_data(self, **kwargs):
activation_key = kwargs['activation_key'].lower()
account = AccountVerification.objects.activate_user(activation_key)
return {
'account': account,
<|code_end|>
with the help of current file imports:
from django.views.generic import TemplateView
from userprofiles.contrib.accountverification.models import AccountVerification
from userprofiles.settings import up_settings
and context from other files:
# Path: userprofiles/contrib/accountverification/models.py
# class AccountVerification(models.Model):
# ACTIVATED = 'ALREADY_ACTIVATED'
#
# user = models.ForeignKey(User, unique=True, verbose_name=_('User'))
# activation_key = models.CharField(_('Activation key'), max_length=40)
#
# objects = AccountVerificationManager()
#
# def __unicode__(self):
# return u'Account verification: %s' % self.user
#
# def activation_key_expired(self):
# expiration_date = timedelta(days=up_settings.ACCOUNT_VERIFICATION_DAYS)
# return (self.activation_key == self.ACTIVATED
# or (self.user.date_joined + expiration_date <= timezone.now()))
# activation_key_expired.boolean = True
#
# class Meta:
# app_label = 'userprofiles'
# verbose_name = _('Account verification')
# verbose_name_plural = _('Account verifications')
#
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
, which may contain function names, class names, or code. Output only the next line. | 'expiration_days': up_settings.ACCOUNT_VERIFICATION_DAYS |
Predict the next line for this snippet: <|code_start|>
@override_settings(USERPROFILES_USE_PROFILE=True, AUTH_PROFILE_MODULE='test_accounts.Profile')
class ViewTests(TestCase):
def setUp(self):
self.data = {
'username': 'newuser',
'email': 'newuser@example.com',
'new_email': 'anotheremail@example.com',
'password': 'newuserpass',
'first_name': 'John',
'last_name': 'Doe',
'short_info': 'Short Info Test!'
}
self.user = User.objects.create_user(self.data['username'], self.data['email'],
self.data['password'])
<|code_end|>
with the help of current file imports:
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from test_project.test_accounts.models import Profile
and context from other files:
# Path: test_project/test_accounts/models.py
# class Profile(models.Model):
# user = models.OneToOneField(User)
#
# short_info = models.TextField(blank=True)
, which may contain function names, class names, or code. Output only the next line. | Profile(user=self.user).save() |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
class ProfileView(LoginRequiredMixin, TemplateView):
template_name = 'userprofiles/profile.html'
def get_context_data(self, **kwargs):
return {
'user': self.request.user,
}
profile = ProfileView.as_view()
class ProfileChangeView(LoginRequiredMixin, FormView):
<|code_end|>
, predict the immediate next line with the help of imports:
from django.contrib import messages
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView, FormView
from userprofiles.mixins import LoginRequiredMixin
from userprofiles.settings import up_settings
from userprofiles.utils import get_form_class, get_profile_model
and context (classes, functions, sometimes code) from other files:
# Path: userprofiles/mixins.py
# class LoginRequiredMixin(object):
# """Ensures that the user is authenticated in order to access the view.
# http://djangosnippets.org/snippets/2442/"""
#
# @method_decorator(login_required)
# def dispatch(self, *args, **kwargs):
# return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
#
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
. Output only the next line. | form_class = get_form_class(up_settings.PROFILE_FORM) |
Given the code snippet: <|code_start|>
class ViewTests(TestCase):
def setUp(self):
self.data = {
'username': 'newuser',
'email': 'newuser@example.com',
'email_repeat': 'newuser@example.com',
'password': 'newuserpass',
'password_repeat': 'newuserpass',
'first_name': 'New',
'last_name': 'User',
}
def test_registration(self):
url = reverse('userprofiles_registration')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, data=self.data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response,
<|code_end|>
, generate the next line using the imports in this file:
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from userprofiles.settings import up_settings
and context (functions, classes, or occasionally code) from other files:
# Path: userprofiles/settings.py
# class Settings(object):
# def __init__(self, **kwargs):
# def __getattr__(self, key):
# def validate_settings():
. Output only the next line. | reverse(up_settings.REGISTRATION_REDIRECT)) |
Given snippet: <|code_start|> as dictionaries mapping vertices to lists of neighbors,
however dictionaries of edges have many advantages
over lists: they can store extra information (here,
the lengths), they support fast existence tests,
and they allow easy modification of the graph by edge
insertion and removal. Such modifications are not
needed here but are important in other graph algorithms.
Since dictionaries obey iterator protocol, a graph
represented as described here could be handed without
modification to an algorithm using Guido's representation.
Of course, G and G[v] need not be Python dict objects;
they can be any other object that obeys dict protocol,
for instance a wrapper in which vertices are URLs
and a call to G[v] loads the web page and finds its links.
The output is a pair (D,P) where D[v] is the distance
from start to v and P[v] is the predecessor of v along
the shortest path from s to v.
Dijkstra's algorithm is only guaranteed to work correctly
when all edge lengths are positive. This code does not
verify this property for all edges (only the edges seen
before the end vertex is reached), but will correctly
compute shortest paths even for some graphs with negative
edges, and will raise an exception if it discovers that
a negative edge has caused it to make a mistake.
"""
D = {} # dictionary of final distances
P = {} # dictionary of predecessors
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import SOMTools
import numpy
import scipy.spatial.distance
import itertools
import matplotlib.pyplot
from priodict import priorityDictionary
from IPython.display import clear_output
and context:
# Path: priodict.py
# class priorityDictionary(dict):
# def __init__(self):
# '''Initialize priorityDictionary by creating binary heap
# of pairs (value,key). Note that changing or removing a dict entry will
# not remove the old pair from the heap until it is found by smallest() or
# until the heap is rebuilt.'''
# self.__heap = []
# dict.__init__(self)
#
# def smallest(self):
# '''Find smallest item after removing deleted items from heap.'''
# if len(self) == 0:
# raise IndexError, "smallest of empty priorityDictionary"
# heap = self.__heap
# while heap[0][1] not in self or self[heap[0][1]] != heap[0][0]:
# lastItem = heap.pop()
# insertionPoint = 0
# while 1:
# smallChild = 2*insertionPoint+1
# if smallChild+1 < len(heap) and \
# heap[smallChild] > heap[smallChild+1]:
# smallChild += 1
# if smallChild >= len(heap) or lastItem <= heap[smallChild]:
# heap[insertionPoint] = lastItem
# break
# heap[insertionPoint] = heap[smallChild]
# insertionPoint = smallChild
# return heap[0][1]
#
# def __iter__(self):
# '''Create destructive sorted iterator of priorityDictionary.'''
# def iterfn():
# while len(self) > 0:
# x = self.smallest()
# yield x
# del self[x]
# return iterfn()
#
# def __setitem__(self,key,val):
# '''Change value stored in dictionary and add corresponding
# pair to heap. Rebuilds the heap if the number of deleted items grows
# too large, to avoid memory leakage.'''
# dict.__setitem__(self,key,val)
# heap = self.__heap
# if len(heap) > 2 * len(self):
# self.__heap = [(v,k) for k,v in self.iteritems()]
# self.__heap.sort() # builtin sort likely faster than O(n) heapify
# else:
# newPair = (val,key)
# insertionPoint = len(heap)
# heap.append(None)
# while insertionPoint > 0 and \
# newPair < heap[(insertionPoint-1)//2]:
# heap[insertionPoint] = heap[(insertionPoint-1)//2]
# insertionPoint = (insertionPoint-1)//2
# heap[insertionPoint] = newPair
#
# def setdefault(self,key,val):
# '''Reimplement setdefault to call our customized __setitem__.'''
# if key not in self:
# self[key] = val
# return self[key]
which might include code, classes, or functions. Output only the next line. | Q = priorityDictionary() # est.dist. of non-final vert. |
Predict the next line after this snippet: <|code_start|> OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----------------------------------------------------------------------------
"""
class SchemaFactory(object):
"""
Allows to instantiate a pickled schema.
"""
_schema_entity_cache = None
_schema_entity_cache_path = None
_schema_cache = None
_schema_cache_path = None
@classmethod
def get_schemas(cls, schema_path, schema_entity_path):
"""
Retrieves the schemas from disk.
:param str schema_path: Path to the schema.
:param str schema_entity_path: Path to the entities schema.
:returns: Pair of dictionaries holding the schema and entities schema.
:rtype: tuple
"""
if not os.path.exists(schema_path):
<|code_end|>
using the current file's imports:
from ..six.moves import cPickle as pickle
from .errors import MockgunError
import os
and any relevant context from other files:
# Path: packages/cpenv/vendor/shotgun_api3/lib/mockgun/errors.py
# class MockgunError(Exception):
# """
# Base for all Mockgun related API Errors.
# These are errors that relate to mockgun specifically, for example
# relating to mockups setup and initialization. For operational errors,
# mockgun raises ShotgunErrors just like the Shotgun API.
# """
. Output only the next line. | raise MockgunError("Cannot locate Mockgun schema file '%s'!" % schema_path) |
Here is a snippet: <|code_start|> def __repr__(self):
return '<%s "%s">' % (type(self).__name__, self.title)
WORKS_PATH = ['orcid-profile', 'orcid-activities','orcid-works',]
def _parse_publications(l):
if l is not None:
#logger.debug(json.dumps(l, sort_keys=True, indent=4, separators=(',', ': ')))
return [Publication(d) for d in l]
return []
Works = dictmapper('Works', {
'publications':to(WORKS_PATH + ['orcid-work'], _parse_publications),
})
AuthorBase = dictmapper('AuthorBase', {
'orcid':['orcid-profile','orcid-identifier','path'],
'family_name':PERSONAL_DETAILS_PATH + ['family-name','value'],
'given_name':PERSONAL_DETAILS_PATH + ['given-names','value'],
'biography':BIO_PATH + ['biography',],
'keywords':to(BIO_PATH + ['keywords'], _parse_keywords),
#'keywords':to(BIO_PATH + ['keywords', 'keyword'], _parse_keywords),
'researcher_urls':to(BIO_PATH + ['researcher-urls','researcher-url'],
_parse_researcher_urls),
})
class Author(AuthorBase):
_loaded_works = None
def _load_works(self):
<|code_end|>
. Write the next line using the current file imports:
import requests
import json
import logging
from .constants import ORCID_PUBLIC_BASE_URL
from .utils import dictmapper, MappingRule as to
from .exceptions import NotFoundException
and context from other files:
# Path: services/orcid/pyorcid/constants.py
# ORCID_PUBLIC_BASE_URL = 'http://pub.orcid.org/'
#
# Path: services/orcid/pyorcid/utils.py
# def dictmapper(typename, mapping):
# """
# A factory to create `namedtuple`-like classes from a field-to-dict-path
# mapping::
#
# Person = dictmapper({'person':('person','name')})
# example_dict = {'person':{'name':'John'}}
# john = Person(example_dict)
# assert john.name == 'John'
#
# If a function is specified as a mapping value instead of a dict "path", it
# will be run with the backing dict as its first argument.
# """
# def init(self, d, *args, **kwargs):
# """
# Initialize `dictmapper` classes with a dict to back getters.
# """
# self._original_dict = d
#
# def getter_from_dict_path(path):
# if not callable(path) and len(path) < 1:
# raise ValueError('Dict paths should be iterables with at least one'
# ' key or callable objects that take one argument.')
# def getter(self):
# cur_dict = self._original_dict
# if callable(path):
# return path(cur_dict)
# return dict_value_from_path(cur_dict, path)
# return getter
#
# prop_mapping = dict((k, property(getter_from_dict_path(v)))
# for k, v in mapping.iteritems())
# prop_mapping['__init__'] = init
# return type(typename, tuple(), prop_mapping)
#
# class MappingRule(object):
# def __init__(self, path, further_func = lambda x : x):
# self.path = path
# self.further_func = further_func
#
# def __call__(self, d):
# return self.further_func(dict_value_from_path(d, self.path))
#
# Path: services/orcid/pyorcid/exceptions.py
# class NotFoundException(ORCIDException):
# pass
, which may include functions, classes, or code. Output only the next line. | resp = requests.get(ORCID_PUBLIC_BASE_URL + self.orcid |
Given snippet: <|code_start|> return []
CitationBase = dictmapper('CitationBase', {
'text':['citation'],
'type':['work-citation-type']
})
class Citation(CitationBase):
def __unicode__(self):
return self.text
def __repr__(self):
return '<%s [type: %s]>' % (type(self).__name__, self.type)
ExternalIDBase = dictmapper('ExternalIDBase', {
'id':['work-external-identifier-id','value'],
'type':['work-external-identifier-type']
})
class ExternalID(ExternalIDBase):
def __unicode__(self):
return unicode(self.id)
def __repr__(self):
return '<%s %s:%s>' % (type(self).__name__, self.type, str(self.id))
PublicationBase = dictmapper('PublicationBase',{
'title':['work-title','title','value'],
'subtitle':['work-title','subtitle','value'],
'url':['url','value'],
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import requests
import json
import logging
from .constants import ORCID_PUBLIC_BASE_URL
from .utils import dictmapper, MappingRule as to
from .exceptions import NotFoundException
and context:
# Path: services/orcid/pyorcid/constants.py
# ORCID_PUBLIC_BASE_URL = 'http://pub.orcid.org/'
#
# Path: services/orcid/pyorcid/utils.py
# def dictmapper(typename, mapping):
# """
# A factory to create `namedtuple`-like classes from a field-to-dict-path
# mapping::
#
# Person = dictmapper({'person':('person','name')})
# example_dict = {'person':{'name':'John'}}
# john = Person(example_dict)
# assert john.name == 'John'
#
# If a function is specified as a mapping value instead of a dict "path", it
# will be run with the backing dict as its first argument.
# """
# def init(self, d, *args, **kwargs):
# """
# Initialize `dictmapper` classes with a dict to back getters.
# """
# self._original_dict = d
#
# def getter_from_dict_path(path):
# if not callable(path) and len(path) < 1:
# raise ValueError('Dict paths should be iterables with at least one'
# ' key or callable objects that take one argument.')
# def getter(self):
# cur_dict = self._original_dict
# if callable(path):
# return path(cur_dict)
# return dict_value_from_path(cur_dict, path)
# return getter
#
# prop_mapping = dict((k, property(getter_from_dict_path(v)))
# for k, v in mapping.iteritems())
# prop_mapping['__init__'] = init
# return type(typename, tuple(), prop_mapping)
#
# class MappingRule(object):
# def __init__(self, path, further_func = lambda x : x):
# self.path = path
# self.further_func = further_func
#
# def __call__(self, d):
# return self.further_func(dict_value_from_path(d, self.path))
#
# Path: services/orcid/pyorcid/exceptions.py
# class NotFoundException(ORCIDException):
# pass
which might include code, classes, or functions. Output only the next line. | 'citation': to(['work-citation'], lambda d: Citation(d) if d is not None else None), |
Given snippet: <|code_start|>
class Snippet(Model):
org = String()
user = String()
text = String()
keywords = String(required=False)
lang = String()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import time
from codebox.utils.models import Model, String, Float
from codebox.apps.auth.models import User
from codebox.apps.organizations.models import Organization
and context:
# Path: codebox/utils/models.py
# class Model(object):
# __metaclass__ = ModelDescriptor
#
# DoesNotExist = DoesNotExist
# DuplicateKeyError = DuplicateKeyError
#
# def __init__(self, pk, **kwargs):
# self._storage = RedisHashMap(g.redis, '%s:items:%s' % (self._meta.db_name, pk))
# self.pk = pk
# for attname, field in self._meta.fields.iteritems():
# try:
# val = field.to_python(kwargs.pop(attname))
# except KeyError:
# # This is done with an exception rather than the
# # default argument on pop because we don't want
# # get_default() to be evaluated, and then not used.
# # Refs #12057.
# val = field.get_default()
# if val:
# setattr(self, attname, val)
# if kwargs:
# raise ValueError('%s are not part of the schema for %s' % (', '.join(kwargs.keys()), self.__class__.__name__))
#
# def __getattribute__(self, key):
# if key in object.__getattribute__(self, '_meta').fields:
# return self[key]
# return object.__getattribute__(self, key)
#
# def __setattr__(self, key, value):
# if key in object.__getattribute__(self, '_meta').fields:
# self[key] = value
# return object.__setattr__(self, key, value)
#
# def __getitem__(self, key):
# return self._storage[key]
#
# def __setitem__(self, key, value):
# field = self._meta.fields.get(key)
# if field:
# value = field.to_python(value)
# self._storage[key] = value
# # Store additional predefined index
# if (key,) in self._meta.index or (key,) in self._meta.unique:
# self.objects.add_to_index(self.pk, **{key: value})
#
# def __repr__(self):
# return u'<%s: %s>' % (self.__class__.__name__, unicode(self))
#
# def __eq__(self, other):
# if type(self) != type(other):
# return False
# return self.pk == other.pk
#
# def __unicode__(self):
# return self.pk
#
# def __contains__(self, key):
# return key in self._storage
#
# def update(self, **kwargs):
# for k, v in kwargs.iteritems():
# self[k] = v
# if len(kwargs) > 1:
# # Store composite indexes
# for fields in itertools.chain(self._meta.index, self._meta.unique):
# if all([f in kwargs for f in fields]):
# idx_kwargs = dict((f, getattr(self, f)) for f in fields)
# self.objects.add_to_index(self.pk, **idx_kwargs)
#
# def delete(self):
# # Clear all indexes first
# # Default index
# g.redis.zrem(self.objects._get_default_index_key(), self.pk)
# g.redis.decr(self.objects._get_default_count_key())
#
# # Store additional predefined index
# for fields in itertools.chain(self._meta.index, self._meta.unique):
# idx_kwargs = dict((f, getattr(self, f)) for f in fields)
# self.objects.remove_from_index(self.pk, **idx_kwargs)
#
# # Clear out the hash table for object
# self._storage.clear()
#
# def post_create(self):
# pass
#
# class String(Field):
# def to_python(self, value=None):
# if value:
# value = unicode(value)
# return value
#
# class Float(Field):
# def to_python(self, value=None):
# if value:
# value = float(value)
# else:
# value = 0.0
# return value
#
# Path: codebox/apps/auth/models.py
# class User(Model):
# name = String()
# email = String(required=False)
# avatar = String(required=False)
# created_at = Float(default=time.time)
# api_token = String(required=False)
#
# class Meta:
# unique = (('email',), ('api_token',))
#
# def get_all_organizations(self, admin=False):
# from codebox.apps.organizations.models import OrganizationMember, Organization
#
# memberships = list(OrganizationMember.objects.filter(user=self.pk))
# orgs = Organization.objects.get_many([m.org for m in memberships])
# if admin:
# orgs = filter(lambda x: x.owned_by == self.pk, orgs)
# return orgs
#
# def can_admin_org(self, org):
# return org.owned_by == self.pk
#
# def get_relation(self, relation):
# return list(relation.objects.filter(user=self.pk))
which might include code, classes, or functions. Output only the next line. | created_at = Float(default=time.time) |
Based on the snippet: <|code_start|>
languages = [
('text', 'Plaintext'),
('diff', 'Diff'),
('javascript', 'JavaScript'),
('html', 'HTML'),
('php', 'PHP'),
('python', 'Python'),
('ruby', 'Ruby'),
('applescript', 'AppleScript'),
('bash', 'Bash'),
('c', 'C'),
('c++', 'C++'),
('css', 'CSS'),
('erlang', 'Erlang'),
('java', 'Java'),
('scala', 'Scala'),
('sql', 'SQL'),
('xml', 'XML'),
]
class Meta:
index = (('org',), ('user',))
def post_create(self):
# Fill our dashboard index
for user in Organization.objects.get(self.org).get_all_members():
Snippet.objects.add_to_index(self.pk, dashboard=user.pk)
def get_user(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import time
from codebox.utils.models import Model, String, Float
from codebox.apps.auth.models import User
from codebox.apps.organizations.models import Organization
and context (classes, functions, sometimes code) from other files:
# Path: codebox/utils/models.py
# class Model(object):
# __metaclass__ = ModelDescriptor
#
# DoesNotExist = DoesNotExist
# DuplicateKeyError = DuplicateKeyError
#
# def __init__(self, pk, **kwargs):
# self._storage = RedisHashMap(g.redis, '%s:items:%s' % (self._meta.db_name, pk))
# self.pk = pk
# for attname, field in self._meta.fields.iteritems():
# try:
# val = field.to_python(kwargs.pop(attname))
# except KeyError:
# # This is done with an exception rather than the
# # default argument on pop because we don't want
# # get_default() to be evaluated, and then not used.
# # Refs #12057.
# val = field.get_default()
# if val:
# setattr(self, attname, val)
# if kwargs:
# raise ValueError('%s are not part of the schema for %s' % (', '.join(kwargs.keys()), self.__class__.__name__))
#
# def __getattribute__(self, key):
# if key in object.__getattribute__(self, '_meta').fields:
# return self[key]
# return object.__getattribute__(self, key)
#
# def __setattr__(self, key, value):
# if key in object.__getattribute__(self, '_meta').fields:
# self[key] = value
# return object.__setattr__(self, key, value)
#
# def __getitem__(self, key):
# return self._storage[key]
#
# def __setitem__(self, key, value):
# field = self._meta.fields.get(key)
# if field:
# value = field.to_python(value)
# self._storage[key] = value
# # Store additional predefined index
# if (key,) in self._meta.index or (key,) in self._meta.unique:
# self.objects.add_to_index(self.pk, **{key: value})
#
# def __repr__(self):
# return u'<%s: %s>' % (self.__class__.__name__, unicode(self))
#
# def __eq__(self, other):
# if type(self) != type(other):
# return False
# return self.pk == other.pk
#
# def __unicode__(self):
# return self.pk
#
# def __contains__(self, key):
# return key in self._storage
#
# def update(self, **kwargs):
# for k, v in kwargs.iteritems():
# self[k] = v
# if len(kwargs) > 1:
# # Store composite indexes
# for fields in itertools.chain(self._meta.index, self._meta.unique):
# if all([f in kwargs for f in fields]):
# idx_kwargs = dict((f, getattr(self, f)) for f in fields)
# self.objects.add_to_index(self.pk, **idx_kwargs)
#
# def delete(self):
# # Clear all indexes first
# # Default index
# g.redis.zrem(self.objects._get_default_index_key(), self.pk)
# g.redis.decr(self.objects._get_default_count_key())
#
# # Store additional predefined index
# for fields in itertools.chain(self._meta.index, self._meta.unique):
# idx_kwargs = dict((f, getattr(self, f)) for f in fields)
# self.objects.remove_from_index(self.pk, **idx_kwargs)
#
# # Clear out the hash table for object
# self._storage.clear()
#
# def post_create(self):
# pass
#
# class String(Field):
# def to_python(self, value=None):
# if value:
# value = unicode(value)
# return value
#
# class Float(Field):
# def to_python(self, value=None):
# if value:
# value = float(value)
# else:
# value = 0.0
# return value
#
# Path: codebox/apps/auth/models.py
# class User(Model):
# name = String()
# email = String(required=False)
# avatar = String(required=False)
# created_at = Float(default=time.time)
# api_token = String(required=False)
#
# class Meta:
# unique = (('email',), ('api_token',))
#
# def get_all_organizations(self, admin=False):
# from codebox.apps.organizations.models import OrganizationMember, Organization
#
# memberships = list(OrganizationMember.objects.filter(user=self.pk))
# orgs = Organization.objects.get_many([m.org for m in memberships])
# if admin:
# orgs = filter(lambda x: x.owned_by == self.pk, orgs)
# return orgs
#
# def can_admin_org(self, org):
# return org.owned_by == self.pk
#
# def get_relation(self, relation):
# return list(relation.objects.filter(user=self.pk))
. Output only the next line. | return User.objects.get(self.user) |
Using the snippet: <|code_start|>
class User(Model):
name = String()
email = String(required=False)
avatar = String(required=False)
<|code_end|>
, determine the next line of code. You have imports:
import time
from codebox.utils.models import Model, String, Float, Boolean
from codebox.apps.organizations.models import OrganizationMember, Organization
and context (class names, function names, or code) available:
# Path: codebox/utils/models.py
# class Model(object):
# __metaclass__ = ModelDescriptor
#
# DoesNotExist = DoesNotExist
# DuplicateKeyError = DuplicateKeyError
#
# def __init__(self, pk, **kwargs):
# self._storage = RedisHashMap(g.redis, '%s:items:%s' % (self._meta.db_name, pk))
# self.pk = pk
# for attname, field in self._meta.fields.iteritems():
# try:
# val = field.to_python(kwargs.pop(attname))
# except KeyError:
# # This is done with an exception rather than the
# # default argument on pop because we don't want
# # get_default() to be evaluated, and then not used.
# # Refs #12057.
# val = field.get_default()
# if val:
# setattr(self, attname, val)
# if kwargs:
# raise ValueError('%s are not part of the schema for %s' % (', '.join(kwargs.keys()), self.__class__.__name__))
#
# def __getattribute__(self, key):
# if key in object.__getattribute__(self, '_meta').fields:
# return self[key]
# return object.__getattribute__(self, key)
#
# def __setattr__(self, key, value):
# if key in object.__getattribute__(self, '_meta').fields:
# self[key] = value
# return object.__setattr__(self, key, value)
#
# def __getitem__(self, key):
# return self._storage[key]
#
# def __setitem__(self, key, value):
# field = self._meta.fields.get(key)
# if field:
# value = field.to_python(value)
# self._storage[key] = value
# # Store additional predefined index
# if (key,) in self._meta.index or (key,) in self._meta.unique:
# self.objects.add_to_index(self.pk, **{key: value})
#
# def __repr__(self):
# return u'<%s: %s>' % (self.__class__.__name__, unicode(self))
#
# def __eq__(self, other):
# if type(self) != type(other):
# return False
# return self.pk == other.pk
#
# def __unicode__(self):
# return self.pk
#
# def __contains__(self, key):
# return key in self._storage
#
# def update(self, **kwargs):
# for k, v in kwargs.iteritems():
# self[k] = v
# if len(kwargs) > 1:
# # Store composite indexes
# for fields in itertools.chain(self._meta.index, self._meta.unique):
# if all([f in kwargs for f in fields]):
# idx_kwargs = dict((f, getattr(self, f)) for f in fields)
# self.objects.add_to_index(self.pk, **idx_kwargs)
#
# def delete(self):
# # Clear all indexes first
# # Default index
# g.redis.zrem(self.objects._get_default_index_key(), self.pk)
# g.redis.decr(self.objects._get_default_count_key())
#
# # Store additional predefined index
# for fields in itertools.chain(self._meta.index, self._meta.unique):
# idx_kwargs = dict((f, getattr(self, f)) for f in fields)
# self.objects.remove_from_index(self.pk, **idx_kwargs)
#
# # Clear out the hash table for object
# self._storage.clear()
#
# def post_create(self):
# pass
#
# class String(Field):
# def to_python(self, value=None):
# if value:
# value = unicode(value)
# return value
#
# class Float(Field):
# def to_python(self, value=None):
# if value:
# value = float(value)
# else:
# value = 0.0
# return value
#
# class Boolean(Field):
# def to_db(self, value=None):
# if not value:
# value = False
# return int(bool(value))
#
# def to_python(self, value=None):
# if not value:
# value = False
# return bool(int(value))
. Output only the next line. | created_at = Float(default=time.time) |
Predict the next line after this snippet: <|code_start|>
class User(Model):
name = String()
email = String(required=False)
avatar = String(required=False)
created_at = Float(default=time.time)
api_token = String(required=False)
class Meta:
unique = (('email',), ('api_token',))
def get_all_organizations(self, admin=False):
memberships = list(OrganizationMember.objects.filter(user=self.pk))
orgs = Organization.objects.get_many([m.org for m in memberships])
if admin:
orgs = filter(lambda x: x.owned_by == self.pk, orgs)
return orgs
def can_admin_org(self, org):
return org.owned_by == self.pk
def get_relation(self, relation):
return list(relation.objects.filter(user=self.pk))
class Email(Model):
user = String()
email = String()
<|code_end|>
using the current file's imports:
import time
from codebox.utils.models import Model, String, Float, Boolean
from codebox.apps.organizations.models import OrganizationMember, Organization
and any relevant context from other files:
# Path: codebox/utils/models.py
# class Model(object):
# __metaclass__ = ModelDescriptor
#
# DoesNotExist = DoesNotExist
# DuplicateKeyError = DuplicateKeyError
#
# def __init__(self, pk, **kwargs):
# self._storage = RedisHashMap(g.redis, '%s:items:%s' % (self._meta.db_name, pk))
# self.pk = pk
# for attname, field in self._meta.fields.iteritems():
# try:
# val = field.to_python(kwargs.pop(attname))
# except KeyError:
# # This is done with an exception rather than the
# # default argument on pop because we don't want
# # get_default() to be evaluated, and then not used.
# # Refs #12057.
# val = field.get_default()
# if val:
# setattr(self, attname, val)
# if kwargs:
# raise ValueError('%s are not part of the schema for %s' % (', '.join(kwargs.keys()), self.__class__.__name__))
#
# def __getattribute__(self, key):
# if key in object.__getattribute__(self, '_meta').fields:
# return self[key]
# return object.__getattribute__(self, key)
#
# def __setattr__(self, key, value):
# if key in object.__getattribute__(self, '_meta').fields:
# self[key] = value
# return object.__setattr__(self, key, value)
#
# def __getitem__(self, key):
# return self._storage[key]
#
# def __setitem__(self, key, value):
# field = self._meta.fields.get(key)
# if field:
# value = field.to_python(value)
# self._storage[key] = value
# # Store additional predefined index
# if (key,) in self._meta.index or (key,) in self._meta.unique:
# self.objects.add_to_index(self.pk, **{key: value})
#
# def __repr__(self):
# return u'<%s: %s>' % (self.__class__.__name__, unicode(self))
#
# def __eq__(self, other):
# if type(self) != type(other):
# return False
# return self.pk == other.pk
#
# def __unicode__(self):
# return self.pk
#
# def __contains__(self, key):
# return key in self._storage
#
# def update(self, **kwargs):
# for k, v in kwargs.iteritems():
# self[k] = v
# if len(kwargs) > 1:
# # Store composite indexes
# for fields in itertools.chain(self._meta.index, self._meta.unique):
# if all([f in kwargs for f in fields]):
# idx_kwargs = dict((f, getattr(self, f)) for f in fields)
# self.objects.add_to_index(self.pk, **idx_kwargs)
#
# def delete(self):
# # Clear all indexes first
# # Default index
# g.redis.zrem(self.objects._get_default_index_key(), self.pk)
# g.redis.decr(self.objects._get_default_count_key())
#
# # Store additional predefined index
# for fields in itertools.chain(self._meta.index, self._meta.unique):
# idx_kwargs = dict((f, getattr(self, f)) for f in fields)
# self.objects.remove_from_index(self.pk, **idx_kwargs)
#
# # Clear out the hash table for object
# self._storage.clear()
#
# def post_create(self):
# pass
#
# class String(Field):
# def to_python(self, value=None):
# if value:
# value = unicode(value)
# return value
#
# class Float(Field):
# def to_python(self, value=None):
# if value:
# value = float(value)
# else:
# value = 0.0
# return value
#
# class Boolean(Field):
# def to_db(self, value=None):
# if not value:
# value = False
# return int(bool(value))
#
# def to_python(self, value=None):
# if not value:
# value = False
# return bool(int(value))
. Output only the next line. | verified = Boolean(default=False) |
Continue the code snippet: <|code_start|> return ret
def get_minutes(self):
ct = self.cuts_time()
if ct is None:
# if there are no cuts, use scheduled time
delta = self.end - self.start
minutes = delta.days*60*24 + delta.seconds/60.0
else:
# use amount of video time
minutes = self.cuts_time()/60
return int(minutes)
def add_email(self, email):
if self.emails is None: emails=[]
else: emails = self.emails.split(',')
if email not in emails:
if self.emails:
emails.append(email)
self.emails = ','.join(emails)
else:
self.emails = email
self.save()
def get_authors(self):
authors = self.authors.split(',') if self.authors else []
return authors
@property
<|code_end|>
. Use current file imports:
import os
import socket
import datetime
import random
import re
from django import forms
from django import urls
from django.db import models
from django.db.models.signals import pre_save
from .unique_slugify import unique_slugify
from .titlecase import titlecase
from functools import reduce
and context (classes, functions, or code) from other files:
# Path: dj/main/titlecase.py
# def titlecase(text):
# """
# Titlecases input text
#
# This filter changes all words to Title Caps, and attempts to be clever
# about *un*capitalizing SMALL words like a/an/the in the input.
#
# The list of "SMALL words" which are not capped comes from
# the New York Times Manual of Style, plus 'vs' and 'v'.
#
# """
#
# words = re.split('\s', text)
# line = []
# for word in words:
# if word.startswith('#') or \
# INLINE_PERIOD.search(word) or \
# UC_ELSEWHERE.match(word):
# line.append(word)
# continue
# if SMALL_WORDS.match(word):
# line.append(word.lower())
# continue
# line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))
#
# line = " ".join(line)
#
# line = SMALL_FIRST.sub(lambda m: '%s%s' % (
# m.group(1),
# m.group(2).capitalize()
# ), line)
#
# line = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), line)
#
# line = SUBPHRASE.sub(lambda m: '%s%s' % (
# m.group(1),
# m.group(2).capitalize()
# ), line)
#
# return line
. Output only the next line. | def titlecase(self): |
Based on the snippet: <|code_start|>#!/usr/bin/python
# ckblip.py
# checks metadata on blib
# like 'is there a flash version?'
# or 'if a format is not on blip, is it local?'
# and maybe upload it.
# and maybe delete it:
# if the .fvl is not the same file size as the local copy on disk,
# delete the one on blip.
class ckblip(process):
last_del_id=None
def post(self, ep, file_types_to_upload, user, password ):
blip_cli=blip_uploader.Blip_CLI()
files = []
for ext in file_types_to_upload:
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import xml.etree.ElementTree
from . import blip_uploader
from .post_blip import roles
from . import pw
from process import process
from main.models import Episode
and context (classes, functions, sometimes code) from other files:
# Path: dj/scripts/post_blip.py
# class post(process):
# def process_ep(self, ep):
# def add_more_options(self, parser):
. Output only the next line. | role=roles.get(ext, {'description':"extra",'num':'9'}) |
Continue the code snippet: <|code_start|> 'postgresql': "concurrency.triggers.PostgreSQL",
'mysql': "concurrency.triggers.MySQL",
'sqlite3': "concurrency.triggers.Sqlite3",
'sqlite': "concurrency.triggers.Sqlite3",
}
}
def __init__(self, prefix):
"""
Loads our settings from django.conf.settings, applying defaults for any
that are omitted.
"""
self.prefix = prefix
for name, default in self.defaults.items():
prefix_name = (self.prefix + '_' + name).upper()
value = getattr(settings, prefix_name, default)
self._set_attr(prefix_name, value)
setattr(settings, prefix_name, value)
setting_changed.send(self.__class__, setting=prefix_name, value=value, enter=True)
setting_changed.connect(self._handler)
def _set_attr(self, prefix_name, value):
name = prefix_name[len(self.prefix) + 1:]
if name == 'IGNORE_DEFAULT':
raise ImproperlyConfigured('IGNORE_DEFAULT has been removed in django-concurrency 1.5. '
'Use VERSION_FIELD_REQUIRED instead')
elif name == 'CALLBACK':
if isinstance(value, str):
<|code_end|>
. Use current file imports:
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.test.signals import setting_changed
from django.utils.module_loading import import_string
from .compat import get_callable
from django.conf import settings
and context (classes, functions, or code) from other files:
# Path: src/concurrency/compat.py
. Output only the next line. | func = get_callable(value) |
Based on the snippet: <|code_start|> old_value = getattr(model_instance, self.attname, 0)
return max(int(old_value) + 1, (int(time.time() * 1000000) - OFFSET))
class AutoIncVersionField(VersionField):
"""
Version Field increment the revision number each commit
"""
form_class = forms.VersionField
def _get_next_version(self, model_instance):
return int(getattr(model_instance, self.attname, 0)) + 1
class TriggerVersionField(VersionField):
"""
Version Field increment the revision number each commit
"""
form_class = forms.VersionField
def __init__(self, *args, **kwargs):
self._trigger_name = kwargs.pop('trigger_name', None)
self._trigger_exists = False
super().__init__(*args, **kwargs)
def contribute_to_class(self, cls, *args, **kwargs):
super().contribute_to_class(cls, *args, **kwargs)
if not cls._meta.abstract or cls._meta.proxy:
<|code_end|>
, predict the immediate next line with the help of imports:
import copy
import functools
import hashlib
import logging
import time
from collections import OrderedDict
from functools import update_wrapper
from django.db import models
from django.db.models import signals
from django.db.models.fields import Field
from django.db.models.signals import class_prepared, post_migrate
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
from concurrency import forms
from concurrency.api import get_revision_of_object
from concurrency.config import conf
from concurrency.core import ConcurrencyOptions
from concurrency.utils import fqn, refetch
from .triggers import _TRIGGERS
from django.db import connections
from concurrency.triggers import create_triggers
from django.core.checks import Warning
from django.db import connections, router
from concurrency.triggers import factory
from concurrency.triggers import get_trigger_name
and context (classes, functions, sometimes code) from other files:
# Path: src/concurrency/triggers.py
# _TRIGGERS = TriggerRegistry()
. Output only the next line. | if self not in _TRIGGERS: |
Here is a snippet: <|code_start|>
def test_basic_term_variables():
assert_true("term_variables(X, [X]).")
assert_false("term_variables(X, []).")
assert_true("term_variables(f(X, Y), [X, Y]).")
assert_true("term_variables(a, []).")
assert_true("term_variables(123, []).")
assert_true("term_variables(f(Z, g(X), Y), [Z, X, Y]).")
assert_false("term_variables(a, a).")
def test_more_advanced_term_variables():
assert_true("term_variables([Y,Y,X,X],[Y,X]).")
assert_true("term_variables([X, Y, a, f(g(A), X)], [X, Y, A]).")
assert_true("term_variables((A :- B, C, A), [A,B,C]).")
assert_true("term_variables(f(X, f(X)), [X]).")
assert_true("X = 1, term_variables(f(X, Y), L), L == [Y], Y = 2.")
assert_true("X = Y, term_variables(f(X, Y), L), L == [Y], Y = 2.")
def test_var_binding():
assert_true("X = a, term_variables(X, []).")
assert_true("term_variables(X, L), X = a, L = [a].")
assert_true("X = f(A,B), term_variables(X, [A,B]).")
def test_term_variables_huge_list():
py.test.skip("")
<|code_end|>
. Write the next line using the current file imports:
import py
from prolog.interpreter.test.tool import assert_true, assert_false, prolog_raises
from prolog.interpreter.test.tool import get_engine
and context from other files:
# Path: prolog/interpreter/test/tool.py
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
#
# def prolog_raises(exc, query, e=None):
# prolog_catch = "catch(((%s), fail), error(%s), true)." % (query, exc)
# return assert_true(prolog_catch, e)
#
# Path: prolog/interpreter/test/tool.py
# def assert_true(query, e=None):
# def assert_false(query, e=None):
# def prolog_raises(exc, query, e=None):
# def __init__(self, module, vars):
# def activate(self, fcont, heap):
# def collect_all(engine, s):
# def parse(inp):
# def create_file(name, content):
# def delete_file(name):
# def create_dir(name):
# def delete_dir(name):
# def file_content(src):
# class CollectAllContinuation(Continuation):
, which may include functions, classes, or code. Output only the next line. | e = get_engine(""" |
Based on the snippet: <|code_start|> self.engine = engine
self.nextcont = None
self._candiscard = True
self.seen = seen
self.module = self.engine.modulewrapper.user_module
def is_done(self):
return False
def activate(self, fcont, heap):
original_heap = heap
# hack: use _dot to count size of tree
seen = set()
list(fcont._dot(seen))
assert len(seen) < self.seen
depth = 0
while fcont.nextcont:
depth += 1
fcont = fcont.nextcont
assert depth < self.seen
depth = 0
numvars = 0
while heap:
depth += 1
numvars += heap.i
heap = heap.prev
assert depth < self.seen
assert numvars < self.seen
return DoneSuccessContinuation(self.engine), DoneFailureContinuation(self.engine), original_heap
def test_cut():
<|code_end|>
, predict the immediate next line with the help of imports:
import py
from prolog.interpreter.continuation import *
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.error import UnificationFailed
from prolog.interpreter.test.tool import collect_all, assert_true, assert_false
from prolog.interpreter.term import Number
and context (classes, functions, sometimes code) from other files:
# Path: prolog/interpreter/parsing.py
# def parse_query_term(s):
# return get_query_and_vars(s)[0]
#
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/error.py
# class UnificationFailed(PrologError):
# pass
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
#
# Path: prolog/interpreter/term.py
# class Number(Numeric):#, UnboxedValue):
# TYPE_STANDARD_ORDER = 1
# __slots__ = ("num", )
# _immutable_fields_ = ["num"]
#
# def __init__(self, val):
# self.num = val
#
# @specialize.arg(3)
# def basic_unify(self, other, heap, occurs_check):
# if isinstance(other, Number) and other.num == self.num:
# return
# raise UnificationFailed
#
# def __str__(self):
# return repr(self.num)
#
# def __repr__(self):
# return "Number(%r)" % (self.num, )
#
# def cmp_standard_order(self, other, heap):
# # XXX looks a bit terrible
# if isinstance(other, Number):
# return rcmp(self.num, other.num)
# elif isinstance(other, Float):
# # int/float mixed are always compared as floats
# return rcmp(float(self.num), other.floatval)
# elif isinstance(other, BigInt):
# return bigint_cmp(rbigint.fromint(self.num), other.value)
# assert 0
#
# def quick_unify_check(self, other):
# other = other.dereference(None)
# if isinstance(other, Var):
# return True
# return isinstance(other, Number) and other.num == self.num
. Output only the next line. | e = get_engine(""" |
Given snippet: <|code_start|> self.module = self.engine.modulewrapper.user_module
def is_done(self):
return False
def activate(self, fcont, heap):
original_heap = heap
# hack: use _dot to count size of tree
seen = set()
list(fcont._dot(seen))
assert len(seen) < self.seen
depth = 0
while fcont.nextcont:
depth += 1
fcont = fcont.nextcont
assert depth < self.seen
depth = 0
numvars = 0
while heap:
depth += 1
numvars += heap.i
heap = heap.prev
assert depth < self.seen
assert numvars < self.seen
return DoneSuccessContinuation(self.engine), DoneFailureContinuation(self.engine), original_heap
def test_cut():
e = get_engine("""
f(0).
f(X) :- X>0, X0 is X - 1, !, f(X0).
f(_).""")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import py
from prolog.interpreter.continuation import *
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.error import UnificationFailed
from prolog.interpreter.test.tool import collect_all, assert_true, assert_false
from prolog.interpreter.term import Number
and context:
# Path: prolog/interpreter/parsing.py
# def parse_query_term(s):
# return get_query_and_vars(s)[0]
#
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/error.py
# class UnificationFailed(PrologError):
# pass
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
#
# Path: prolog/interpreter/term.py
# class Number(Numeric):#, UnboxedValue):
# TYPE_STANDARD_ORDER = 1
# __slots__ = ("num", )
# _immutable_fields_ = ["num"]
#
# def __init__(self, val):
# self.num = val
#
# @specialize.arg(3)
# def basic_unify(self, other, heap, occurs_check):
# if isinstance(other, Number) and other.num == self.num:
# return
# raise UnificationFailed
#
# def __str__(self):
# return repr(self.num)
#
# def __repr__(self):
# return "Number(%r)" % (self.num, )
#
# def cmp_standard_order(self, other, heap):
# # XXX looks a bit terrible
# if isinstance(other, Number):
# return rcmp(self.num, other.num)
# elif isinstance(other, Float):
# # int/float mixed are always compared as floats
# return rcmp(float(self.num), other.floatval)
# elif isinstance(other, BigInt):
# return bigint_cmp(rbigint.fromint(self.num), other.value)
# assert 0
#
# def quick_unify_check(self, other):
# other = other.dereference(None)
# if isinstance(other, Var):
# return True
# return isinstance(other, Number) and other.num == self.num
which might include code, classes, or functions. Output only the next line. | query = Callable.build("f", [Number(100)]) |
Given the following code snippet before the placeholder: <|code_start|>
if not option.slow:
py.test.skip("slow tests")
def test_parser():
def f(x):
if x:
s = "a(X, Y, Z)."
else:
s = "f(a, X, _, _, X, f(X, 2.455))."
<|code_end|>
, predict the next line using imports from the current file:
from rpython.translator.interactive import Translation
from rpython.rpython.test.test_llinterp import interpret
from prolog.interpreter import parsing
from prolog.interpreter.term import Atom
from prolog.interpreter.test.tool import *
from prolog.interpreter.conftest import option
and context including class names, function names, and sometimes code from other files:
# Path: prolog/interpreter/parsing.py
# def make_regexes():
# def make_default_operations():
# def make_from_form(form, op, x, y):
# def make_expansion(y, x, allops):
# def eliminate_immediate_left_recursion(symbol, expansions):
# def make_all_rules(standard_rules, operations=None):
# def add_necessary_regexs(regexs, names, operations=None):
# def terminal_equality(self, symbol, input):
# def match_symbol(self, i, symbol):
# def __init__(self, rules, startsymbol):
# def make_basic_rules():
# def make_parser(basic_rules, names, regexs):
# def make_all():
# def make_parser_at_runtime(operations):
# def _dummyfunc(arg, tree, source_string, file_name):
# def parse_file(s, parser=None, callback=_dummyfunc, arg=None, file_name=None):
# def _parse_file(s, parser, callback, arg, file_name):
# def parse_query(s):
# def parse_query_term(s):
# def get_query_and_vars(s):
# def transform(self, node):
# def transform_extra(self, extranode, child):
# def __init__(self):
# def build(self, s):
# def build_many(self, trees):
# def build_query(self, s):
# def build_fact(self, node):
# def visit(self, node):
# def general_nonterminal_visit(self, node):
# def build_list(self, node):
# def _build_list(self, node, result):
# def find_first_interesting(self, node):
# def general_symbol_visit(self, node):
# def visit_VAR(self, node):
# def visit_NUMBER(self, node):
# def visit_FLOAT(self, node):
# def visit_STRING(self, node):
# def visit_complexterm(self, node):
# def visit_expr(self, node):
# def visit_listexpr(self, node):
# def unescape(s):
# def get_engine(source, create_files=False, load_system=False, **modules):
# def recognize(runner, i):
# class PrologParseTable(LazyParseTable):
# class PrologPackratParser(PackratParser):
# class OrderTransformer(object):
# class TermBuilder(RPythonVisitor):
# ESCAPES = {
# "\\a": "\a",
# "\\b": "\b",
# "\\f": "\f",
# "\\n": "\n",
# "\\r": "\r",
# "\\t": "\t",
# "\\v": "\v",
# "\\\\": "\\"
# }
#
# Path: prolog/interpreter/term.py
# class Atom(Callable):
# TYPE_STANDARD_ORDER = 2
# __slots__ = ('_name', '_signature')
# cache = {}
# _immutable_fields_ = ["_signature"]
#
# def __init__(self, name, signature=None):
# if signature is None:
# signature = Signature(name, 0)
# Callable.__init__(self)
# self._signature = signature
#
# def __str__(self):
# return self.name()
#
# def __repr__(self):
# return "Atom(%r)" % (self.name(),)
#
# @staticmethod
# @jit.elidable
# def newatom(name, signature=None):
# if signature is None:
# signature = Signature.getsignature(name, 0)
# result = Atom.cache.get(signature, None)
# if result is not None:
# return result
# Atom.cache[signature] = result = Atom(name, signature)
# return result
#
# def eval_arithmetic(self, engine):
# #XXX beautify that
# if self.name() == "pi":
# return Float.pi
# if self.name() == "e":
# return Float.e
# error.throw_type_error("evaluable", self.get_prolog_signature())
#
# def arguments(self):
# return []
#
# def argument_at(self, i):
# raise IndexError
#
# def argument_count(self):
# return 0
#
# def name(self):
# return self._signature.name
#
# def signature(self):
# return self._signature
#
# def basic_unify_and_standardize_apart(self, other, heap, env):
# if not isinstance(other, Atom):
# return Callable.basic_unify_and_standardize_apart(self, other, heap, env)
# if not self.signature().eq(other.signature()):
# raise UnificationFailed
. Output only the next line. | term = parsing.parse_file(s) |
Given snippet: <|code_start|> if x:
s = "a(X, Y, Z)."
else:
s = "f(a, X, _, _, X, f(X, 2.455))."
term = parsing.parse_file(s)
assert isinstance(term, parsing.Nonterminal)
return term.symbol
assert f(True) == "file"
assert f(True) == "file"
t = Translation(f)
t.annotate([bool])
t.rtype()
t.backendopt()
func = t.compile_c()
assert func(True) == "file"
assert func(False) == "file"
def test_engine():
e = get_engine("""
g(a, a).
g(a, b).
g(b, c).
f(X, Z) :- g(X, Y), g(Y, Z).
""")
t1 = parse_query_term("f(a, c).")
t2 = parse_query_term("f(X, c).")
def run():
e.run(t1)
e.run(t2)
v0 = e.heap.getvar(0)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from rpython.translator.interactive import Translation
from rpython.rpython.test.test_llinterp import interpret
from prolog.interpreter import parsing
from prolog.interpreter.term import Atom
from prolog.interpreter.test.tool import *
from prolog.interpreter.conftest import option
and context:
# Path: prolog/interpreter/parsing.py
# def make_regexes():
# def make_default_operations():
# def make_from_form(form, op, x, y):
# def make_expansion(y, x, allops):
# def eliminate_immediate_left_recursion(symbol, expansions):
# def make_all_rules(standard_rules, operations=None):
# def add_necessary_regexs(regexs, names, operations=None):
# def terminal_equality(self, symbol, input):
# def match_symbol(self, i, symbol):
# def __init__(self, rules, startsymbol):
# def make_basic_rules():
# def make_parser(basic_rules, names, regexs):
# def make_all():
# def make_parser_at_runtime(operations):
# def _dummyfunc(arg, tree, source_string, file_name):
# def parse_file(s, parser=None, callback=_dummyfunc, arg=None, file_name=None):
# def _parse_file(s, parser, callback, arg, file_name):
# def parse_query(s):
# def parse_query_term(s):
# def get_query_and_vars(s):
# def transform(self, node):
# def transform_extra(self, extranode, child):
# def __init__(self):
# def build(self, s):
# def build_many(self, trees):
# def build_query(self, s):
# def build_fact(self, node):
# def visit(self, node):
# def general_nonterminal_visit(self, node):
# def build_list(self, node):
# def _build_list(self, node, result):
# def find_first_interesting(self, node):
# def general_symbol_visit(self, node):
# def visit_VAR(self, node):
# def visit_NUMBER(self, node):
# def visit_FLOAT(self, node):
# def visit_STRING(self, node):
# def visit_complexterm(self, node):
# def visit_expr(self, node):
# def visit_listexpr(self, node):
# def unescape(s):
# def get_engine(source, create_files=False, load_system=False, **modules):
# def recognize(runner, i):
# class PrologParseTable(LazyParseTable):
# class PrologPackratParser(PackratParser):
# class OrderTransformer(object):
# class TermBuilder(RPythonVisitor):
# ESCAPES = {
# "\\a": "\a",
# "\\b": "\b",
# "\\f": "\f",
# "\\n": "\n",
# "\\r": "\r",
# "\\t": "\t",
# "\\v": "\v",
# "\\\\": "\\"
# }
#
# Path: prolog/interpreter/term.py
# class Atom(Callable):
# TYPE_STANDARD_ORDER = 2
# __slots__ = ('_name', '_signature')
# cache = {}
# _immutable_fields_ = ["_signature"]
#
# def __init__(self, name, signature=None):
# if signature is None:
# signature = Signature(name, 0)
# Callable.__init__(self)
# self._signature = signature
#
# def __str__(self):
# return self.name()
#
# def __repr__(self):
# return "Atom(%r)" % (self.name(),)
#
# @staticmethod
# @jit.elidable
# def newatom(name, signature=None):
# if signature is None:
# signature = Signature.getsignature(name, 0)
# result = Atom.cache.get(signature, None)
# if result is not None:
# return result
# Atom.cache[signature] = result = Atom(name, signature)
# return result
#
# def eval_arithmetic(self, engine):
# #XXX beautify that
# if self.name() == "pi":
# return Float.pi
# if self.name() == "e":
# return Float.e
# error.throw_type_error("evaluable", self.get_prolog_signature())
#
# def arguments(self):
# return []
#
# def argument_at(self, i):
# raise IndexError
#
# def argument_count(self):
# return 0
#
# def name(self):
# return self._signature.name
#
# def signature(self):
# return self._signature
#
# def basic_unify_and_standardize_apart(self, other, heap, env):
# if not isinstance(other, Atom):
# return Callable.basic_unify_and_standardize_apart(self, other, heap, env)
# if not self.signature().eq(other.signature()):
# raise UnificationFailed
which might include code, classes, or functions. Output only the next line. | if isinstance(v0, Atom): |
Based on the snippet: <|code_start|> make_varlist(Num1, R).
when_ground_list(Num, Millis) :-
make_varlist(Num, List),
statistics(walltime, [T1, _]),
when(ground(List), Z = 1),
when_ground_list_inner(List),
statistics(walltime, [T2, _]),
Z == 1,
Millis is T2 - T1.
when_ground_list_inner([]).
when_ground_list_inner([H|R]) :-
H = a,
when_ground_list_inner(R).
""", load_system=True,
mod1 = """
:- module(mod1, [f/2]).
f(N, N1) :-
mod2:g(N, N1).
""",
mod2 = """
:- module(mod2, [g/2]).
g(N, N1) :-
N1 is N - 1.
"""
)
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
from rpython import conftest
from rpython.jit.metainterp.test.test_ajit import LLJitMixin
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.continuation import jitdriver
and context (classes, functions, sometimes code) from other files:
# Path: prolog/interpreter/parsing.py
# def parse_query_term(s):
# return get_query_and_vars(s)[0]
#
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/continuation.py
# def get_printable_location(rule, sconttype):
# def get_jitcell_at(where, rule):
# def set_jitcell_at(newcell, where, rule):
# def driver(scont, fcont, heap):
# def _process_hooks(scont, fcont, heap):
# def __init__(self, load_system=False):
# def _freeze_(self):
# def add_rule(self, ruleterm, end=True):
# def make_rule(self, ruleterm, module):
# def get_builtin(self, signature):
# def _build_and_run(self, tree, source_string, file_name):
# def _term_expand(self, term):
# def runstring(self, s, file_name=None):
# def parse(self, s, file_name=None):
# def getoperations(self):
# def run_query(self, query, module, continuation=None):
# def run_query_in_current(self, query, continuation=None):
# def call(self, query, rule, scont, fcont, heap):
# def call_in_module(self, query, module, scont, fcont, heap):
# def _get_function(self, signature, module, query):
# def switch_module(self, modulename):
# def throw(self, exc, scont, fcont, heap, rule_likely_source=None):
# def __freeze__(self):
# def _make_rule_conts(engine, scont, fcont, heap, query, rulechain):
# def _dot(self, seen):
# def __new__(cls, name, bases, dct):
# def __init__(self, engine, nextcont):
# def activate(self, fcont, heap):
# def is_done(self):
# def find_end_of_cut(self):
# def __init__(self, engine, nextcont, rule):
# def view(*objects, **names):
# def __init__(self, engine, nextcont, orig_fcont, heap):
# def fail(self, heap):
# def cut(self, upto, heap):
# def is_done(self):
# def make_failure_continuation(make_func):
# def __init__(self, engine, scont, fcont, heap, *state):
# def fail(self, heap):
# def make_func_wrapper(*args):
# def __init__(self, engine):
# def is_done(self):
# def __init__(self, engine):
# def fail(self, heap):
# def is_done(self):
# def __init__(self, engine, rule, nextcont, body):
# def activate(self, fcont, heap):
# def __repr__(self):
# def __init__(self, engine, rule, nextcont, builtin, query):
# def activate(self, fcont, heap):
# def __repr__(self):
# def __init__(self, engine, nextcont, orig_fcont, heap, query, rulechain):
# def fail(self, heap):
# def __repr__(self):
# def __init__(self, engine, nextcont, rule, query):
# def activate(self, fcont, heap):
# def __repr__(self):
# def __init__(self, engine, nextcont, fcont_after_cut):
# def insert_scope_notifier(engine, nextcont, fcont):
# def find_end_of_cut(self):
# def activate(self, fcont, heap):
# def __init__(self, engine, rule, nextcont, fcont, catcher, recover, heap):
# def activate(self, fcont, heap):
# def __repr__(self):
# class Engine(object):
# class MetaCont(type):
# class Continuation(object):
# class ContinuationWithRule(Continuation):
# class FailureContinuation(object):
# class C(FailureContinuation):
# class DoneSuccessContinuation(Continuation):
# class DoneFailureContinuation(FailureContinuation):
# class BodyContinuation(ContinuationWithRule):
# class BuiltinContinuation(ContinuationWithRule):
# class UserCallContinuation(FailureContinuation):
# class RuleContinuation(ContinuationWithRule):
# class CutScopeNotifier(Continuation):
# class CatchingDelimiter(ContinuationWithRule):
. Output only the next line. | t1 = parse_query_term("app([1, 2, 3, 4, 5, 6], [8, 9], X), X == [1, 2, 3, 4, 5, 6, 8, 9].") |
Based on the snippet: <|code_start|>class o:
view = False
viewloops = True
conftest.option = o
class TestLLtype(LLJitMixin):
def test_append(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
from rpython import conftest
from rpython.jit.metainterp.test.test_ajit import LLJitMixin
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.continuation import jitdriver
and context (classes, functions, sometimes code) from other files:
# Path: prolog/interpreter/parsing.py
# def parse_query_term(s):
# return get_query_and_vars(s)[0]
#
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/continuation.py
# def get_printable_location(rule, sconttype):
# def get_jitcell_at(where, rule):
# def set_jitcell_at(newcell, where, rule):
# def driver(scont, fcont, heap):
# def _process_hooks(scont, fcont, heap):
# def __init__(self, load_system=False):
# def _freeze_(self):
# def add_rule(self, ruleterm, end=True):
# def make_rule(self, ruleterm, module):
# def get_builtin(self, signature):
# def _build_and_run(self, tree, source_string, file_name):
# def _term_expand(self, term):
# def runstring(self, s, file_name=None):
# def parse(self, s, file_name=None):
# def getoperations(self):
# def run_query(self, query, module, continuation=None):
# def run_query_in_current(self, query, continuation=None):
# def call(self, query, rule, scont, fcont, heap):
# def call_in_module(self, query, module, scont, fcont, heap):
# def _get_function(self, signature, module, query):
# def switch_module(self, modulename):
# def throw(self, exc, scont, fcont, heap, rule_likely_source=None):
# def __freeze__(self):
# def _make_rule_conts(engine, scont, fcont, heap, query, rulechain):
# def _dot(self, seen):
# def __new__(cls, name, bases, dct):
# def __init__(self, engine, nextcont):
# def activate(self, fcont, heap):
# def is_done(self):
# def find_end_of_cut(self):
# def __init__(self, engine, nextcont, rule):
# def view(*objects, **names):
# def __init__(self, engine, nextcont, orig_fcont, heap):
# def fail(self, heap):
# def cut(self, upto, heap):
# def is_done(self):
# def make_failure_continuation(make_func):
# def __init__(self, engine, scont, fcont, heap, *state):
# def fail(self, heap):
# def make_func_wrapper(*args):
# def __init__(self, engine):
# def is_done(self):
# def __init__(self, engine):
# def fail(self, heap):
# def is_done(self):
# def __init__(self, engine, rule, nextcont, body):
# def activate(self, fcont, heap):
# def __repr__(self):
# def __init__(self, engine, rule, nextcont, builtin, query):
# def activate(self, fcont, heap):
# def __repr__(self):
# def __init__(self, engine, nextcont, orig_fcont, heap, query, rulechain):
# def fail(self, heap):
# def __repr__(self):
# def __init__(self, engine, nextcont, rule, query):
# def activate(self, fcont, heap):
# def __repr__(self):
# def __init__(self, engine, nextcont, fcont_after_cut):
# def insert_scope_notifier(engine, nextcont, fcont):
# def find_end_of_cut(self):
# def activate(self, fcont, heap):
# def __init__(self, engine, rule, nextcont, fcont, catcher, recover, heap):
# def activate(self, fcont, heap):
# def __repr__(self):
# class Engine(object):
# class MetaCont(type):
# class Continuation(object):
# class ContinuationWithRule(Continuation):
# class FailureContinuation(object):
# class C(FailureContinuation):
# class DoneSuccessContinuation(Continuation):
# class DoneFailureContinuation(FailureContinuation):
# class BodyContinuation(ContinuationWithRule):
# class BuiltinContinuation(ContinuationWithRule):
# class UserCallContinuation(FailureContinuation):
# class RuleContinuation(ContinuationWithRule):
# class CutScopeNotifier(Continuation):
# class CatchingDelimiter(ContinuationWithRule):
. Output only the next line. | e = get_engine(""" |
Continue the code snippet: <|code_start|>
class EnumerationMemo(object):
"""A memo object to enumerate the variables in a term"""
def __init__(self):
self.seen = {}
self.varcount = 0
def get(self, var):
res = self.seen.get(var, None)
if not res:
<|code_end|>
. Use current file imports:
from prolog.interpreter.term import NumberedVar
and context (classes, functions, or code) from other files:
# Path: prolog/interpreter/term.py
# class NumberedVar(PrologObject):
# _immutable_fields_ = ["num"]
# def __init__(self, index):
# self.num = index
#
# def copy_standardize_apart(self, heap, env):
# if self.num < 0:
# return heap.newvar()
# res = env[self.num]
# if res is None:
# res = env[self.num] = heap.newvar()
# return res
#
# def copy_standardize_apart_as_child_of(self, heap, env, parent, index):
# if self.num < 0:
# return heap.newvar_in_term(parent, index)
# res = env[self.num]
# if res is None:
# res = env[self.num] = heap.newvar_in_term(parent, index)
# return res
#
# def unify_and_standardize_apart(self, other, heap, env):
# if self.num < 0:
# return other
# res = env[self.num]
# if res is None:
# other = env[self.num] = other #.dereference(heap)
# return other
# res.unify(other, heap)
# return res
#
# def dereference(self, heap):
# return self
#
# def __repr__(self):
# return "NumberedVar(%s)" % (self.num, )
. Output only the next line. | self.seen[var] = res = NumberedVar(-1) |
Given the code snippet: <|code_start|>
def test_eq():
sig1 = Signature("a", 0)
assert sig1.eq(sig1)
sig2 = Signature("a", 0)
assert sig1.eq(sig2)
sig3 = Signature("a", 1)
assert not sig1.eq(sig3)
sig4 = Signature("b", 0)
assert not sig1.eq(sig4)
def test_cache():
<|code_end|>
, generate the next line using the imports in this file:
from prolog.interpreter.signature import Signature, SignatureFactory
and context (functions, classes, or occasionally code) from other files:
# Path: prolog/interpreter/signature.py
# class Signature(object):
# """An object representing the signature of a Prolog term."""
#
# _cache = SignatureFactory()
#
# _immutable_fields_ = ["name", "numargs", "atom_signature", "factory"]
#
# def __init__(self, name, numargs, cached=False, factory=None):
# assert name is not None
# assert isinstance(name, str)
# name = rstring.assert_str0(name)
# self.name = name
# self.numargs = numargs
# self.cached = cached
# if factory is None:
# factory = self._cache
# self.factory = factory
# if numargs:
# atom_signature = factory.getsignature(name, 0, cached)
# else:
# atom_signature = self
# self.atom_signature = atom_signature
# factory.init_extra_attrs(self)
#
# def eq(self, other):
# # slightly evil
# if jit.isconstant(self):
# jit.promote(other)
# elif jit.isconstant(other):
# jit.promote(self)
# return self is other or (
# self.numargs == other.numargs and
# self.name == other.name)
#
# @specialize.arg(1)
# def get_extra(self, name):
# aname = "extra_attr_" + name
# if not we_are_translated():
# assert aname in self.factory.extra_attr_names
# self = self.ensure_cached()
# return getattr(self, aname)
#
# @specialize.arg(1)
# def set_extra(self, name, val):
# aname = "extra_attr_" + name
# if not we_are_translated():
# assert aname in self.factory.extra_attr_names
# self = self.ensure_cached()
# setattr(self, aname, val)
#
#
# @specialize.arg(1)
# def get_extra_engine_local(self, name, engine):
# ename = "extra_attr_engine_" + name
# if not we_are_translated():
# assert ename in self.factory.extra_attr_names
# if getattr(self, ename) is not engine:
# setattr(self, ename, engine)
# aname = "extra_attr_" + name
# setattr(self, aname, None)
# return self.get_extra(name)
#
# @specialize.arg(1)
# def set_extra_engine_local(self, name, val, engine):
# ename = "extra_attr_engine_" + name
# setattr(self, ename, engine)
# self.set_extra(name, val)
#
# def ensure_cached(self):
# if self.cached:
# return self
# return self.factory.ensure_cached(self)
#
# def string(self):
# return "%s/%s" % (self.name, self.numargs)
#
# def __repr__(self):
# return "<Signature %s>" % (self.string(), )
#
# @staticmethod
# @jit.elidable
# def getsignature(name, numargs):
# return Signature._cache.getsignature(name, numargs)
#
# @staticmethod
# def register_extr_attr(name, engine=False, default=None):
# Signature._cache.register_extr_attr(name, engine, default)
#
# class SignatureFactory(object):
# def __init__(self):
# self.cache = {}
# self.extra_attrs = []
# self.extra_attr_names = []
# self.init_extra_attrs = lambda self: None
#
# def getsignature(self, name, numargs, cache=True):
# if cache:
# return self._getsignature_elidable(name, numargs)
# return self._getsignature(name, numargs, False)
#
# @jit.elidable
# def _getsignature_elidable(self, name, numargs):
# return self._getsignature(name, numargs, True)
#
# def _getsignature(self, name, numargs, cache):
# if (name, numargs) in self.cache:
# return self.cache[name, numargs]
# res = Signature(name, numargs, cached=cache, factory=self)
# if cache:
# self.cache[name, numargs] = res
# return res
#
# def ensure_cached(self, signature):
# sig = self.cache.get((signature.name, signature.numargs), None)
# if sig:
# return sig
# self.cache[signature.name, signature.numargs] = signature
# signature.cached = True
# return signature
#
# def register_extr_attr(self, name, engine=False, default=None):
# aname = "extra_attr_" + name
# ename = "extra_attr_engine_" + name
# self.extra_attr_names.append(aname)
# self.extra_attrs.append((aname, default))
# if engine:
# assert default is None
# self.extra_attr_names.append(ename)
# self.extra_attrs.append((ename, None))
# for signature in self.cache.itervalues():
# setattr(signature, aname, default)
# if engine:
# setattr(signature, ename, None)
# extra_attrs_unrolling = unrolling_iterable(self.extra_attrs)
# def init_extra_attrs(signature):
# for attr, val in extra_attrs_unrolling:
# setattr(signature, attr, val)
# self.init_extra_attrs = init_extra_attrs
#
# def __freeze__(self):
# return True
. Output only the next line. | factory = SignatureFactory() |
Based on the snippet: <|code_start|>#coding=utf-8
def test_strings():
assert_true('X = "abc", X = [97, 98, 99].')
assert_true('X = "", X = [].')
assert_true('X = [97], X = "a".')
assert_true('X = [97], X = Y, Y = "a".')
<|code_end|>
, predict the immediate next line with the help of imports:
import py
from prolog.interpreter.test.tool import assert_true, assert_false
and context (classes, functions, sometimes code) from other files:
# Path: prolog/interpreter/test/tool.py
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
. Output only the next line. | assert_false('X = "a", X = \'a\'.') |
Predict the next line for this snippet: <|code_start|> def activate(self, fcont, heap):
all.append((X.dereference(heap).name(), Y.dereference(heap).name()))
raise error.UnificationFailed
e.add_rule(Callable.build("f", [Callable.build("x")]), True)
e.add_rule(Callable.build("f", [Callable.build("y")]), True)
e.add_rule(Callable.build("g", [Callable.build("a")]), True)
e.add_rule(Callable.build("g", [Callable.build("b")]), True)
X = BindingVar()
Y = BindingVar()
query = Callable.build(",", [Callable.build("f", [X]), Callable.build("g", [Y])])
py.test.raises(error.UnificationFailed,
e.run_query, query, e.modulewrapper.user_module, CollectContinuation())
assert all == [("x", "a"), ("x", "b"), ("y", "a"), ("y", "b")]
def test_cut_not_reached():
class CheckContinuation(Continuation):
def __init__(self):
self.nextcont = None
self.module = e.modulewrapper.user_module
def is_done(self):
return False
def activate(self, fcont, heap):
assert fcont.is_done()
return DoneSuccessContinuation(e), DoneFailureContinuation(e), heap
e = get_engine("""
g(X, Y) :- X > 0, !, Y = a.
g(_, b).
""")
<|code_end|>
with the help of current file imports:
import py
from prolog.interpreter.continuation import *
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.error import UnificationFailed
from prolog.interpreter.test.tool import collect_all, assert_true, assert_false
from prolog.interpreter.term import BindingVar, Atom, Term
and context from other files:
# Path: prolog/interpreter/parsing.py
# def parse_query_term(s):
# return get_query_and_vars(s)[0]
#
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/error.py
# class UnificationFailed(PrologError):
# pass
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
, which may contain function names, class names, or code. Output only the next line. | e.run_query_in_current(parse_query_term("g(-1, Y), Y == b, g(1, Z), Z == a."), |
Predict the next line after this snippet: <|code_start|> def is_done(self):
return False
def discard(self):
pass
def activate(self, fcont, heap):
all.append((X.dereference(heap).name(), Y.dereference(heap).name()))
raise error.UnificationFailed
e.add_rule(Callable.build("f", [Callable.build("x")]), True)
e.add_rule(Callable.build("f", [Callable.build("y")]), True)
e.add_rule(Callable.build("g", [Callable.build("a")]), True)
e.add_rule(Callable.build("g", [Callable.build("b")]), True)
X = BindingVar()
Y = BindingVar()
query = Callable.build(",", [Callable.build("f", [X]), Callable.build("g", [Y])])
py.test.raises(error.UnificationFailed,
e.run_query, query, e.modulewrapper.user_module, CollectContinuation())
assert all == [("x", "a"), ("x", "b"), ("y", "a"), ("y", "b")]
def test_cut_not_reached():
class CheckContinuation(Continuation):
def __init__(self):
self.nextcont = None
self.module = e.modulewrapper.user_module
def is_done(self):
return False
def activate(self, fcont, heap):
assert fcont.is_done()
return DoneSuccessContinuation(e), DoneFailureContinuation(e), heap
<|code_end|>
using the current file's imports:
import py
from prolog.interpreter.continuation import *
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.error import UnificationFailed
from prolog.interpreter.test.tool import collect_all, assert_true, assert_false
from prolog.interpreter.term import BindingVar, Atom, Term
and any relevant context from other files:
# Path: prolog/interpreter/parsing.py
# def parse_query_term(s):
# return get_query_and_vars(s)[0]
#
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/error.py
# class UnificationFailed(PrologError):
# pass
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
. Output only the next line. | e = get_engine(""" |
Predict the next line for this snippet: <|code_start|> query = Callable.build(",", [Callable.build("f", [X]), Callable.build("g", [Y])])
py.test.raises(error.UnificationFailed,
e.run_query, query, e.modulewrapper.user_module, CollectContinuation())
assert all == [("x", "a"), ("x", "b"), ("y", "a"), ("y", "b")]
def test_cut_not_reached():
class CheckContinuation(Continuation):
def __init__(self):
self.nextcont = None
self.module = e.modulewrapper.user_module
def is_done(self):
return False
def activate(self, fcont, heap):
assert fcont.is_done()
return DoneSuccessContinuation(e), DoneFailureContinuation(e), heap
e = get_engine("""
g(X, Y) :- X > 0, !, Y = a.
g(_, b).
""")
e.run_query_in_current(parse_query_term("g(-1, Y), Y == b, g(1, Z), Z == a."),
CheckContinuation())
# ___________________________________________________________________
# integration tests
def test_trivial():
e = get_engine("""
f(a).
""")
<|code_end|>
with the help of current file imports:
import py
from prolog.interpreter.continuation import *
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.error import UnificationFailed
from prolog.interpreter.test.tool import collect_all, assert_true, assert_false
from prolog.interpreter.term import BindingVar, Atom, Term
and context from other files:
# Path: prolog/interpreter/parsing.py
# def parse_query_term(s):
# return get_query_and_vars(s)[0]
#
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/error.py
# class UnificationFailed(PrologError):
# pass
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
, which may contain function names, class names, or code. Output only the next line. | t, vars = get_query_and_vars("f(X).") |
Predict the next line after this snippet: <|code_start|>
def test_driver():
order = []
done = DoneFailureContinuation(None)
class FakeC(object):
rule = None
cont_type_name = "FakeC"
def __init__(self, next, val):
self.next = next
self.val = val
def is_done(self):
return False
def activate(self, fcont, heap):
if self.val == -1:
<|code_end|>
using the current file's imports:
import py
from prolog.interpreter.continuation import *
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.error import UnificationFailed
from prolog.interpreter.test.tool import collect_all, assert_true, assert_false
from prolog.interpreter.term import BindingVar, Atom, Term
and any relevant context from other files:
# Path: prolog/interpreter/parsing.py
# def parse_query_term(s):
# return get_query_and_vars(s)[0]
#
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/error.py
# class UnificationFailed(PrologError):
# pass
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
. Output only the next line. | raise error.UnificationFailed |
Given snippet: <|code_start|># ___________________________________________________________________
# integration tests
def test_trivial():
e = get_engine("""
f(a).
""")
t, vars = get_query_and_vars("f(X).")
e.run_query_in_current(t)
assert vars['X'].dereference(None).name()== "a"
def test_and():
e = get_engine("""
g(a, a).
g(a, b).
g(b, c).
f(X, Z) :- g(X, Y), g(Y, Z).
""")
e.run_query_in_current(parse_query_term("f(a, c)."))
t, vars = get_query_and_vars("f(X, c).")
e.run_query_in_current(t)
assert vars['X'].dereference(None).name()== "a"
def test_and_long():
e = get_engine("""
f(x). f(y). f(z).
g(a). g(b). g(c).
h(d). h(e). h(f).
f(X, Y, Z) :- f(X), g(Y), h(Z).
""")
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import py
from prolog.interpreter.continuation import *
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.error import UnificationFailed
from prolog.interpreter.test.tool import collect_all, assert_true, assert_false
from prolog.interpreter.term import BindingVar, Atom, Term
and context:
# Path: prolog/interpreter/parsing.py
# def parse_query_term(s):
# return get_query_and_vars(s)[0]
#
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/error.py
# class UnificationFailed(PrologError):
# pass
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
which might include code, classes, or functions. Output only the next line. | heaps = collect_all(e, "f(X, Y, Z).") |
Predict the next line for this snippet: <|code_start|> e.run_query_in_current(parse_query_term("mul(succ(0), 0, 0)."))
e.run_query_in_current(parse_query_term("mul(succ(succ(0)), succ(0), succ(succ(0)))."))
e.run_query_in_current(parse_query_term("mul(succ(succ(0)), succ(succ(0)), succ(succ(succ(succ(0)))))."))
e.run_query_in_current(parse_query_term("factorial(0, succ(0))."))
e.run_query_in_current(parse_query_term("factorial(succ(0), succ(0))."))
e.run_query_in_current(parse_query_term("factorial(%s, %s)." % (nstr(5), nstr(120))))
def test_or_backtrack():
e = get_engine("""
a(a).
b(b).
g(a, b).
g(a, a).
f(X, Y, Z) :- (g(X, Z); g(X, Z); g(Z, Y)), a(Z).
""")
t, vars = get_query_and_vars("f(a, b, Z).")
e.run_query_in_current(t)
assert vars['Z'].dereference(None).name()== "a"
f = collect_all(e, "X = 1; X = 2.")
assert len(f) == 2
def test_backtrack_to_same_choice_point():
e = get_engine("""
a(a).
b(b).
start(Z) :- Z = X, f(X, b), X == b, Z == b.
f(X, Y) :- a(Y).
f(X, Y) :- X = a, a(Y).
f(X, Y) :- X = b, b(Y).
""")
<|code_end|>
with the help of current file imports:
import py
from prolog.interpreter.continuation import *
from prolog.interpreter.parsing import parse_query_term, get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.error import UnificationFailed
from prolog.interpreter.test.tool import collect_all, assert_true, assert_false
from prolog.interpreter.term import BindingVar, Atom, Term
and context from other files:
# Path: prolog/interpreter/parsing.py
# def parse_query_term(s):
# return get_query_and_vars(s)[0]
#
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/error.py
# class UnificationFailed(PrologError):
# pass
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
, which may contain function names, class names, or code. Output only the next line. | assert_true("start(Z).", e) |
Here is a snippet: <|code_start|>
def test_get_source():
content = "some important content"
name = "__testfile__"
try:
create_file(name, content)
<|code_end|>
. Write the next line using the current file imports:
import py
import os
from prolog.builtin.sourcehelper import get_source
from prolog.interpreter.test.tool import collect_all, assert_false, assert_true
from prolog.interpreter.test.tool import prolog_raises, create_file, delete_file
from prolog.interpreter.error import CatchableError
and context from other files:
# Path: prolog/builtin/sourcehelper.py
# def get_source(filename):
# try:
# assert isinstance(filename, str)
# fd, actual_filename = get_filehandle(filename, True)
# except OSError:
# throw_existence_error("source_sink", Callable.build(filename))
# assert 0, "unreachable" # make the flow space happy
# try:
# content = []
# while 1:
# s = os.read(fd, 4096)
# if not s:
# break
# content.append(s)
# file_content = "".join(content)
# finally:
# os.close(fd)
# return file_content, actual_filename
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# Path: prolog/interpreter/test/tool.py
# def prolog_raises(exc, query, e=None):
# prolog_catch = "catch(((%s), fail), error(%s), true)." % (query, exc)
# return assert_true(prolog_catch, e)
#
# def create_file(name, content):
# with open(name, "w") as f:
# f.write(content)
#
# def delete_file(name):
# os.unlink(name)
#
# Path: prolog/interpreter/error.py
# class CatchableError(TermedError): pass
, which may include functions, classes, or code. Output only the next line. | source, file_name = get_source(name) |
Next line prediction: <|code_start|>
def test_get_source():
content = "some important content"
name = "__testfile__"
try:
<|code_end|>
. Use current file imports:
(import py
import os
from prolog.builtin.sourcehelper import get_source
from prolog.interpreter.test.tool import collect_all, assert_false, assert_true
from prolog.interpreter.test.tool import prolog_raises, create_file, delete_file
from prolog.interpreter.error import CatchableError)
and context including class names, function names, or small code snippets from other files:
# Path: prolog/builtin/sourcehelper.py
# def get_source(filename):
# try:
# assert isinstance(filename, str)
# fd, actual_filename = get_filehandle(filename, True)
# except OSError:
# throw_existence_error("source_sink", Callable.build(filename))
# assert 0, "unreachable" # make the flow space happy
# try:
# content = []
# while 1:
# s = os.read(fd, 4096)
# if not s:
# break
# content.append(s)
# file_content = "".join(content)
# finally:
# os.close(fd)
# return file_content, actual_filename
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# Path: prolog/interpreter/test/tool.py
# def prolog_raises(exc, query, e=None):
# prolog_catch = "catch(((%s), fail), error(%s), true)." % (query, exc)
# return assert_true(prolog_catch, e)
#
# def create_file(name, content):
# with open(name, "w") as f:
# f.write(content)
#
# def delete_file(name):
# os.unlink(name)
#
# Path: prolog/interpreter/error.py
# class CatchableError(TermedError): pass
. Output only the next line. | create_file(name, content) |
Given the following code snippet before the placeholder: <|code_start|>
def test_get_source():
content = "some important content"
name = "__testfile__"
try:
create_file(name, content)
source, file_name = get_source(name)
finally:
<|code_end|>
, predict the next line using imports from the current file:
import py
import os
from prolog.builtin.sourcehelper import get_source
from prolog.interpreter.test.tool import collect_all, assert_false, assert_true
from prolog.interpreter.test.tool import prolog_raises, create_file, delete_file
from prolog.interpreter.error import CatchableError
and context including class names, function names, and sometimes code from other files:
# Path: prolog/builtin/sourcehelper.py
# def get_source(filename):
# try:
# assert isinstance(filename, str)
# fd, actual_filename = get_filehandle(filename, True)
# except OSError:
# throw_existence_error("source_sink", Callable.build(filename))
# assert 0, "unreachable" # make the flow space happy
# try:
# content = []
# while 1:
# s = os.read(fd, 4096)
# if not s:
# break
# content.append(s)
# file_content = "".join(content)
# finally:
# os.close(fd)
# return file_content, actual_filename
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# Path: prolog/interpreter/test/tool.py
# def prolog_raises(exc, query, e=None):
# prolog_catch = "catch(((%s), fail), error(%s), true)." % (query, exc)
# return assert_true(prolog_catch, e)
#
# def create_file(name, content):
# with open(name, "w") as f:
# f.write(content)
#
# def delete_file(name):
# os.unlink(name)
#
# Path: prolog/interpreter/error.py
# class CatchableError(TermedError): pass
. Output only the next line. | delete_file(name) |
Based on the snippet: <|code_start|>
def test_get_source():
content = "some important content"
name = "__testfile__"
try:
create_file(name, content)
source, file_name = get_source(name)
finally:
delete_file(name)
assert source == content
assert file_name == os.path.abspath(name)
def test_source_does_not_exist():
<|code_end|>
, predict the immediate next line with the help of imports:
import py
import os
from prolog.builtin.sourcehelper import get_source
from prolog.interpreter.test.tool import collect_all, assert_false, assert_true
from prolog.interpreter.test.tool import prolog_raises, create_file, delete_file
from prolog.interpreter.error import CatchableError
and context (classes, functions, sometimes code) from other files:
# Path: prolog/builtin/sourcehelper.py
# def get_source(filename):
# try:
# assert isinstance(filename, str)
# fd, actual_filename = get_filehandle(filename, True)
# except OSError:
# throw_existence_error("source_sink", Callable.build(filename))
# assert 0, "unreachable" # make the flow space happy
# try:
# content = []
# while 1:
# s = os.read(fd, 4096)
# if not s:
# break
# content.append(s)
# file_content = "".join(content)
# finally:
# os.close(fd)
# return file_content, actual_filename
#
# Path: prolog/interpreter/test/tool.py
# def collect_all(engine, s):
# terms, vars = engine.parse(s)
# term, = terms
# collector = CollectAllContinuation(engine.modulewrapper.user_module, vars)
# py.test.raises(UnificationFailed, engine.run_query, term,
# engine.modulewrapper.current_module, collector)
# return collector.heaps
#
# def assert_false(query, e=None):
# if e is None:
# e = Engine()
# term = e.parse(query)[0][0]
# py.test.raises(UnificationFailed, e.run_query_in_current, term)
#
# def assert_true(query, e=None):
# if e is None:
# e = Engine()
# terms, vars = e.parse(query)
# term, = terms
# e.run_query_in_current(term)
# return dict([(name, var.dereference(None))
# for name, var in vars.iteritems()])
#
# Path: prolog/interpreter/test/tool.py
# def prolog_raises(exc, query, e=None):
# prolog_catch = "catch(((%s), fail), error(%s), true)." % (query, exc)
# return assert_true(prolog_catch, e)
#
# def create_file(name, content):
# with open(name, "w") as f:
# f.write(content)
#
# def delete_file(name):
# os.unlink(name)
#
# Path: prolog/interpreter/error.py
# class CatchableError(TermedError): pass
. Output only the next line. | py.test.raises(CatchableError, "get_source('this_file_does_not_exist')") |
Given the following code snippet before the placeholder: <|code_start|>
def get_uncaught_error(query, e):
if isinstance(query, str):
(query, _) = get_query_and_vars(query)
return pytest.raises(UncaughtError, e.run_query_in_current, query).value
def test_errstr():
<|code_end|>
, predict the next line using imports from the current file:
import py, pytest
from prolog.interpreter.parsing import get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.error import UncaughtError
from prolog.interpreter.signature import Signature
and context including class names, function names, and sometimes code from other files:
# Path: prolog/interpreter/parsing.py
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/error.py
# class UncaughtError(TermedError):
# def __init__(self, term, sig_context=None, rule_likely_source=None, scont=None):
# TermedError.__init__(self, term, sig_context)
# self.rule = rule_likely_source
# self.traceback = _construct_traceback(scont)
#
# def format_traceback(self, engine):
# out = ["Traceback (most recent call last):"]
# self.traceback._format(out)
# context = ""
# if self.sig_context is not None:
# context = self.sig_context.string()
# if context == "throw/1":
# context = ""
# else:
# context += ": "
# out.append("%s%s" % (context, self.get_errstr(engine)))
# return "\n".join(out)
#
# Path: prolog/interpreter/signature.py
# class Signature(object):
# """An object representing the signature of a Prolog term."""
#
# _cache = SignatureFactory()
#
# _immutable_fields_ = ["name", "numargs", "atom_signature", "factory"]
#
# def __init__(self, name, numargs, cached=False, factory=None):
# assert name is not None
# assert isinstance(name, str)
# name = rstring.assert_str0(name)
# self.name = name
# self.numargs = numargs
# self.cached = cached
# if factory is None:
# factory = self._cache
# self.factory = factory
# if numargs:
# atom_signature = factory.getsignature(name, 0, cached)
# else:
# atom_signature = self
# self.atom_signature = atom_signature
# factory.init_extra_attrs(self)
#
# def eq(self, other):
# # slightly evil
# if jit.isconstant(self):
# jit.promote(other)
# elif jit.isconstant(other):
# jit.promote(self)
# return self is other or (
# self.numargs == other.numargs and
# self.name == other.name)
#
# @specialize.arg(1)
# def get_extra(self, name):
# aname = "extra_attr_" + name
# if not we_are_translated():
# assert aname in self.factory.extra_attr_names
# self = self.ensure_cached()
# return getattr(self, aname)
#
# @specialize.arg(1)
# def set_extra(self, name, val):
# aname = "extra_attr_" + name
# if not we_are_translated():
# assert aname in self.factory.extra_attr_names
# self = self.ensure_cached()
# setattr(self, aname, val)
#
#
# @specialize.arg(1)
# def get_extra_engine_local(self, name, engine):
# ename = "extra_attr_engine_" + name
# if not we_are_translated():
# assert ename in self.factory.extra_attr_names
# if getattr(self, ename) is not engine:
# setattr(self, ename, engine)
# aname = "extra_attr_" + name
# setattr(self, aname, None)
# return self.get_extra(name)
#
# @specialize.arg(1)
# def set_extra_engine_local(self, name, val, engine):
# ename = "extra_attr_engine_" + name
# setattr(self, ename, engine)
# self.set_extra(name, val)
#
# def ensure_cached(self):
# if self.cached:
# return self
# return self.factory.ensure_cached(self)
#
# def string(self):
# return "%s/%s" % (self.name, self.numargs)
#
# def __repr__(self):
# return "<Signature %s>" % (self.string(), )
#
# @staticmethod
# @jit.elidable
# def getsignature(name, numargs):
# return Signature._cache.getsignature(name, numargs)
#
# @staticmethod
# def register_extr_attr(name, engine=False, default=None):
# Signature._cache.register_extr_attr(name, engine, default)
. Output only the next line. | e = get_engine(""" |
Given the code snippet: <|code_start|>
def get_uncaught_error(query, e):
if isinstance(query, str):
(query, _) = get_query_and_vars(query)
<|code_end|>
, generate the next line using the imports in this file:
import py, pytest
from prolog.interpreter.parsing import get_engine
from prolog.interpreter.parsing import get_query_and_vars
from prolog.interpreter.error import UncaughtError
from prolog.interpreter.signature import Signature
and context (functions, classes, or occasionally code) from other files:
# Path: prolog/interpreter/parsing.py
# def get_engine(source, create_files=False, load_system=False, **modules):
# from prolog.interpreter.continuation import Engine
# from prolog.interpreter.test.tool import create_file, delete_file
# e = Engine(load_system)
# for name, module in modules.iteritems():
# if create_files:
# create_file(name, module)
# else:
# e.runstring(module)
# try:
# e.modulewrapper.current_module = e.modulewrapper.user_module
# e.runstring(source)
# finally:
# if create_files:
# for name in modules.keys():
# delete_file(name)
# return e
#
# Path: prolog/interpreter/parsing.py
# def get_query_and_vars(s):
# tokens = lexer.tokenize(s, eof=True)
# s = parser_query.parse(tokens, lazy=False)
# builder = TermBuilder()
# query = builder.build_query(s)
# return query, builder.varname_to_var
#
# Path: prolog/interpreter/error.py
# class UncaughtError(TermedError):
# def __init__(self, term, sig_context=None, rule_likely_source=None, scont=None):
# TermedError.__init__(self, term, sig_context)
# self.rule = rule_likely_source
# self.traceback = _construct_traceback(scont)
#
# def format_traceback(self, engine):
# out = ["Traceback (most recent call last):"]
# self.traceback._format(out)
# context = ""
# if self.sig_context is not None:
# context = self.sig_context.string()
# if context == "throw/1":
# context = ""
# else:
# context += ": "
# out.append("%s%s" % (context, self.get_errstr(engine)))
# return "\n".join(out)
#
# Path: prolog/interpreter/signature.py
# class Signature(object):
# """An object representing the signature of a Prolog term."""
#
# _cache = SignatureFactory()
#
# _immutable_fields_ = ["name", "numargs", "atom_signature", "factory"]
#
# def __init__(self, name, numargs, cached=False, factory=None):
# assert name is not None
# assert isinstance(name, str)
# name = rstring.assert_str0(name)
# self.name = name
# self.numargs = numargs
# self.cached = cached
# if factory is None:
# factory = self._cache
# self.factory = factory
# if numargs:
# atom_signature = factory.getsignature(name, 0, cached)
# else:
# atom_signature = self
# self.atom_signature = atom_signature
# factory.init_extra_attrs(self)
#
# def eq(self, other):
# # slightly evil
# if jit.isconstant(self):
# jit.promote(other)
# elif jit.isconstant(other):
# jit.promote(self)
# return self is other or (
# self.numargs == other.numargs and
# self.name == other.name)
#
# @specialize.arg(1)
# def get_extra(self, name):
# aname = "extra_attr_" + name
# if not we_are_translated():
# assert aname in self.factory.extra_attr_names
# self = self.ensure_cached()
# return getattr(self, aname)
#
# @specialize.arg(1)
# def set_extra(self, name, val):
# aname = "extra_attr_" + name
# if not we_are_translated():
# assert aname in self.factory.extra_attr_names
# self = self.ensure_cached()
# setattr(self, aname, val)
#
#
# @specialize.arg(1)
# def get_extra_engine_local(self, name, engine):
# ename = "extra_attr_engine_" + name
# if not we_are_translated():
# assert ename in self.factory.extra_attr_names
# if getattr(self, ename) is not engine:
# setattr(self, ename, engine)
# aname = "extra_attr_" + name
# setattr(self, aname, None)
# return self.get_extra(name)
#
# @specialize.arg(1)
# def set_extra_engine_local(self, name, val, engine):
# ename = "extra_attr_engine_" + name
# setattr(self, ename, engine)
# self.set_extra(name, val)
#
# def ensure_cached(self):
# if self.cached:
# return self
# return self.factory.ensure_cached(self)
#
# def string(self):
# return "%s/%s" % (self.name, self.numargs)
#
# def __repr__(self):
# return "<Signature %s>" % (self.string(), )
#
# @staticmethod
# @jit.elidable
# def getsignature(name, numargs):
# return Signature._cache.getsignature(name, numargs)
#
# @staticmethod
# def register_extr_attr(name, engine=False, default=None):
# Signature._cache.register_extr_attr(name, engine, default)
. Output only the next line. | return pytest.raises(UncaughtError, e.run_query_in_current, query).value |
Given snippet: <|code_start|> self.trail_var[i] = var
self.trail_binding[i] = var.binding
self.i = i + 1
def _is_created_in_self(self, var):
created_in = var.created_after_choice_point
if self is created_in: # fast path
return True
if created_in is not None and created_in.discarded:
# unroll _find_not_discarded once for better jittability
created_in = created_in.prev
if created_in is not None and created_in.discarded:
created_in = created_in._find_not_discarded()
var.created_after_choice_point = created_in
return self is created_in
def _double_size(self):
l = len(self.trail_var)
if l == 0:
self.trail_var = [None, None]
self.trail_binding = [None, None]
elif l == 1:
assert 0, "cannot happen"
else:
self.trail_var = self.trail_var + [None] * l
self.trail_binding = self.trail_binding + [None] * l
def newvar(self):
""" Make a new variable. Should return a Var instance, possibly with
interesting attributes set that e.g. add_trail can inspect."""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from rpython.rlib import debug
from prolog.interpreter.term import BindingVar, AttVar
from rpython.rlib import jit
from prolog.interpreter.term import var_in_term_classes
and context:
# Path: prolog/interpreter/term.py
# class BindingVar(Var):
# __slots__ = ("binding", "created_after_choice_point")
#
# def __init__(self):
# Var.__init__(self)
# self.binding = None
#
# def getbinding(self):
# return self.binding
#
# def setvalue(self, value, heap):
# heap.add_trail(self)
# self.binding = value
#
# @specialize.arg(4)
# def _unify_potential_recursion(self, next, other, heap, occurs_check):
# assert isinstance(next, NonVar)
# if next is not other:
# if isinstance(other, NonVar):
# self.setvalue(other, heap)
# next._unify_derefed(other, heap, occurs_check)
#
# class AttVar(BindingVar):
# attmap = AttMap()
#
# def __init__(self):
# BindingVar.__init__(self)
# self.value_list = debug.make_sure_not_resized([])
#
# @specialize.arg(3)
# def _unify_derefed(self, other, heap, occurs_check=False):
# if isinstance(other, AttVar):
# if other is not self:
# self.setvalue(other, heap)
# return
# if isinstance(other, Var):
# return other._unify_derefed(self, heap, occurs_check)
# return self.setvalue(other, heap)
#
# def setvalue(self, value, heap):
# if self.value_list is not None:
# heap.add_hook(self)
# BindingVar.setvalue(self, value, heap)
#
# def __repr__(self):
# attrs = []
# attmap = jit.hint(self.attmap, promote=True)
# if self.value_list is not None:
# for key, index in attmap.indexes.iteritems():
# value = self.value_list[index]
# if value is not None:
# attrs.append("%s" % (key, ))
# return "AttVar(%s, %s)" % (self.getbinding(), "[" + ", ".join(attrs) + "]")
#
# def copy(self, heap, memo):
# self = self.dereference(None)
# if isinstance(self, AttVar):
# res = memo.get(self)
# if res is not None:
# return res
# newvar = heap.new_attvar()
# own_list = self.value_list
# newvar.attmap = self.attmap
# if own_list is None:
# newvar.value_list = None
# else:
# length = len(own_list)
# new_values = [None] * length
# for i in range(length):
# if own_list[i] is None:
# new_values[i] = None
# else:
# new_values[i] = own_list[i].copy(heap, memo)
# newvar.value_list = new_values
#
# memo.set(self, newvar)
# return newvar
# return self.copy(heap, memo)
#
# def add_attribute(self, attname, attribute):
# attmap = jit.hint(self.attmap, promote=True)
# index = attmap.get_index(attname)
# if index != -1:
# self.value_list[index] = attribute
# return
# self.attmap = attmap.with_extra_attribute(attname)
# self.value_list = self.value_list + [attribute]
#
# def del_attribute(self, attname):
# attmap = jit.hint(self.attmap, promote=True)
# index = attmap.get_index(attname)
# if self.value_list is not None:
# self.value_list[index] = None
#
# def get_attribute(self, attname):
# if self.value_list is None:
# return None, -1
# attmap = jit.hint(self.attmap, promote=True)
# index = attmap.get_index(attname)
# if index == -1:
# return None, -1
# return self.value_list[index], index
#
# def reset_field(self, index, value):
# if self.value_list is None:
# self.value_list = [None] * (index + 1)
# else:
# self.value_list = self.value_list + [None] * (
# index - len(self.value_list) + 1)
# self.value_list[index] = value
#
# def get_attribute_index(self, attname):
# attmap = jit.hint(self.attmap, promote=True)
# return attmap.get_index(attname)
#
# def is_empty(self):
# if self.value_list is None:
# return True
# for elem in self.value_list:
# if elem is not None:
# return False
# return True
which might include code, classes, or functions. Output only the next line. | result = BindingVar() |
Continue the code snippet: <|code_start|> created_in = var.created_after_choice_point
if self is created_in: # fast path
return True
if created_in is not None and created_in.discarded:
# unroll _find_not_discarded once for better jittability
created_in = created_in.prev
if created_in is not None and created_in.discarded:
created_in = created_in._find_not_discarded()
var.created_after_choice_point = created_in
return self is created_in
def _double_size(self):
l = len(self.trail_var)
if l == 0:
self.trail_var = [None, None]
self.trail_binding = [None, None]
elif l == 1:
assert 0, "cannot happen"
else:
self.trail_var = self.trail_var + [None] * l
self.trail_binding = self.trail_binding + [None] * l
def newvar(self):
""" Make a new variable. Should return a Var instance, possibly with
interesting attributes set that e.g. add_trail can inspect."""
result = BindingVar()
result.created_after_choice_point = self
return result
def new_attvar(self):
<|code_end|>
. Use current file imports:
from rpython.rlib import debug
from prolog.interpreter.term import BindingVar, AttVar
from rpython.rlib import jit
from prolog.interpreter.term import var_in_term_classes
and context (classes, functions, or code) from other files:
# Path: prolog/interpreter/term.py
# class BindingVar(Var):
# __slots__ = ("binding", "created_after_choice_point")
#
# def __init__(self):
# Var.__init__(self)
# self.binding = None
#
# def getbinding(self):
# return self.binding
#
# def setvalue(self, value, heap):
# heap.add_trail(self)
# self.binding = value
#
# @specialize.arg(4)
# def _unify_potential_recursion(self, next, other, heap, occurs_check):
# assert isinstance(next, NonVar)
# if next is not other:
# if isinstance(other, NonVar):
# self.setvalue(other, heap)
# next._unify_derefed(other, heap, occurs_check)
#
# class AttVar(BindingVar):
# attmap = AttMap()
#
# def __init__(self):
# BindingVar.__init__(self)
# self.value_list = debug.make_sure_not_resized([])
#
# @specialize.arg(3)
# def _unify_derefed(self, other, heap, occurs_check=False):
# if isinstance(other, AttVar):
# if other is not self:
# self.setvalue(other, heap)
# return
# if isinstance(other, Var):
# return other._unify_derefed(self, heap, occurs_check)
# return self.setvalue(other, heap)
#
# def setvalue(self, value, heap):
# if self.value_list is not None:
# heap.add_hook(self)
# BindingVar.setvalue(self, value, heap)
#
# def __repr__(self):
# attrs = []
# attmap = jit.hint(self.attmap, promote=True)
# if self.value_list is not None:
# for key, index in attmap.indexes.iteritems():
# value = self.value_list[index]
# if value is not None:
# attrs.append("%s" % (key, ))
# return "AttVar(%s, %s)" % (self.getbinding(), "[" + ", ".join(attrs) + "]")
#
# def copy(self, heap, memo):
# self = self.dereference(None)
# if isinstance(self, AttVar):
# res = memo.get(self)
# if res is not None:
# return res
# newvar = heap.new_attvar()
# own_list = self.value_list
# newvar.attmap = self.attmap
# if own_list is None:
# newvar.value_list = None
# else:
# length = len(own_list)
# new_values = [None] * length
# for i in range(length):
# if own_list[i] is None:
# new_values[i] = None
# else:
# new_values[i] = own_list[i].copy(heap, memo)
# newvar.value_list = new_values
#
# memo.set(self, newvar)
# return newvar
# return self.copy(heap, memo)
#
# def add_attribute(self, attname, attribute):
# attmap = jit.hint(self.attmap, promote=True)
# index = attmap.get_index(attname)
# if index != -1:
# self.value_list[index] = attribute
# return
# self.attmap = attmap.with_extra_attribute(attname)
# self.value_list = self.value_list + [attribute]
#
# def del_attribute(self, attname):
# attmap = jit.hint(self.attmap, promote=True)
# index = attmap.get_index(attname)
# if self.value_list is not None:
# self.value_list[index] = None
#
# def get_attribute(self, attname):
# if self.value_list is None:
# return None, -1
# attmap = jit.hint(self.attmap, promote=True)
# index = attmap.get_index(attname)
# if index == -1:
# return None, -1
# return self.value_list[index], index
#
# def reset_field(self, index, value):
# if self.value_list is None:
# self.value_list = [None] * (index + 1)
# else:
# self.value_list = self.value_list + [None] * (
# index - len(self.value_list) + 1)
# self.value_list[index] = value
#
# def get_attribute_index(self, attname):
# attmap = jit.hint(self.attmap, promote=True)
# return attmap.get_index(attname)
#
# def is_empty(self):
# if self.value_list is None:
# return True
# for elem in self.value_list:
# if elem is not None:
# return False
# return True
. Output only the next line. | result = AttVar() |
Continue the code snippet: <|code_start|> return {
'--account': None,
'--dependents': [],
'--dump-file': None,
'--exact': [],
'--exclude': [],
'--help': False,
'--no-build': False,
'--upto': [],
'--x-assert-hostname': False,
'-H': None,
'TARGET': [],
'build': False,
'push': False,
'images': False,
'tags': ['latest'],
}
def create_repo(path):
repo = git.Repo.init(path)
repo.index.add(repo.untracked_files)
repo.index.commit('Initial Commit')
return repo
def test_without_json_manifest(tmpdir):
path = str(tmpdir.join('no-manifest'))
create_repo(path)
with pytest.raises(SystemExit):
<|code_end|>
. Use current file imports:
import git
import pytest
from docker import tls
from shipwright._lib import cli
and context (classes, functions, or code) from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
. Output only the next line. | cli.process_arguments( |
Here is a snippet: <|code_start|>from __future__ import absolute_import
def default_args():
return argparse.Namespace(dirty=False, pull_cache=False)
def test_sample(tmpdir, capsys):
path = str(tmpdir.join('shipwright-sample'))
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
repo = create_repo(path, source)
tag = repo.head.ref.commit.hexsha[:12]
client_cfg = docker_utils.kwargs_from_env()
args = get_defaults()
args['images'] = True
<|code_end|>
. Write the next line using the current file imports:
import argparse
import pkg_resources
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import create_repo, get_defaults
and context from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
, which may include functions, classes, or code. Output only the next line. | shipw_cli.run( |
Here is a snippet: <|code_start|>from __future__ import absolute_import
def default_args():
return argparse.Namespace(dirty=False, pull_cache=False)
def test_sample(tmpdir, capsys):
path = str(tmpdir.join('shipwright-sample'))
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
<|code_end|>
. Write the next line using the current file imports:
import argparse
import pkg_resources
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import create_repo, get_defaults
and context from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
, which may include functions, classes, or code. Output only the next line. | repo = create_repo(path, source) |
Based on the snippet: <|code_start|>from __future__ import absolute_import
def default_args():
return argparse.Namespace(dirty=False, pull_cache=False)
def test_sample(tmpdir, capsys):
path = str(tmpdir.join('shipwright-sample'))
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
repo = create_repo(path, source)
tag = repo.head.ref.commit.hexsha[:12]
client_cfg = docker_utils.kwargs_from_env()
<|code_end|>
, predict the immediate next line with the help of imports:
import argparse
import pkg_resources
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import create_repo, get_defaults
and context (classes, functions, sometimes code) from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
. Output only the next line. | args = get_defaults() |
Here is a snippet: <|code_start|>from __future__ import absolute_import
class Shipwright(object):
def __init__(self, source_control, docker_client, tags, cache):
self.source_control = source_control
self.docker_client = docker_client
self.tags = tags
self._cache = cache
def targets(self):
return self.source_control.targets()
def build(self, build_targets):
targets = dependencies.eval(build_targets, self.targets())
this_ref_str = self.source_control.this_ref_str()
return self._build(this_ref_str, targets)
def _build(self, this_ref_str, targets):
client = self.docker_client
ref = this_ref_str
tags = self.source_control.default_tags() + self.tags + [this_ref_str]
for evt in build.do_build(client, ref, targets, self._cache):
<|code_end|>
. Write the next line using the current file imports:
from . import build, dependencies
from .msg import BuildComplete
and context from other files:
# Path: shipwright/_lib/msg.py
# class BuildComplete(Message, namedtuple('BuildComplete', ['target'])):
# """
# A message to signify that the build has finished for a target.
#
# The build may not have completed succesfully
# """
, which may include functions, classes, or code. Output only the next line. | if isinstance(evt, BuildComplete): |
Continue the code snippet: <|code_start|>from __future__ import absolute_import
def test_docker_push(tmpdir, docker_client, registry):
path = str(tmpdir.join('shipwright-localhost-sample'))
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-localhost-sample',
)
repo = create_repo(path, source)
tag = repo.head.ref.commit.hexsha[:12]
client_cfg = docker_utils.kwargs_from_env()
cli = docker_client
defaults = get_defaults()
defaults['push'] = True
try:
<|code_end|>
. Use current file imports:
import pkg_resources
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import commit_untracked, create_repo, default_args, get_defaults
and context (classes, functions, or code) from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def default_args():
# return argparse.Namespace(
# dirty=False,
# pull_cache=False,
# registry_login=[],
# )
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
. Output only the next line. | shipw_cli.run( |
Continue the code snippet: <|code_start|> assert set(base['RepoTags']) == {
'localhost:5000/base:master',
'localhost:5000/base:latest',
'localhost:5000/base:' + tag,
}
finally:
old_images = (
cli.images(name='localhost:5000/service1', quiet=True) +
cli.images(name='localhost:5000/shared', quiet=True) +
cli.images(name='localhost:5000/base', quiet=True)
)
for image in old_images:
cli.remove_image(image, force=True)
def test_docker_push_target_ref(tmpdir, docker_client, registry):
"""
Test that shipwright push includes the target ref of every image.
Otherwise --pull-cache will not work.
"""
tmp = tmpdir.join('shipwright-localhost-sample')
path = str(tmp)
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-localhost-sample',
)
repo = create_repo(path, source)
tag = repo.head.ref.commit.hexsha[:12]
tmp.join('service1/base.txt').write('Hi mum')
<|code_end|>
. Use current file imports:
import pkg_resources
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import commit_untracked, create_repo, default_args, get_defaults
and context (classes, functions, or code) from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def default_args():
# return argparse.Namespace(
# dirty=False,
# pull_cache=False,
# registry_login=[],
# )
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
. Output only the next line. | commit_untracked(repo) |
Based on the snippet: <|code_start|>from __future__ import absolute_import
def test_docker_push(tmpdir, docker_client, registry):
path = str(tmpdir.join('shipwright-localhost-sample'))
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-localhost-sample',
)
<|code_end|>
, predict the immediate next line with the help of imports:
import pkg_resources
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import commit_untracked, create_repo, default_args, get_defaults
and context (classes, functions, sometimes code) from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def default_args():
# return argparse.Namespace(
# dirty=False,
# pull_cache=False,
# registry_login=[],
# )
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
. Output only the next line. | repo = create_repo(path, source) |
Predict the next line for this snippet: <|code_start|>
assert set(base['RepoTags']) == {
'localhost:5000/base:master',
'localhost:5000/base:latest',
'localhost:5000/base:' + new_tag,
'localhost:5000/base:' + tag,
}
finally:
old_images = (
cli.images(name='localhost:5000/service1', quiet=True) +
cli.images(name='localhost:5000/shared', quiet=True) +
cli.images(name='localhost:5000/base', quiet=True)
)
for image in old_images:
cli.remove_image(image, force=True)
def test_docker_push_direct_registry(tmpdir, docker_client, registry):
tmp = tmpdir.join('shipwright-localhost-sample')
path = str(tmp)
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-localhost-sample',
)
repo = create_repo(path, source)
tag = repo.head.ref.commit.hexsha[:12]
client_cfg = docker_utils.kwargs_from_env()
cli = docker_client
<|code_end|>
with the help of current file imports:
import pkg_resources
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import commit_untracked, create_repo, default_args, get_defaults
and context from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def default_args():
# return argparse.Namespace(
# dirty=False,
# pull_cache=False,
# registry_login=[],
# )
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
, which may contain function names, class names, or code. Output only the next line. | args = default_args() |
Continue the code snippet: <|code_start|>from __future__ import absolute_import
def test_docker_push(tmpdir, docker_client, registry):
path = str(tmpdir.join('shipwright-localhost-sample'))
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-localhost-sample',
)
repo = create_repo(path, source)
tag = repo.head.ref.commit.hexsha[:12]
client_cfg = docker_utils.kwargs_from_env()
cli = docker_client
<|code_end|>
. Use current file imports:
import pkg_resources
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import commit_untracked, create_repo, default_args, get_defaults
and context (classes, functions, or code) from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def default_args():
# return argparse.Namespace(
# dirty=False,
# pull_cache=False,
# registry_login=[],
# )
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
. Output only the next line. | defaults = get_defaults() |
Using the snippet: <|code_start|>from __future__ import absolute_import
def test_sample(tmpdir, docker_client):
path = str(tmpdir.join('shipwright-sample'))
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
repo = create_repo(path, source)
tag = repo.head.ref.commit.hexsha[:12]
client_cfg = docker_utils.kwargs_from_env()
cli = docker_client
try:
<|code_end|>
, determine the next line of code. You have imports:
import json
import pkg_resources
import pytest
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import commit_untracked, create_repo, default_args, get_defaults
and context (class names, function names, or code) available:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def default_args():
# return argparse.Namespace(
# dirty=False,
# pull_cache=False,
# registry_login=[],
# )
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
. Output only the next line. | shipw_cli.run( |
Based on the snippet: <|code_start|> cli.images(name='shipwright/service1', quiet=True) +
cli.images(name='shipwright/base', quiet=True)
)
for image in old_images:
cli.remove_image(image, force=True)
def test_clean_tree_avoids_rebuild(tmpdir, docker_client):
tmp = tmpdir.join('shipwright-sample')
path = str(tmp)
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
repo = create_repo(path, source)
old_tag = repo.head.ref.commit.hexsha[:12]
client_cfg = docker_utils.kwargs_from_env()
cli = docker_client
try:
shipw_cli.run(
path=path,
client_cfg=client_cfg,
arguments=get_defaults(),
environ={},
)
tmp.join('service1/base.txt').write('Hi mum')
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import pkg_resources
import pytest
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import commit_untracked, create_repo, default_args, get_defaults
and context (classes, functions, sometimes code) from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def default_args():
# return argparse.Namespace(
# dirty=False,
# pull_cache=False,
# registry_login=[],
# )
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
. Output only the next line. | commit_untracked(repo) |
Given the code snippet: <|code_start|>from __future__ import absolute_import
def test_sample(tmpdir, docker_client):
path = str(tmpdir.join('shipwright-sample'))
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
<|code_end|>
, generate the next line using the imports in this file:
import json
import pkg_resources
import pytest
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import commit_untracked, create_repo, default_args, get_defaults
and context (functions, classes, or occasionally code) from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def default_args():
# return argparse.Namespace(
# dirty=False,
# pull_cache=False,
# registry_login=[],
# )
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
. Output only the next line. | repo = create_repo(path, source) |
Given snippet: <|code_start|> )
create_repo(path, source)
tmp.join('service1/base.txt').write('Some text')
client_cfg = docker_utils.kwargs_from_env()
args = get_defaults()
result = shipw_cli.run(
path=path,
client_cfg=client_cfg,
arguments=args,
environ={},
)
assert '--dirty' in result
assert 'Abort' in result
def test_dirty_flag(tmpdir, docker_client):
tmp = tmpdir.join('shipwright-sample')
path = str(tmp)
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
create_repo(path, source)
tmp.join('service1/base.txt').write('Some text')
client_cfg = docker_utils.kwargs_from_env()
cli = docker_client
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json
import pkg_resources
import pytest
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import commit_untracked, create_repo, default_args, get_defaults
and context:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def default_args():
# return argparse.Namespace(
# dirty=False,
# pull_cache=False,
# registry_login=[],
# )
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
which might include code, classes, or functions. Output only the next line. | args = default_args() |
Given the code snippet: <|code_start|>from __future__ import absolute_import
def test_sample(tmpdir, docker_client):
path = str(tmpdir.join('shipwright-sample'))
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
repo = create_repo(path, source)
tag = repo.head.ref.commit.hexsha[:12]
client_cfg = docker_utils.kwargs_from_env()
cli = docker_client
try:
shipw_cli.run(
path=path,
client_cfg=client_cfg,
<|code_end|>
, generate the next line using the imports in this file:
import json
import pkg_resources
import pytest
from docker import utils as docker_utils
from shipwright._lib import cli as shipw_cli
from .utils import commit_untracked, create_repo, default_args, get_defaults
and context (functions, classes, or occasionally code) from other files:
# Path: shipwright/_lib/cli.py
# def argparser():
# def a_arg(parser, *args, **kwargs):
# def parse_registry_logins(registry_logins):
# def _flatten(items):
# def old_style_arg_dict(namespace):
# def main():
# def process_arguments(path, arguments, client_cfg, environ):
# def default(self, obj):
# def run(path, arguments, client_cfg, environ, new_style_args=None):
# def exit(msg):
# def memo(f, arg, memos={}):
# def pretty_event(evt, show_progress):
# def highlight(name):
# def highlight_(msg):
# def switch(rec, show_progress):
# class SetJSONEncoder(json.JSONEncoder):
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
#
# def default_args():
# return argparse.Namespace(
# dirty=False,
# pull_cache=False,
# registry_login=[],
# )
#
# def get_defaults():
# return {
# '--account': None,
# '--dependents': [],
# '--dump-file': None,
# '--exact': [],
# '--exclude': [],
# '--help': False,
# '--no-build': False,
# '--dirty': False,
# '--upto': [],
# '--x-assert-hostname': False,
# '-H': None,
# 'TARGET': [],
# 'build': False,
# 'push': False,
# 'images': False,
# 'tags': ['latest'],
# }
. Output only the next line. | arguments=get_defaults(), |
Predict the next line for this snippet: <|code_start|>from __future__ import absolute_import
def test_mkcontext(tmpdir):
tmp = tmpdir.mkdir('image')
docker_path = tmp.join('Dockerfile')
docker_path.write('FROM example.com/r/image')
tmp.join('bogus').write('hi mom')
<|code_end|>
with the help of current file imports:
import tarfile
from shipwright._lib import tar
and context from other files:
# Path: shipwright/_lib/tar.py
# RE_DOCKER_TAG = re.compile(
# r'^(?P<from>\s*from\s+)'
# r'(?P<registry>[\w.-]+(?P<port>:\d+)?(?P<path>([\w.-]+/)+|/))?'
# r'(?P<name>[\w.-]+)'
# r'(?P<whitespace>(\s*))$',
# flags=re.MULTILINE | re.IGNORECASE,
# )
# def bundle_docker_dir(tag, docker_path):
# def tag_parent(tag, docker_content):
# def mkcontext(tag, docker_path):
, which may contain function names, class names, or code. Output only the next line. | with tar.mkcontext('xyz', str(docker_path)) as f: |
Here is a snippet: <|code_start|> registry_logins = _flatten(new_style_args.registry_login)
namespace = config['namespace']
name_map = config.get('names', {})
scm = source_control.source_control(path, namespace, name_map)
if not dirty and scm.is_dirty():
return (
'Aborting build, due to uncommitted changes. If you are not ready '
'to commit these changes, re-run with the --dirty flag.'
)
if registry_logins:
if isinstance(drc, Exception):
raise drc
registry_config = parse_registry_logins(registry_logins)
registries = {}
for server, config in registry_config.items():
registries[server] = drc.BaseClient(
config['server'],
username=config['username'],
password=config['password'],
api_version=2,
)
the_cache = cache.DirectRegistry(client, registry.Registry(registries))
elif pull_cache:
the_cache = cache.Cache(client)
else:
the_cache = cache.NoCache(client)
<|code_end|>
. Write the next line using the current file imports:
import argparse
import collections
import json
import os
import re
import shlex
import sys
import docker
import docker_registry_client as drc
from itertools import chain, cycle
from docker.utils import kwargs_from_env
from . import cache, registry, source_control
from .base import Shipwright
from .colors import rainbow
from .msg import Message
and context from other files:
# Path: shipwright/_lib/base.py
# class Shipwright(object):
# def __init__(self, source_control, docker_client, tags, cache):
# self.source_control = source_control
# self.docker_client = docker_client
# self.tags = tags
# self._cache = cache
#
# def targets(self):
# return self.source_control.targets()
#
# def build(self, build_targets):
# targets = dependencies.eval(build_targets, self.targets())
# this_ref_str = self.source_control.this_ref_str()
# return self._build(this_ref_str, targets)
#
# def _build(self, this_ref_str, targets):
# client = self.docker_client
# ref = this_ref_str
# tags = self.source_control.default_tags() + self.tags + [this_ref_str]
# for evt in build.do_build(client, ref, targets, self._cache):
# if isinstance(evt, BuildComplete):
# target = evt.target
# for tag_evt in self._cache.tag([target], tags):
# yield tag_evt
# yield evt
#
# def images(self, build_targets):
# for target in dependencies.eval(build_targets, self.targets()):
# yield {
# 'stream': '{t.name}:{t.ref}'.format(t=target),
# 'event': 'log',
# }
#
# def push(self, build_targets, no_build=False):
# """
# Pushes the latest images to the repository.
# """
# targets = dependencies.eval(build_targets, self.targets())
# this_ref_str = self.source_control.this_ref_str()
# tags = self.source_control.default_tags() + self.tags + [this_ref_str]
#
# if no_build:
# for evt in self._cache.push(targets, tags):
# yield evt
# return
#
# for evt in self._build(this_ref_str, targets):
# if isinstance(evt, BuildComplete):
# for push_evt in self._cache.push([evt.target], tags):
# yield push_evt
# yield evt
#
# Path: shipwright/_lib/msg.py
# class Message(object):
# """
# Base class for all other messages
# """
, which may include functions, classes, or code. Output only the next line. | sw = Shipwright(scm, client, arguments['tags'], the_cache) |
Here is a snippet: <|code_start|> raise drc
registry_config = parse_registry_logins(registry_logins)
registries = {}
for server, config in registry_config.items():
registries[server] = drc.BaseClient(
config['server'],
username=config['username'],
password=config['password'],
api_version=2,
)
the_cache = cache.DirectRegistry(client, registry.Registry(registries))
elif pull_cache:
the_cache = cache.Cache(client)
else:
the_cache = cache.NoCache(client)
sw = Shipwright(scm, client, arguments['tags'], the_cache)
command = getattr(sw, command_name)
show_progress = sys.stdout.isatty()
errors = []
if no_build:
events = command(build_targets, no_build)
else:
events = command(build_targets)
for event in events:
<|code_end|>
. Write the next line using the current file imports:
import argparse
import collections
import json
import os
import re
import shlex
import sys
import docker
import docker_registry_client as drc
from itertools import chain, cycle
from docker.utils import kwargs_from_env
from . import cache, registry, source_control
from .base import Shipwright
from .colors import rainbow
from .msg import Message
and context from other files:
# Path: shipwright/_lib/base.py
# class Shipwright(object):
# def __init__(self, source_control, docker_client, tags, cache):
# self.source_control = source_control
# self.docker_client = docker_client
# self.tags = tags
# self._cache = cache
#
# def targets(self):
# return self.source_control.targets()
#
# def build(self, build_targets):
# targets = dependencies.eval(build_targets, self.targets())
# this_ref_str = self.source_control.this_ref_str()
# return self._build(this_ref_str, targets)
#
# def _build(self, this_ref_str, targets):
# client = self.docker_client
# ref = this_ref_str
# tags = self.source_control.default_tags() + self.tags + [this_ref_str]
# for evt in build.do_build(client, ref, targets, self._cache):
# if isinstance(evt, BuildComplete):
# target = evt.target
# for tag_evt in self._cache.tag([target], tags):
# yield tag_evt
# yield evt
#
# def images(self, build_targets):
# for target in dependencies.eval(build_targets, self.targets()):
# yield {
# 'stream': '{t.name}:{t.ref}'.format(t=target),
# 'event': 'log',
# }
#
# def push(self, build_targets, no_build=False):
# """
# Pushes the latest images to the repository.
# """
# targets = dependencies.eval(build_targets, self.targets())
# this_ref_str = self.source_control.this_ref_str()
# tags = self.source_control.default_tags() + self.tags + [this_ref_str]
#
# if no_build:
# for evt in self._cache.push(targets, tags):
# yield evt
# return
#
# for evt in self._build(this_ref_str, targets):
# if isinstance(evt, BuildComplete):
# for push_evt in self._cache.push([evt.target], tags):
# yield push_evt
# yield evt
#
# Path: shipwright/_lib/msg.py
# class Message(object):
# """
# Base class for all other messages
# """
, which may include functions, classes, or code. Output only the next line. | if isinstance(event, Message): |
Based on the snippet: <|code_start|>
def do_build(client, build_ref, targets, cache):
"""
Generic function for building multiple images while
notifying a callback function with output produced.
Given a list of targets it builds the target with the given
build_func while streaming the output through the given
show_func.
Returns an iterator of (image, docker_image_id) pairs as
the final output.
Building an image can take sometime so the results are returned as
an iterator in case the caller wants to use restults in between builds.
The consequences of this is you must either call it as part of a for loop
or pass it to a function like list() which can consume an iterator.
"""
build_index = {t.image.name: t.ref for t in targets}
for target in targets:
parent_ref = None
if target.parent:
parent_ref = build_index.get(target.parent)
for evt in build(client, parent_ref, target, cache):
yield evt
<|code_end|>
, predict the immediate next line with the help of imports:
from . import docker
from .msg import BuildComplete
and context (classes, functions, sometimes code) from other files:
# Path: shipwright/_lib/msg.py
# class BuildComplete(Message, namedtuple('BuildComplete', ['target'])):
# """
# A message to signify that the build has finished for a target.
#
# The build may not have completed succesfully
# """
. Output only the next line. | yield BuildComplete(target) |
Given the following code snippet before the placeholder: <|code_start|>from __future__ import absolute_import
def test_default_tags_works_with_detached_head(tmpdir):
tmp = tmpdir.join('shipwright-sample')
path = str(tmp)
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
repo = create_repo(path, source)
old_commit = repo.head.ref.commit
tmp.join('service1/base.txt').write('Hi mum')
commit_untracked(repo)
repo.head.reference = old_commit
<|code_end|>
, predict the next line using imports from the current file:
import pkg_resources
import pytest
from shipwright._lib import source_control
from .utils import commit_untracked, create_repo
and context including class names, function names, and sometimes code from other files:
# Path: shipwright/_lib/source_control.py
# def source_control(path, namespace, name_map, mode=None):
# if mode is None:
# mode = AUTO
# assert isinstance(mode, Mode)
# the_mode = mode if mode is not AUTO else get_mode(path)
# for cls in SourceControl.__subclasses__():
# if cls.mode is the_mode:
# return cls(path, namespace, name_map)
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
. Output only the next line. | scm = source_control.GitSourceControl( |
Based on the snippet: <|code_start|>from __future__ import absolute_import
def test_default_tags_works_with_detached_head(tmpdir):
tmp = tmpdir.join('shipwright-sample')
path = str(tmp)
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
repo = create_repo(path, source)
old_commit = repo.head.ref.commit
tmp.join('service1/base.txt').write('Hi mum')
<|code_end|>
, predict the immediate next line with the help of imports:
import pkg_resources
import pytest
from shipwright._lib import source_control
from .utils import commit_untracked, create_repo
and context (classes, functions, sometimes code) from other files:
# Path: shipwright/_lib/source_control.py
# def source_control(path, namespace, name_map, mode=None):
# if mode is None:
# mode = AUTO
# assert isinstance(mode, Mode)
# the_mode = mode if mode is not AUTO else get_mode(path)
# for cls in SourceControl.__subclasses__():
# if cls.mode is the_mode:
# return cls(path, namespace, name_map)
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
. Output only the next line. | commit_untracked(repo) |
Using the snippet: <|code_start|>from __future__ import absolute_import
def test_default_tags_works_with_detached_head(tmpdir):
tmp = tmpdir.join('shipwright-sample')
path = str(tmp)
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
<|code_end|>
, determine the next line of code. You have imports:
import pkg_resources
import pytest
from shipwright._lib import source_control
from .utils import commit_untracked, create_repo
and context (class names, function names, or code) available:
# Path: shipwright/_lib/source_control.py
# def source_control(path, namespace, name_map, mode=None):
# if mode is None:
# mode = AUTO
# assert isinstance(mode, Mode)
# the_mode = mode if mode is not AUTO else get_mode(path)
# for cls in SourceControl.__subclasses__():
# if cls.mode is the_mode:
# return cls(path, namespace, name_map)
#
# Path: tests/integration/utils.py
# def commit_untracked(repo, message='WIP'):
# repo.index.add(repo.untracked_files)
# repo.index.commit(message)
#
# def create_repo(path, source=None):
# if source is not None:
# shutil.copytree(source, path)
# repo = git.Repo.init(path)
# repo.index.add(repo.untracked_files)
# repo.index.commit('Initial Commit')
# return repo
. Output only the next line. | repo = create_repo(path, source) |
Predict the next line for this snippet: <|code_start|>from __future__ import absolute_import
def names_list(targets):
return sorted(n.name for n in targets)
def _names(tree):
<|code_end|>
with the help of current file imports:
from shipwright._lib import dependencies, image, source_control
and context from other files:
# Path: shipwright/_lib/dependencies.py
# def eval(build_targets, targets):
# def _find(tree, name):
# def find_(loc):
# def _make_tree(images):
# def is_child(target):
# def _replace(node, children):
# def _children(item):
# def _is_branch(item):
# def _make_node(node, children):
# def _breadth_first_iter(loc):
# def _lineage(loc):
# def _split(f, children):
# def _brood(loc):
# def _upto(target, tree):
# def _dependents(target, tree):
# def _exact(target, tree):
# def _exclude(target, tree):
#
# Path: shipwright/_lib/image.py
# def list_images(namespace, name_map, root_path):
# def image_name(namespace, name_map, root_path, path):
# def build_files(build_root):
# def name(docker_path):
# def sub_if(ex, repl, string, flags):
# def parse_copy(cmd):
# def copy_paths(docker_path):
# def join(path):
# def copy_paths_gen():
# def parent(docker_path):
#
# Path: shipwright/_lib/source_control.py
# def source_control(path, namespace, name_map, mode=None):
# if mode is None:
# mode = AUTO
# assert isinstance(mode, Mode)
# the_mode = mode if mode is not AUTO else get_mode(path)
# for cls in SourceControl.__subclasses__():
# if cls.mode is the_mode:
# return cls(path, namespace, name_map)
, which may contain function names, class names, or code. Output only the next line. | return [n.name for n in dependencies._brood(tree)] |
Predict the next line for this snippet: <|code_start|>from __future__ import absolute_import
def names_list(targets):
return sorted(n.name for n in targets)
def _names(tree):
return [n.name for n in dependencies._brood(tree)]
def target(name, dir_path, path, parent):
return source_control.Target(
<|code_end|>
with the help of current file imports:
from shipwright._lib import dependencies, image, source_control
and context from other files:
# Path: shipwright/_lib/dependencies.py
# def eval(build_targets, targets):
# def _find(tree, name):
# def find_(loc):
# def _make_tree(images):
# def is_child(target):
# def _replace(node, children):
# def _children(item):
# def _is_branch(item):
# def _make_node(node, children):
# def _breadth_first_iter(loc):
# def _lineage(loc):
# def _split(f, children):
# def _brood(loc):
# def _upto(target, tree):
# def _dependents(target, tree):
# def _exact(target, tree):
# def _exclude(target, tree):
#
# Path: shipwright/_lib/image.py
# def list_images(namespace, name_map, root_path):
# def image_name(namespace, name_map, root_path, path):
# def build_files(build_root):
# def name(docker_path):
# def sub_if(ex, repl, string, flags):
# def parse_copy(cmd):
# def copy_paths(docker_path):
# def join(path):
# def copy_paths_gen():
# def parent(docker_path):
#
# Path: shipwright/_lib/source_control.py
# def source_control(path, namespace, name_map, mode=None):
# if mode is None:
# mode = AUTO
# assert isinstance(mode, Mode)
# the_mode = mode if mode is not AUTO else get_mode(path)
# for cls in SourceControl.__subclasses__():
# if cls.mode is the_mode:
# return cls(path, namespace, name_map)
, which may contain function names, class names, or code. Output only the next line. | image.Image(name, dir_path, path, parent, name, frozenset([path])), |
Given the following code snippet before the placeholder: <|code_start|>from __future__ import absolute_import
def names_list(targets):
return sorted(n.name for n in targets)
def _names(tree):
return [n.name for n in dependencies._brood(tree)]
def target(name, dir_path, path, parent):
<|code_end|>
, predict the next line using imports from the current file:
from shipwright._lib import dependencies, image, source_control
and context including class names, function names, and sometimes code from other files:
# Path: shipwright/_lib/dependencies.py
# def eval(build_targets, targets):
# def _find(tree, name):
# def find_(loc):
# def _make_tree(images):
# def is_child(target):
# def _replace(node, children):
# def _children(item):
# def _is_branch(item):
# def _make_node(node, children):
# def _breadth_first_iter(loc):
# def _lineage(loc):
# def _split(f, children):
# def _brood(loc):
# def _upto(target, tree):
# def _dependents(target, tree):
# def _exact(target, tree):
# def _exclude(target, tree):
#
# Path: shipwright/_lib/image.py
# def list_images(namespace, name_map, root_path):
# def image_name(namespace, name_map, root_path, path):
# def build_files(build_root):
# def name(docker_path):
# def sub_if(ex, repl, string, flags):
# def parse_copy(cmd):
# def copy_paths(docker_path):
# def join(path):
# def copy_paths_gen():
# def parent(docker_path):
#
# Path: shipwright/_lib/source_control.py
# def source_control(path, namespace, name_map, mode=None):
# if mode is None:
# mode = AUTO
# assert isinstance(mode, Mode)
# the_mode = mode if mode is not AUTO else get_mode(path)
# for cls in SourceControl.__subclasses__():
# if cls.mode is the_mode:
# return cls(path, namespace, name_map)
. Output only the next line. | return source_control.Target( |
Next line prediction: <|code_start|>def test_extra_tags(tmpdir):
tmp = tmpdir.join('shipwright-sample')
path = str(tmp)
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
repo = utils.create_repo(path, source)
commit = repo.head.ref.commit.hexsha[:12]
assert targets.targets(path=path, upto=['shared'], tags=['ham/spam']) == [
'shipwright/base:' + commit,
'shipwright/base:ham-spam',
'shipwright/base:master',
'shipwright/base:' + commit,
'shipwright/shared:' + commit,
'shipwright/shared:ham-spam',
'shipwright/shared:master',
'shipwright/shared:' + commit,
]
def test_no_repo(tmpdir):
tmp = tmpdir.join('shipwright-sample')
path = str(tmp)
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
utils.create_repo(path, source)
tmp.join('.git').remove(rec=1)
<|code_end|>
. Use current file imports:
(import pkg_resources
import pytest
from shipwright import exceptions, targets
from . import utils)
and context including class names, function names, or small code snippets from other files:
# Path: shipwright/exceptions.py
#
# Path: shipwright/targets.py
. Output only the next line. | with pytest.raises(exceptions.SourceControlNotFound): |
Given the following code snippet before the placeholder: <|code_start|>from __future__ import absolute_import
def test_simple(tmpdir):
tmp = tmpdir.join('shipwright-sample')
path = str(tmp)
source = pkg_resources.resource_filename(
__name__,
'examples/shipwright-sample',
)
repo = utils.create_repo(path, source)
commit = repo.head.ref.commit.hexsha[:12]
<|code_end|>
, predict the next line using imports from the current file:
import pkg_resources
import pytest
from shipwright import exceptions, targets
from . import utils
and context including class names, function names, and sometimes code from other files:
# Path: shipwright/exceptions.py
#
# Path: shipwright/targets.py
. Output only the next line. | assert targets.targets(path=path) == [ |
Based on the snippet: <|code_start|> >>> root = Node("root")
>>> s1 = Node("sub1", parent=root, bar=17)
>>> l = SymlinkNode(s1, parent=root, baz=18)
>>> l0 = Node("l0", parent=l)
>>> print(RenderTree(root))
Node('/root')
βββ Node('/root/sub1', bar=17, baz=18)
βββ SymlinkNode(Node('/root/sub1', bar=17, baz=18))
βββ Node('/root/sub1/l0')
Any modifications on the target node are also available on the linked node and vice-versa:
>>> s1.foo = 4
>>> s1.foo
4
>>> l.foo
4
>>> l.foo = 9
>>> s1.foo
9
>>> l.foo
9
"""
self.target = target
self.target.__dict__.update(kwargs)
self.parent = parent
if children:
self.children = children
def __repr__(self):
<|code_end|>
, predict the immediate next line with the help of imports:
from .symlinknodemixin import SymlinkNodeMixin
from .util import _repr
and context (classes, functions, sometimes code) from other files:
# Path: anytree/node/symlinknodemixin.py
# class SymlinkNodeMixin(NodeMixin):
# u"""
# The :any:`SymlinkNodeMixin` class extends any Python class to a symbolic link to a tree node.
#
# The class **MUST** have a `target` attribute refering to another tree node.
# The :any:`SymlinkNodeMixin` class has its own parent and its own child nodes.
# All other attribute accesses are just forwarded to the target node.
# A minimal implementation looks like (see :any:`SymlinkNode` for a full implemenation):
#
# >>> from anytree import SymlinkNodeMixin, Node, RenderTree
# >>> class SymlinkNode(SymlinkNodeMixin):
# ... def __init__(self, target, parent=None, children=None):
# ... self.target = target
# ... self.parent = parent
# ... if children:
# ... self.children = children
# ... def __repr__(self):
# ... return "SymlinkNode(%r)" % (self.target)
#
# >>> root = Node("root")
# >>> s1 = Node("sub1", parent=root)
# >>> l = SymlinkNode(s1, parent=root)
# >>> l0 = Node("l0", parent=l)
# >>> print(RenderTree(root))
# Node('/root')
# βββ Node('/root/sub1')
# βββ SymlinkNode(Node('/root/sub1'))
# βββ Node('/root/sub1/l0')
#
# Any modifications on the target node are also available on the linked node and vice-versa:
#
# >>> s1.foo = 4
# >>> s1.foo
# 4
# >>> l.foo
# 4
# >>> l.foo = 9
# >>> s1.foo
# 9
# >>> l.foo
# 9
# """
#
# def __getattr__(self, name):
# if name in ('_NodeMixin__parent', '_NodeMixin__children'):
# return super(SymlinkNodeMixin, self).__getattr__(name)
# else:
# return getattr(self.target, name)
#
# def __setattr__(self, name, value):
# if name in ('_NodeMixin__parent', '_NodeMixin__children', 'parent', 'children', 'target'):
# super(SymlinkNodeMixin, self).__setattr__(name, value)
# else:
# return setattr(self.target, name, value)
#
# Path: anytree/node/util.py
# def _repr(node, args=None, nameblacklist=None):
# classname = node.__class__.__name__
# args = args or []
# nameblacklist = nameblacklist or []
# for key, value in filter(lambda item: not item[0].startswith("_") and item[0] not in nameblacklist,
# sorted(node.__dict__.items(),
# key=lambda item: item[0])):
# args.append("%s=%r" % (key, value))
# return "%s(%s)" % (classname, ", ".join(args))
. Output only the next line. | return _repr(self, [repr(self.target)], nameblacklist=("target", )) |
Given the code snippet: <|code_start|>
@property
def anchestors(self):
"""
All parent nodes and their parent nodes - see :any:`ancestors`.
The attribute `anchestors` is just a typo of `ancestors`. Please use `ancestors`.
This attribute will be removed in the 3.0.0 release.
"""
warnings.warn(".anchestors was a typo and will be removed in version 3.0.0", DeprecationWarning)
return self.ancestors
@property
def descendants(self):
"""
All child nodes and all their child nodes.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> loui = Node("Loui", parent=marc)
>>> soe = Node("Soe", parent=lian)
>>> udo.descendants
(Node('/Udo/Marc'), Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lian/Soe'), Node('/Udo/Marc/Loui'))
>>> marc.descendants
(Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lian/Soe'), Node('/Udo/Marc/Loui'))
>>> lian.descendants
(Node('/Udo/Marc/Lian/Soe'),)
"""
<|code_end|>
, generate the next line using the imports in this file:
import warnings
from anytree.iterators import PreOrderIter
from .exceptions import LoopError
from .exceptions import TreeError
and context (functions, classes, or occasionally code) from other files:
# Path: anytree/iterators/preorderiter.py
# class PreOrderIter(AbstractIter):
# """
# Iterate over tree applying pre-order strategy starting at `node`.
#
# Start at root and go-down until reaching a leaf node.
# Step upwards then, and search for the next leafs.
#
# >>> from anytree import Node, RenderTree, AsciiStyle, PreOrderIter
# >>> f = Node("f")
# >>> b = Node("b", parent=f)
# >>> a = Node("a", parent=b)
# >>> d = Node("d", parent=b)
# >>> c = Node("c", parent=d)
# >>> e = Node("e", parent=d)
# >>> g = Node("g", parent=f)
# >>> i = Node("i", parent=g)
# >>> h = Node("h", parent=i)
# >>> print(RenderTree(f, style=AsciiStyle()).by_attr())
# f
# |-- b
# | |-- a
# | +-- d
# | |-- c
# | +-- e
# +-- g
# +-- i
# +-- h
# >>> [node.name for node in PreOrderIter(f)]
# ['f', 'b', 'a', 'd', 'c', 'e', 'g', 'i', 'h']
# >>> [node.name for node in PreOrderIter(f, maxlevel=3)]
# ['f', 'b', 'a', 'd', 'g', 'i']
# >>> [node.name for node in PreOrderIter(f, filter_=lambda n: n.name not in ('e', 'g'))]
# ['f', 'b', 'a', 'd', 'c', 'i', 'h']
# >>> [node.name for node in PreOrderIter(f, stop=lambda n: n.name == 'd')]
# ['f', 'b', 'a', 'g', 'i', 'h']
# """
#
# @staticmethod
# def _iter(children, filter_, stop, maxlevel):
# for child_ in children:
# if stop(child_):
# continue
# if filter_(child_):
# yield child_
# if not AbstractIter._abort_at_level(2, maxlevel):
# descendantmaxlevel = maxlevel - 1 if maxlevel else None
# for descendant_ in PreOrderIter._iter(child_.children, filter_, stop, descendantmaxlevel):
# yield descendant_
#
# Path: anytree/node/exceptions.py
# class LoopError(TreeError):
# """Tree contains infinite loop."""
#
# pass
#
# Path: anytree/node/exceptions.py
# class TreeError(RuntimeError):
# """Tree Error."""
#
# pass
. Output only the next line. | return tuple(PreOrderIter(self))[1:] |
Here is a snippet: <|code_start|>
>>> marc.is_root
False
>>> marc.parent = None
>>> marc.is_root
True
"""
try:
return self.__parent
except AttributeError:
return None
@parent.setter
def parent(self, value):
if value is not None and not isinstance(value, NodeMixin):
msg = "Parent node %r is not of type 'NodeMixin'." % (value, )
raise TreeError(msg)
try:
parent = self.__parent
except AttributeError:
parent = None
if parent is not value:
self.__check_loop(value)
self.__detach(parent)
self.__attach(value)
def __check_loop(self, node):
if node is not None:
if node is self:
msg = "Cannot set parent. %r cannot be parent of itself."
<|code_end|>
. Write the next line using the current file imports:
import warnings
from anytree.iterators import PreOrderIter
from .exceptions import LoopError
from .exceptions import TreeError
and context from other files:
# Path: anytree/iterators/preorderiter.py
# class PreOrderIter(AbstractIter):
# """
# Iterate over tree applying pre-order strategy starting at `node`.
#
# Start at root and go-down until reaching a leaf node.
# Step upwards then, and search for the next leafs.
#
# >>> from anytree import Node, RenderTree, AsciiStyle, PreOrderIter
# >>> f = Node("f")
# >>> b = Node("b", parent=f)
# >>> a = Node("a", parent=b)
# >>> d = Node("d", parent=b)
# >>> c = Node("c", parent=d)
# >>> e = Node("e", parent=d)
# >>> g = Node("g", parent=f)
# >>> i = Node("i", parent=g)
# >>> h = Node("h", parent=i)
# >>> print(RenderTree(f, style=AsciiStyle()).by_attr())
# f
# |-- b
# | |-- a
# | +-- d
# | |-- c
# | +-- e
# +-- g
# +-- i
# +-- h
# >>> [node.name for node in PreOrderIter(f)]
# ['f', 'b', 'a', 'd', 'c', 'e', 'g', 'i', 'h']
# >>> [node.name for node in PreOrderIter(f, maxlevel=3)]
# ['f', 'b', 'a', 'd', 'g', 'i']
# >>> [node.name for node in PreOrderIter(f, filter_=lambda n: n.name not in ('e', 'g'))]
# ['f', 'b', 'a', 'd', 'c', 'i', 'h']
# >>> [node.name for node in PreOrderIter(f, stop=lambda n: n.name == 'd')]
# ['f', 'b', 'a', 'g', 'i', 'h']
# """
#
# @staticmethod
# def _iter(children, filter_, stop, maxlevel):
# for child_ in children:
# if stop(child_):
# continue
# if filter_(child_):
# yield child_
# if not AbstractIter._abort_at_level(2, maxlevel):
# descendantmaxlevel = maxlevel - 1 if maxlevel else None
# for descendant_ in PreOrderIter._iter(child_.children, filter_, stop, descendantmaxlevel):
# yield descendant_
#
# Path: anytree/node/exceptions.py
# class LoopError(TreeError):
# """Tree contains infinite loop."""
#
# pass
#
# Path: anytree/node/exceptions.py
# class TreeError(RuntimeError):
# """Tree Error."""
#
# pass
, which may include functions, classes, or code. Output only the next line. | raise LoopError(msg % (self, )) |
Given the following code snippet before the placeholder: <|code_start|> Node('/Marc')
βββ Node('/Marc/Lian')
**Attach**
>>> marc.parent = udo
>>> print(RenderTree(udo))
Node('/Udo')
βββ Node('/Udo/Marc')
βββ Node('/Udo/Marc/Lian')
**Detach**
To make a node to a root node, just set this attribute to `None`.
>>> marc.is_root
False
>>> marc.parent = None
>>> marc.is_root
True
"""
try:
return self.__parent
except AttributeError:
return None
@parent.setter
def parent(self, value):
if value is not None and not isinstance(value, NodeMixin):
msg = "Parent node %r is not of type 'NodeMixin'." % (value, )
<|code_end|>
, predict the next line using imports from the current file:
import warnings
from anytree.iterators import PreOrderIter
from .exceptions import LoopError
from .exceptions import TreeError
and context including class names, function names, and sometimes code from other files:
# Path: anytree/iterators/preorderiter.py
# class PreOrderIter(AbstractIter):
# """
# Iterate over tree applying pre-order strategy starting at `node`.
#
# Start at root and go-down until reaching a leaf node.
# Step upwards then, and search for the next leafs.
#
# >>> from anytree import Node, RenderTree, AsciiStyle, PreOrderIter
# >>> f = Node("f")
# >>> b = Node("b", parent=f)
# >>> a = Node("a", parent=b)
# >>> d = Node("d", parent=b)
# >>> c = Node("c", parent=d)
# >>> e = Node("e", parent=d)
# >>> g = Node("g", parent=f)
# >>> i = Node("i", parent=g)
# >>> h = Node("h", parent=i)
# >>> print(RenderTree(f, style=AsciiStyle()).by_attr())
# f
# |-- b
# | |-- a
# | +-- d
# | |-- c
# | +-- e
# +-- g
# +-- i
# +-- h
# >>> [node.name for node in PreOrderIter(f)]
# ['f', 'b', 'a', 'd', 'c', 'e', 'g', 'i', 'h']
# >>> [node.name for node in PreOrderIter(f, maxlevel=3)]
# ['f', 'b', 'a', 'd', 'g', 'i']
# >>> [node.name for node in PreOrderIter(f, filter_=lambda n: n.name not in ('e', 'g'))]
# ['f', 'b', 'a', 'd', 'c', 'i', 'h']
# >>> [node.name for node in PreOrderIter(f, stop=lambda n: n.name == 'd')]
# ['f', 'b', 'a', 'g', 'i', 'h']
# """
#
# @staticmethod
# def _iter(children, filter_, stop, maxlevel):
# for child_ in children:
# if stop(child_):
# continue
# if filter_(child_):
# yield child_
# if not AbstractIter._abort_at_level(2, maxlevel):
# descendantmaxlevel = maxlevel - 1 if maxlevel else None
# for descendant_ in PreOrderIter._iter(child_.children, filter_, stop, descendantmaxlevel):
# yield descendant_
#
# Path: anytree/node/exceptions.py
# class LoopError(TreeError):
# """Tree contains infinite loop."""
#
# pass
#
# Path: anytree/node/exceptions.py
# class TreeError(RuntimeError):
# """Tree Error."""
#
# pass
. Output only the next line. | raise TreeError(msg) |
Next line prediction: <|code_start|> ... "children": [
... {
... "a": "sub0",
... "children": [
... {
... "a": "sub0A",
... "b": "foo"
... },
... {
... "a": "sub0B"
... }
... ]
... },
... {
... "a": "sub1"
... }
... ]
... }'''
>>> root = importer.import_(data)
>>> print(RenderTree(root))
AnyNode(a='root')
βββ AnyNode(a='sub0')
β βββ AnyNode(a='sub0A', b='foo')
β βββ AnyNode(a='sub0B')
βββ AnyNode(a='sub1')
"""
self.dictimporter = dictimporter
self.kwargs = kwargs
def __import(self, data):
<|code_end|>
. Use current file imports:
(import json
from .dictimporter import DictImporter)
and context including class names, function names, or small code snippets from other files:
# Path: anytree/importer/dictimporter.py
# class DictImporter(object):
#
# def __init__(self, nodecls=AnyNode):
# u"""
# Import Tree from dictionary.
#
# Every dictionary is converted to an instance of `nodecls`.
# The dictionaries listed in the children attribute are converted
# likewise and added as children.
#
# Keyword Args:
# nodecls: class used for nodes.
#
# >>> from anytree.importer import DictImporter
# >>> from anytree import RenderTree
# >>> importer = DictImporter()
# >>> data = {
# ... 'a': 'root',
# ... 'children': [{'a': 'sub0',
# ... 'children': [{'a': 'sub0A', 'b': 'foo'}, {'a': 'sub0B'}]},
# ... {'a': 'sub1'}]}
# >>> root = importer.import_(data)
# >>> print(RenderTree(root))
# AnyNode(a='root')
# βββ AnyNode(a='sub0')
# β βββ AnyNode(a='sub0A', b='foo')
# β βββ AnyNode(a='sub0B')
# βββ AnyNode(a='sub1')
# """
# self.nodecls = nodecls
#
# def import_(self, data):
# """Import tree from `data`."""
# return self.__import(data)
#
# def __import(self, data, parent=None):
# assert isinstance(data, dict)
# assert "parent" not in data
# attrs = dict(data)
# children = attrs.pop("children", [])
# node = self.nodecls(parent=parent, **attrs)
# for child in children:
# self.__import(child, parent=node)
# return node
. Output only the next line. | dictimporter = self.dictimporter or DictImporter() |
Given the code snippet: <|code_start|> >>> print(exporter.export(root))
{
"a": "root",
"children": [
{
"a": "sub0",
"children": [
{
"a": "sub0A",
"b": "foo"
},
{
"a": "sub0B"
}
]
},
{
"a": "sub1"
}
]
}
.. note:: Whenever the json output does not meet your expections, see the :any:`json` documentation.
For instance, if you have unicode/ascii issues, please try `JsonExporter(..., ensure_ascii=False)`.
"""
self.dictexporter = dictexporter
self.maxlevel = maxlevel
self.kwargs = kwargs
def _export(self, node):
<|code_end|>
, generate the next line using the imports in this file:
import json
from .dictexporter import DictExporter
and context (functions, classes, or occasionally code) from other files:
# Path: anytree/exporter/dictexporter.py
# class DictExporter(object):
#
# def __init__(self, dictcls=dict, attriter=None, childiter=list, maxlevel=None):
# """
# Tree to dictionary exporter.
#
# Every node is converted to a dictionary with all instance
# attributes as key-value pairs.
# Child nodes are exported to the children attribute.
# A list of dictionaries.
#
# Keyword Args:
# dictcls: class used as dictionary. :any:`dict` by default.
# attriter: attribute iterator for sorting and/or filtering.
# childiter: child iterator for sorting and/or filtering.
# maxlevel (int): Limit export to this number of levels.
#
# >>> from pprint import pprint # just for nice printing
# >>> from anytree import AnyNode
# >>> from anytree.exporter import DictExporter
# >>> root = AnyNode(a="root")
# >>> s0 = AnyNode(a="sub0", parent=root)
# >>> s0a = AnyNode(a="sub0A", b="foo", parent=s0)
# >>> s0b = AnyNode(a="sub0B", parent=s0)
# >>> s1 = AnyNode(a="sub1", parent=root)
#
# >>> exporter = DictExporter()
# >>> pprint(exporter.export(root)) # order within dictionary might vary!
# {'a': 'root',
# 'children': [{'a': 'sub0',
# 'children': [{'a': 'sub0A', 'b': 'foo'}, {'a': 'sub0B'}]},
# {'a': 'sub1'}]}
#
# Pythons dictionary `dict` does not preserve order.
# :any:`collections.OrderedDict` does.
# In this case attributes can be ordered via `attriter`.
#
# >>> from collections import OrderedDict
# >>> exporter = DictExporter(dictcls=OrderedDict, attriter=sorted)
# >>> pprint(exporter.export(root))
# OrderedDict([('a', 'root'),
# ('children',
# [OrderedDict([('a', 'sub0'),
# ('children',
# [OrderedDict([('a', 'sub0A'), ('b', 'foo')]),
# OrderedDict([('a', 'sub0B')])])]),
# OrderedDict([('a', 'sub1')])])])
#
# The attribute iterator `attriter` may be used for filtering too.
# For example, just dump attributes named `a`:
#
# >>> exporter = DictExporter(attriter=lambda attrs: [(k, v) for k, v in attrs if k == "a"])
# >>> pprint(exporter.export(root))
# {'a': 'root',
# 'children': [{'a': 'sub0', 'children': [{'a': 'sub0A'}, {'a': 'sub0B'}]},
# {'a': 'sub1'}]}
#
# The child iterator `childiter` can be used for sorting and filtering likewise:
#
# >>> exporter = DictExporter(childiter=lambda children: [child for child in children if "0" in child.a])
# >>> pprint(exporter.export(root))
# {'a': 'root',
# 'children': [{'a': 'sub0',
# 'children': [{'a': 'sub0A', 'b': 'foo'}, {'a': 'sub0B'}]}]}
# """
# self.dictcls = dictcls
# self.attriter = attriter
# self.childiter = childiter
# self.maxlevel = maxlevel
#
# def export(self, node):
# """Export tree starting at `node`."""
# attriter = self.attriter or (lambda attr_values: attr_values)
# return self.__export(node, self.dictcls, attriter, self.childiter)
#
# def __export(self, node, dictcls, attriter, childiter, level=1):
# attr_values = attriter(self._iter_attr_values(node))
# data = dictcls(attr_values)
# maxlevel = self.maxlevel
# if maxlevel is None or level < maxlevel:
# children = [self.__export(child, dictcls, attriter, childiter, level=level + 1)
# for child in childiter(node.children)]
# if children:
# data['children'] = children
# return data
#
# def _iter_attr_values(self, node):
# for k, v in node.__dict__.items():
# if k in ('_NodeMixin__children', '_NodeMixin__parent'):
# continue
# yield k, v
. Output only the next line. | dictexporter = self.dictexporter or DictExporter() |
Predict the next line for this snippet: <|code_start|> >>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find_by_attr(f, "d")
Node('/f/b/d')
>>> find_by_attr(f, name="foo", value=4)
Node('/f/b/d/c', foo=4)
>>> find_by_attr(f, name="foo", value=8)
"""
return _find(node, filter_=lambda n: _filter_by_name(n, name, value),
maxlevel=maxlevel)
def _find(node, filter_, stop=None, maxlevel=None):
items = _findall(node, filter_, stop=stop, maxlevel=maxlevel, maxcount=1)
return items[0] if items else None
def _findall(node, filter_, stop=None, maxlevel=None, mincount=None, maxcount=None):
<|code_end|>
with the help of current file imports:
from anytree.iterators import PreOrderIter
and context from other files:
# Path: anytree/iterators/preorderiter.py
# class PreOrderIter(AbstractIter):
# """
# Iterate over tree applying pre-order strategy starting at `node`.
#
# Start at root and go-down until reaching a leaf node.
# Step upwards then, and search for the next leafs.
#
# >>> from anytree import Node, RenderTree, AsciiStyle, PreOrderIter
# >>> f = Node("f")
# >>> b = Node("b", parent=f)
# >>> a = Node("a", parent=b)
# >>> d = Node("d", parent=b)
# >>> c = Node("c", parent=d)
# >>> e = Node("e", parent=d)
# >>> g = Node("g", parent=f)
# >>> i = Node("i", parent=g)
# >>> h = Node("h", parent=i)
# >>> print(RenderTree(f, style=AsciiStyle()).by_attr())
# f
# |-- b
# | |-- a
# | +-- d
# | |-- c
# | +-- e
# +-- g
# +-- i
# +-- h
# >>> [node.name for node in PreOrderIter(f)]
# ['f', 'b', 'a', 'd', 'c', 'e', 'g', 'i', 'h']
# >>> [node.name for node in PreOrderIter(f, maxlevel=3)]
# ['f', 'b', 'a', 'd', 'g', 'i']
# >>> [node.name for node in PreOrderIter(f, filter_=lambda n: n.name not in ('e', 'g'))]
# ['f', 'b', 'a', 'd', 'c', 'i', 'h']
# >>> [node.name for node in PreOrderIter(f, stop=lambda n: n.name == 'd')]
# ['f', 'b', 'a', 'g', 'i', 'h']
# """
#
# @staticmethod
# def _iter(children, filter_, stop, maxlevel):
# for child_ in children:
# if stop(child_):
# continue
# if filter_(child_):
# yield child_
# if not AbstractIter._abort_at_level(2, maxlevel):
# descendantmaxlevel = maxlevel - 1 if maxlevel else None
# for descendant_ in PreOrderIter._iter(child_.children, filter_, stop, descendantmaxlevel):
# yield descendant_
, which may contain function names, class names, or code. Output only the next line. | result = tuple(PreOrderIter(node, filter_, stop, maxlevel)) |
Given the code snippet: <|code_start|> >>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> [[node.name for node in children] for children in ZigZagGroupIter(f)]
[['f'], ['g', 'b'], ['a', 'd', 'i'], ['h', 'e', 'c']]
>>> [[node.name for node in children] for children in ZigZagGroupIter(f, maxlevel=3)]
[['f'], ['g', 'b'], ['a', 'd', 'i']]
>>> [[node.name for node in children]
... for children in ZigZagGroupIter(f, filter_=lambda n: n.name not in ('e', 'g'))]
[['f'], ['b'], ['a', 'd', 'i'], ['h', 'c']]
>>> [[node.name for node in children]
... for children in ZigZagGroupIter(f, stop=lambda n: n.name == 'd')]
[['f'], ['g', 'b'], ['a', 'i'], ['h']]
"""
@staticmethod
def _iter(children, filter_, stop, maxlevel):
if children:
assert len(children) == 1
<|code_end|>
, generate the next line using the imports in this file:
from .abstractiter import AbstractIter
from .levelordergroupiter import LevelOrderGroupIter
and context (functions, classes, or occasionally code) from other files:
# Path: anytree/iterators/abstractiter.py
# class AbstractIter(six.Iterator):
#
# def __init__(self, node, filter_=None, stop=None, maxlevel=None):
# """
# Iterate over tree starting at `node`.
#
# Base class for all iterators.
#
# Keyword Args:
# filter_: function called with every `node` as argument, `node` is returned if `True`.
# stop: stop iteration at `node` if `stop` function returns `True` for `node`.
# maxlevel (int): maximum descending in the node hierarchy.
# """
# self.node = node
# self.filter_ = filter_
# self.stop = stop
# self.maxlevel = maxlevel
# self.__iter = None
#
# def __init(self):
# node = self.node
# maxlevel = self.maxlevel
# filter_ = self.filter_ or AbstractIter.__default_filter
# stop = self.stop or AbstractIter.__default_stop
# children = [] if AbstractIter._abort_at_level(1, maxlevel) else AbstractIter._get_children([node], stop)
# return self._iter(children, filter_, stop, maxlevel)
#
# @staticmethod
# def __default_filter(node):
# return True
#
# @staticmethod
# def __default_stop(node):
# return False
#
# def __iter__(self):
# return self
#
# def __next__(self):
# if self.__iter is None:
# self.__iter = self.__init()
# return next(self.__iter)
#
# @staticmethod
# def _iter(children, filter_, stop, maxlevel):
# raise NotImplementedError() # pragma: no cover
#
# @staticmethod
# def _abort_at_level(level, maxlevel):
# return maxlevel is not None and level > maxlevel
#
# @staticmethod
# def _get_children(children, stop):
# return [child for child in children if not stop(child)]
#
# Path: anytree/iterators/levelordergroupiter.py
# class LevelOrderGroupIter(AbstractIter):
# """
# Iterate over tree applying level-order strategy with grouping starting at `node`.
#
# Return a tuple of nodes for each level. The first tuple contains the
# nodes at level 0 (always `node`). The second tuple contains the nodes at level 1
# (children of `node`). The next level contains the children of the children, and so on.
#
# >>> from anytree import Node, RenderTree, AsciiStyle, LevelOrderGroupIter
# >>> f = Node("f")
# >>> b = Node("b", parent=f)
# >>> a = Node("a", parent=b)
# >>> d = Node("d", parent=b)
# >>> c = Node("c", parent=d)
# >>> e = Node("e", parent=d)
# >>> g = Node("g", parent=f)
# >>> i = Node("i", parent=g)
# >>> h = Node("h", parent=i)
# >>> print(RenderTree(f, style=AsciiStyle()).by_attr())
# f
# |-- b
# | |-- a
# | +-- d
# | |-- c
# | +-- e
# +-- g
# +-- i
# +-- h
# >>> [[node.name for node in children] for children in LevelOrderGroupIter(f)]
# [['f'], ['b', 'g'], ['a', 'd', 'i'], ['c', 'e', 'h']]
# >>> [[node.name for node in children] for children in LevelOrderGroupIter(f, maxlevel=3)]
# [['f'], ['b', 'g'], ['a', 'd', 'i']]
# >>> [[node.name for node in children]
# ... for children in LevelOrderGroupIter(f, filter_=lambda n: n.name not in ('e', 'g'))]
# [['f'], ['b'], ['a', 'd', 'i'], ['c', 'h']]
# >>> [[node.name for node in children]
# ... for children in LevelOrderGroupIter(f, stop=lambda n: n.name == 'd')]
# [['f'], ['b', 'g'], ['a', 'i'], ['h']]
# """
#
# @staticmethod
# def _iter(children, filter_, stop, maxlevel):
# level = 1
# while children:
# yield tuple([child for child in children if filter_(child)])
# level += 1
# if AbstractIter._abort_at_level(level, maxlevel):
# break
# children = LevelOrderGroupIter._get_grandchildren(children, stop)
#
# @staticmethod
# def _get_grandchildren(children, stop):
# next_children = []
# for child in children:
# next_children = next_children + AbstractIter._get_children(child.children, stop)
# return next_children
. Output only the next line. | _iter = LevelOrderGroupIter(children[0], filter_, stop, maxlevel) |
Given the code snippet: <|code_start|>"""Utility methods."""
def housecode_to_byte(housecode):
"""Return the byte value of an X10 housecode."""
return HC_LOOKUP.get(housecode.lower())
def unitcode_to_byte(unitcode):
"""Return the byte value of an X10 unitcode."""
<|code_end|>
, generate the next line using the imports in this file:
from insteonplm.constants import (
HC_LOOKUP,
UC_LOOKUP,
X10_COMMAND_ALL_UNITS_OFF,
X10_COMMAND_ALL_LIGHTS_ON,
X10_COMMAND_ALL_LIGHTS_OFF,
X10CommandType,
)
and context (functions, classes, or occasionally code) from other files:
# Path: insteonplm/constants.py
# HC_LOOKUP = {
# "a": 0x06,
# "b": 0x0E,
# "c": 0x02,
# "d": 0x0A,
# "e": 0x01,
# "f": 0x09,
# "g": 0x05,
# "h": 0x0D,
# "i": 0x07,
# "j": 0x0F,
# "k": 0x03,
# "l": 0x0B,
# "m": 0x00,
# "n": 0x08,
# "o": 0x04,
# "p": 0x0C,
# }
#
# UC_LOOKUP = {
# 1: 0x06,
# 2: 0x0E,
# 3: 0x02,
# 4: 0x0A,
# 5: 0x01,
# 6: 0x09,
# 7: 0x05,
# 8: 0x0D,
# 9: 0x07,
# 10: 0x0F,
# 11: 0x03,
# 12: 0x0B,
# 13: 0x00,
# 14: 0x08,
# 15: 0x04,
# 16: 0x0C,
# 20: 0x20, # All Units Off fake device
# 21: 0x21, # All Lights On fake device
# 22: 0x22,
# } # All Lights Off fake device
#
# X10_COMMAND_ALL_UNITS_OFF = 0x00
#
# X10_COMMAND_ALL_LIGHTS_ON = 0x01
#
# X10_COMMAND_ALL_LIGHTS_OFF = 0x06
#
# class X10CommandType(Enum):
# """X10 command types."""
#
# DIRECT = 0
# BROADCAST = 1
. Output only the next line. | return UC_LOOKUP.get(unitcode) |
Continue the code snippet: <|code_start|>"""Utility methods."""
def housecode_to_byte(housecode):
"""Return the byte value of an X10 housecode."""
return HC_LOOKUP.get(housecode.lower())
def unitcode_to_byte(unitcode):
"""Return the byte value of an X10 unitcode."""
return UC_LOOKUP.get(unitcode)
def byte_to_housecode(bytecode):
"""Return an X10 housecode value from a byte value."""
hc = list(HC_LOOKUP.keys())[list(HC_LOOKUP.values()).index(bytecode)]
return hc.upper()
def byte_to_unitcode(bytecode):
"""Return an X10 unitcode value from a byte value."""
return list(UC_LOOKUP.keys())[list(UC_LOOKUP.values()).index(bytecode)]
def x10_command_type(command):
"""Return the X10 command type from an X10 command."""
command_type = X10CommandType.DIRECT
if command in [
<|code_end|>
. Use current file imports:
from insteonplm.constants import (
HC_LOOKUP,
UC_LOOKUP,
X10_COMMAND_ALL_UNITS_OFF,
X10_COMMAND_ALL_LIGHTS_ON,
X10_COMMAND_ALL_LIGHTS_OFF,
X10CommandType,
)
and context (classes, functions, or code) from other files:
# Path: insteonplm/constants.py
# HC_LOOKUP = {
# "a": 0x06,
# "b": 0x0E,
# "c": 0x02,
# "d": 0x0A,
# "e": 0x01,
# "f": 0x09,
# "g": 0x05,
# "h": 0x0D,
# "i": 0x07,
# "j": 0x0F,
# "k": 0x03,
# "l": 0x0B,
# "m": 0x00,
# "n": 0x08,
# "o": 0x04,
# "p": 0x0C,
# }
#
# UC_LOOKUP = {
# 1: 0x06,
# 2: 0x0E,
# 3: 0x02,
# 4: 0x0A,
# 5: 0x01,
# 6: 0x09,
# 7: 0x05,
# 8: 0x0D,
# 9: 0x07,
# 10: 0x0F,
# 11: 0x03,
# 12: 0x0B,
# 13: 0x00,
# 14: 0x08,
# 15: 0x04,
# 16: 0x0C,
# 20: 0x20, # All Units Off fake device
# 21: 0x21, # All Lights On fake device
# 22: 0x22,
# } # All Lights Off fake device
#
# X10_COMMAND_ALL_UNITS_OFF = 0x00
#
# X10_COMMAND_ALL_LIGHTS_ON = 0x01
#
# X10_COMMAND_ALL_LIGHTS_OFF = 0x06
#
# class X10CommandType(Enum):
# """X10 command types."""
#
# DIRECT = 0
# BROADCAST = 1
. Output only the next line. | X10_COMMAND_ALL_UNITS_OFF, |
Given the following code snippet before the placeholder: <|code_start|>
def housecode_to_byte(housecode):
"""Return the byte value of an X10 housecode."""
return HC_LOOKUP.get(housecode.lower())
def unitcode_to_byte(unitcode):
"""Return the byte value of an X10 unitcode."""
return UC_LOOKUP.get(unitcode)
def byte_to_housecode(bytecode):
"""Return an X10 housecode value from a byte value."""
hc = list(HC_LOOKUP.keys())[list(HC_LOOKUP.values()).index(bytecode)]
return hc.upper()
def byte_to_unitcode(bytecode):
"""Return an X10 unitcode value from a byte value."""
return list(UC_LOOKUP.keys())[list(UC_LOOKUP.values()).index(bytecode)]
def x10_command_type(command):
"""Return the X10 command type from an X10 command."""
command_type = X10CommandType.DIRECT
if command in [
X10_COMMAND_ALL_UNITS_OFF,
<|code_end|>
, predict the next line using imports from the current file:
from insteonplm.constants import (
HC_LOOKUP,
UC_LOOKUP,
X10_COMMAND_ALL_UNITS_OFF,
X10_COMMAND_ALL_LIGHTS_ON,
X10_COMMAND_ALL_LIGHTS_OFF,
X10CommandType,
)
and context including class names, function names, and sometimes code from other files:
# Path: insteonplm/constants.py
# HC_LOOKUP = {
# "a": 0x06,
# "b": 0x0E,
# "c": 0x02,
# "d": 0x0A,
# "e": 0x01,
# "f": 0x09,
# "g": 0x05,
# "h": 0x0D,
# "i": 0x07,
# "j": 0x0F,
# "k": 0x03,
# "l": 0x0B,
# "m": 0x00,
# "n": 0x08,
# "o": 0x04,
# "p": 0x0C,
# }
#
# UC_LOOKUP = {
# 1: 0x06,
# 2: 0x0E,
# 3: 0x02,
# 4: 0x0A,
# 5: 0x01,
# 6: 0x09,
# 7: 0x05,
# 8: 0x0D,
# 9: 0x07,
# 10: 0x0F,
# 11: 0x03,
# 12: 0x0B,
# 13: 0x00,
# 14: 0x08,
# 15: 0x04,
# 16: 0x0C,
# 20: 0x20, # All Units Off fake device
# 21: 0x21, # All Lights On fake device
# 22: 0x22,
# } # All Lights Off fake device
#
# X10_COMMAND_ALL_UNITS_OFF = 0x00
#
# X10_COMMAND_ALL_LIGHTS_ON = 0x01
#
# X10_COMMAND_ALL_LIGHTS_OFF = 0x06
#
# class X10CommandType(Enum):
# """X10 command types."""
#
# DIRECT = 0
# BROADCAST = 1
. Output only the next line. | X10_COMMAND_ALL_LIGHTS_ON, |
Based on the snippet: <|code_start|>
def housecode_to_byte(housecode):
"""Return the byte value of an X10 housecode."""
return HC_LOOKUP.get(housecode.lower())
def unitcode_to_byte(unitcode):
"""Return the byte value of an X10 unitcode."""
return UC_LOOKUP.get(unitcode)
def byte_to_housecode(bytecode):
"""Return an X10 housecode value from a byte value."""
hc = list(HC_LOOKUP.keys())[list(HC_LOOKUP.values()).index(bytecode)]
return hc.upper()
def byte_to_unitcode(bytecode):
"""Return an X10 unitcode value from a byte value."""
return list(UC_LOOKUP.keys())[list(UC_LOOKUP.values()).index(bytecode)]
def x10_command_type(command):
"""Return the X10 command type from an X10 command."""
command_type = X10CommandType.DIRECT
if command in [
X10_COMMAND_ALL_UNITS_OFF,
X10_COMMAND_ALL_LIGHTS_ON,
<|code_end|>
, predict the immediate next line with the help of imports:
from insteonplm.constants import (
HC_LOOKUP,
UC_LOOKUP,
X10_COMMAND_ALL_UNITS_OFF,
X10_COMMAND_ALL_LIGHTS_ON,
X10_COMMAND_ALL_LIGHTS_OFF,
X10CommandType,
)
and context (classes, functions, sometimes code) from other files:
# Path: insteonplm/constants.py
# HC_LOOKUP = {
# "a": 0x06,
# "b": 0x0E,
# "c": 0x02,
# "d": 0x0A,
# "e": 0x01,
# "f": 0x09,
# "g": 0x05,
# "h": 0x0D,
# "i": 0x07,
# "j": 0x0F,
# "k": 0x03,
# "l": 0x0B,
# "m": 0x00,
# "n": 0x08,
# "o": 0x04,
# "p": 0x0C,
# }
#
# UC_LOOKUP = {
# 1: 0x06,
# 2: 0x0E,
# 3: 0x02,
# 4: 0x0A,
# 5: 0x01,
# 6: 0x09,
# 7: 0x05,
# 8: 0x0D,
# 9: 0x07,
# 10: 0x0F,
# 11: 0x03,
# 12: 0x0B,
# 13: 0x00,
# 14: 0x08,
# 15: 0x04,
# 16: 0x0C,
# 20: 0x20, # All Units Off fake device
# 21: 0x21, # All Lights On fake device
# 22: 0x22,
# } # All Lights Off fake device
#
# X10_COMMAND_ALL_UNITS_OFF = 0x00
#
# X10_COMMAND_ALL_LIGHTS_ON = 0x01
#
# X10_COMMAND_ALL_LIGHTS_OFF = 0x06
#
# class X10CommandType(Enum):
# """X10 command types."""
#
# DIRECT = 0
# BROADCAST = 1
. Output only the next line. | X10_COMMAND_ALL_LIGHTS_OFF, |
Based on the snippet: <|code_start|>"""Utility methods."""
def housecode_to_byte(housecode):
"""Return the byte value of an X10 housecode."""
return HC_LOOKUP.get(housecode.lower())
def unitcode_to_byte(unitcode):
"""Return the byte value of an X10 unitcode."""
return UC_LOOKUP.get(unitcode)
def byte_to_housecode(bytecode):
"""Return an X10 housecode value from a byte value."""
hc = list(HC_LOOKUP.keys())[list(HC_LOOKUP.values()).index(bytecode)]
return hc.upper()
def byte_to_unitcode(bytecode):
"""Return an X10 unitcode value from a byte value."""
return list(UC_LOOKUP.keys())[list(UC_LOOKUP.values()).index(bytecode)]
def x10_command_type(command):
"""Return the X10 command type from an X10 command."""
<|code_end|>
, predict the immediate next line with the help of imports:
from insteonplm.constants import (
HC_LOOKUP,
UC_LOOKUP,
X10_COMMAND_ALL_UNITS_OFF,
X10_COMMAND_ALL_LIGHTS_ON,
X10_COMMAND_ALL_LIGHTS_OFF,
X10CommandType,
)
and context (classes, functions, sometimes code) from other files:
# Path: insteonplm/constants.py
# HC_LOOKUP = {
# "a": 0x06,
# "b": 0x0E,
# "c": 0x02,
# "d": 0x0A,
# "e": 0x01,
# "f": 0x09,
# "g": 0x05,
# "h": 0x0D,
# "i": 0x07,
# "j": 0x0F,
# "k": 0x03,
# "l": 0x0B,
# "m": 0x00,
# "n": 0x08,
# "o": 0x04,
# "p": 0x0C,
# }
#
# UC_LOOKUP = {
# 1: 0x06,
# 2: 0x0E,
# 3: 0x02,
# 4: 0x0A,
# 5: 0x01,
# 6: 0x09,
# 7: 0x05,
# 8: 0x0D,
# 9: 0x07,
# 10: 0x0F,
# 11: 0x03,
# 12: 0x0B,
# 13: 0x00,
# 14: 0x08,
# 15: 0x04,
# 16: 0x0C,
# 20: 0x20, # All Units Off fake device
# 21: 0x21, # All Lights On fake device
# 22: 0x22,
# } # All Lights Off fake device
#
# X10_COMMAND_ALL_UNITS_OFF = 0x00
#
# X10_COMMAND_ALL_LIGHTS_ON = 0x01
#
# X10_COMMAND_ALL_LIGHTS_OFF = 0x06
#
# class X10CommandType(Enum):
# """X10 command types."""
#
# DIRECT = 0
# BROADCAST = 1
. Output only the next line. | command_type = X10CommandType.DIRECT |
Predict the next line after this snippet: <|code_start|> """Return a hexadecimal representation of the message flags."""
return binascii.hexlify(self.bytes).decode()
# pylint: disable=no-self-use
def _normalize(self, flags):
"""Take any format of flags and turn it into a hex string."""
norm = None
if isinstance(flags, MessageFlags):
norm = flags.bytes
elif isinstance(flags, bytearray):
norm = binascii.hexlify(flags)
elif isinstance(flags, int):
norm = bytes([flags])
elif isinstance(flags, bytes):
norm = binascii.hexlify(flags)
elif isinstance(flags, str):
flags = flags[0:2]
norm = binascii.hexlify(binascii.unhexlify(flags.lower()))
elif flags is None:
norm = None
else:
_LOGGER.warning("MessageFlags with unknown type %s: %r", type(flags), flags)
return norm
def _set_properties(self, flags):
"""Set the properties of the message flags based on a byte input."""
flagByte = self._normalize(flags)
if flagByte is not None:
self._messageType = (flagByte[0] & 0xE0) >> 5
<|code_end|>
using the current file's imports:
import logging
import binascii
from insteonplm.constants import (
MESSAGE_FLAG_EXTENDED_0X10,
MESSAGE_TYPE_ALL_LINK_BROADCAST,
MESSAGE_TYPE_ALL_LINK_CLEANUP,
MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK,
MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK,
MESSAGE_TYPE_BROADCAST_MESSAGE,
MESSAGE_TYPE_DIRECT_MESSAGE_ACK,
MESSAGE_TYPE_DIRECT_MESSAGE_NAK,
)
and any relevant context from other files:
# Path: insteonplm/constants.py
# MESSAGE_FLAG_EXTENDED_0X10 = 0x10
#
# MESSAGE_TYPE_ALL_LINK_BROADCAST = 6
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP = 2
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK = 3
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK = 7
#
# MESSAGE_TYPE_BROADCAST_MESSAGE = 4
#
# MESSAGE_TYPE_DIRECT_MESSAGE_ACK = 1
#
# MESSAGE_TYPE_DIRECT_MESSAGE_NAK = 5
. Output only the next line. | self._extended = (flagByte[0] & MESSAGE_FLAG_EXTENDED_0X10) >> 4 |
Based on the snippet: <|code_start|>
@property
def isBroadcast(self):
"""Test if the message is a broadcast message type."""
return (
self._messageType & MESSAGE_TYPE_BROADCAST_MESSAGE
== MESSAGE_TYPE_BROADCAST_MESSAGE
)
@property
def isDirect(self):
"""Test if the message is a direct message type."""
direct = self._messageType == 0x00
if self.isDirectACK or self.isDirectNAK:
direct = True
return direct
@property
def isDirectACK(self):
"""Test if the message is a direct ACK message type."""
return self._messageType == MESSAGE_TYPE_DIRECT_MESSAGE_ACK
@property
def isDirectNAK(self):
"""Test if the message is a direct NAK message type."""
return self._messageType == MESSAGE_TYPE_DIRECT_MESSAGE_NAK
@property
def isAllLinkBroadcast(self):
"""Test if the message is an ALl-Link broadcast message type."""
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
import binascii
from insteonplm.constants import (
MESSAGE_FLAG_EXTENDED_0X10,
MESSAGE_TYPE_ALL_LINK_BROADCAST,
MESSAGE_TYPE_ALL_LINK_CLEANUP,
MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK,
MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK,
MESSAGE_TYPE_BROADCAST_MESSAGE,
MESSAGE_TYPE_DIRECT_MESSAGE_ACK,
MESSAGE_TYPE_DIRECT_MESSAGE_NAK,
)
and context (classes, functions, sometimes code) from other files:
# Path: insteonplm/constants.py
# MESSAGE_FLAG_EXTENDED_0X10 = 0x10
#
# MESSAGE_TYPE_ALL_LINK_BROADCAST = 6
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP = 2
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK = 3
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK = 7
#
# MESSAGE_TYPE_BROADCAST_MESSAGE = 4
#
# MESSAGE_TYPE_DIRECT_MESSAGE_ACK = 1
#
# MESSAGE_TYPE_DIRECT_MESSAGE_NAK = 5
. Output only the next line. | return self._messageType == MESSAGE_TYPE_ALL_LINK_BROADCAST |
Next line prediction: <|code_start|> self._messageType & MESSAGE_TYPE_BROADCAST_MESSAGE
== MESSAGE_TYPE_BROADCAST_MESSAGE
)
@property
def isDirect(self):
"""Test if the message is a direct message type."""
direct = self._messageType == 0x00
if self.isDirectACK or self.isDirectNAK:
direct = True
return direct
@property
def isDirectACK(self):
"""Test if the message is a direct ACK message type."""
return self._messageType == MESSAGE_TYPE_DIRECT_MESSAGE_ACK
@property
def isDirectNAK(self):
"""Test if the message is a direct NAK message type."""
return self._messageType == MESSAGE_TYPE_DIRECT_MESSAGE_NAK
@property
def isAllLinkBroadcast(self):
"""Test if the message is an ALl-Link broadcast message type."""
return self._messageType == MESSAGE_TYPE_ALL_LINK_BROADCAST
@property
def isAllLinkCleanup(self):
"""Test if the message is a All-Link cleanup message type."""
<|code_end|>
. Use current file imports:
(import logging
import binascii
from insteonplm.constants import (
MESSAGE_FLAG_EXTENDED_0X10,
MESSAGE_TYPE_ALL_LINK_BROADCAST,
MESSAGE_TYPE_ALL_LINK_CLEANUP,
MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK,
MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK,
MESSAGE_TYPE_BROADCAST_MESSAGE,
MESSAGE_TYPE_DIRECT_MESSAGE_ACK,
MESSAGE_TYPE_DIRECT_MESSAGE_NAK,
))
and context including class names, function names, or small code snippets from other files:
# Path: insteonplm/constants.py
# MESSAGE_FLAG_EXTENDED_0X10 = 0x10
#
# MESSAGE_TYPE_ALL_LINK_BROADCAST = 6
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP = 2
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK = 3
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK = 7
#
# MESSAGE_TYPE_BROADCAST_MESSAGE = 4
#
# MESSAGE_TYPE_DIRECT_MESSAGE_ACK = 1
#
# MESSAGE_TYPE_DIRECT_MESSAGE_NAK = 5
. Output only the next line. | return self._messageType == MESSAGE_TYPE_ALL_LINK_CLEANUP |
Given the code snippet: <|code_start|> def isDirect(self):
"""Test if the message is a direct message type."""
direct = self._messageType == 0x00
if self.isDirectACK or self.isDirectNAK:
direct = True
return direct
@property
def isDirectACK(self):
"""Test if the message is a direct ACK message type."""
return self._messageType == MESSAGE_TYPE_DIRECT_MESSAGE_ACK
@property
def isDirectNAK(self):
"""Test if the message is a direct NAK message type."""
return self._messageType == MESSAGE_TYPE_DIRECT_MESSAGE_NAK
@property
def isAllLinkBroadcast(self):
"""Test if the message is an ALl-Link broadcast message type."""
return self._messageType == MESSAGE_TYPE_ALL_LINK_BROADCAST
@property
def isAllLinkCleanup(self):
"""Test if the message is a All-Link cleanup message type."""
return self._messageType == MESSAGE_TYPE_ALL_LINK_CLEANUP
@property
def isAllLinkCleanupACK(self):
"""Test if the message is a All-LInk cleanup ACK message type."""
<|code_end|>
, generate the next line using the imports in this file:
import logging
import binascii
from insteonplm.constants import (
MESSAGE_FLAG_EXTENDED_0X10,
MESSAGE_TYPE_ALL_LINK_BROADCAST,
MESSAGE_TYPE_ALL_LINK_CLEANUP,
MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK,
MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK,
MESSAGE_TYPE_BROADCAST_MESSAGE,
MESSAGE_TYPE_DIRECT_MESSAGE_ACK,
MESSAGE_TYPE_DIRECT_MESSAGE_NAK,
)
and context (functions, classes, or occasionally code) from other files:
# Path: insteonplm/constants.py
# MESSAGE_FLAG_EXTENDED_0X10 = 0x10
#
# MESSAGE_TYPE_ALL_LINK_BROADCAST = 6
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP = 2
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK = 3
#
# MESSAGE_TYPE_ALL_LINK_CLEANUP_NAK = 7
#
# MESSAGE_TYPE_BROADCAST_MESSAGE = 4
#
# MESSAGE_TYPE_DIRECT_MESSAGE_ACK = 1
#
# MESSAGE_TYPE_DIRECT_MESSAGE_NAK = 5
. Output only the next line. | return self._messageType == MESSAGE_TYPE_ALL_LINK_CLEANUP_ACK |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.