Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Continue the code snippet: <|code_start|>
class Freshman(models.Model):
def __str__(self):
return self.user.username
user = models.OneToOneField(User, related_name='freshman')
<|code_end|>
. Use current file imports:
from django.db import models
from django.contrib.auth.models import User
from .club import Club
and context (classes, functions, or code) from other files:
# Path: apps/ot/models/club.py
# class Club(models.Model):
# """
# λμ리 μ 보
# """
#
# class Meta:
# verbose_name = 'λμ리 μ 보'
# verbose_name_plural = 'λμ리 μ 보(λ€)'
#
# def __str__(self):
# return str(self.name)
#
# name = models.CharField(
# max_length=63,
# verbose_name='λμ리 μ΄λ¦',
# )
# pages = models.TextField(
# blank=True,
# verbose_name='λμ리 νμ΄μ€λΆ/μ νλΈ νμ΄μ§ μ£Όμ',
# )
# one_line_intro = models.TextField(
# blank=True,
# verbose_name='λμ리 ν μ€ μκ°(λ©μΈ νλ©΄)',
# )
# intro = models.TextField(
# verbose_name='λμ리 μκ°',
# )
# is_band = models.BooleanField(
# default=False,
# verbose_name='λ°΄λ μ¬λΆ',
# )
#
# video_url1 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #1',
# )
# video_url2 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #2',
# )
. Output only the next line. | voted_clubs = models.ManyToManyField(Club, related_name='votes') |
Continue the code snippet: <|code_start|>
@method_decorator(login_required, name='dispatch')
@method_decorator(user_passes_test(vote_available), name='dispatch')
class TSizeView(FormView):
template_name = 'tsize.html'
<|code_end|>
. Use current file imports:
from django.views.generic import FormView, TemplateView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, user_passes_test
from django.db.models import Count
from ..forms import TSizeForm
from ..util import vote_available, is_tester
from ..models.club import Club
from ..models.user import Freshman
and context (classes, functions, or code) from other files:
# Path: apps/ot/forms.py
# class TSizeForm(ModelForm):
# class Meta:
# model = Freshman
# fields = ("tsize",)
#
# def __init__(self, *args, **kwargs):
# super(TSizeForm, self).__init__(*args, **kwargs)
# self.fields['tsize'].widget.attrs.update({'class': 'form-control'})
#
# Path: apps/ot/util.py
# def vote_available(user):
# if is_vote_period():
# return is_freshman(user)
# else:
# return is_tester(user)
#
# def is_tester(user):
# if not user.is_authenticated or not hasattr(user, 'portal_info'):
# return False
#
# return user.portal_info.ku_std_no in (
# "20110208",
# "20180379",
# "20180419",
# "20180058",
# "20180634",
# "20180154",
# )
#
# Path: apps/ot/models/club.py
# class Club(models.Model):
# """
# λμ리 μ 보
# """
#
# class Meta:
# verbose_name = 'λμ리 μ 보'
# verbose_name_plural = 'λμ리 μ 보(λ€)'
#
# def __str__(self):
# return str(self.name)
#
# name = models.CharField(
# max_length=63,
# verbose_name='λμ리 μ΄λ¦',
# )
# pages = models.TextField(
# blank=True,
# verbose_name='λμ리 νμ΄μ€λΆ/μ νλΈ νμ΄μ§ μ£Όμ',
# )
# one_line_intro = models.TextField(
# blank=True,
# verbose_name='λμ리 ν μ€ μκ°(λ©μΈ νλ©΄)',
# )
# intro = models.TextField(
# verbose_name='λμ리 μκ°',
# )
# is_band = models.BooleanField(
# default=False,
# verbose_name='λ°΄λ μ¬λΆ',
# )
#
# video_url1 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #1',
# )
# video_url2 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #2',
# )
#
# Path: apps/ot/models/user.py
# class Freshman(models.Model):
# def __str__(self):
# return self.user.username
#
# user = models.OneToOneField(User, related_name='freshman')
# voted_clubs = models.ManyToManyField(Club, related_name='votes')
#
# sizes = (
# ('S', 'S'),
# ('M', 'M'),
# ('L', 'L'),
# ('XL', 'XL'),
# ('2XL', '2XL'),
# ('3XL', '3XL'),
# )
# tsize = models.CharField(null=False, max_length=5, choices=sizes)
#
# BAND_VOTE_LIMIT = 4
# NON_BAND_VOTE_LIMIT = 3
#
# def vote_limit_exceeded(self, is_band):
# return self.voted_clubs.filter(is_band=is_band).count() > 5
. Output only the next line. | form_class = TSizeForm |
Predict the next line after this snippet: <|code_start|>
@method_decorator(login_required, name='dispatch')
@method_decorator(user_passes_test(vote_available), name='dispatch')
class TSizeView(FormView):
template_name = 'tsize.html'
form_class = TSizeForm
success_url = '/ot/'
def form_valid(self, form):
if hasattr(self.request.user, 'freshman'):
self.request.user.freshman.tsize = form.cleaned_data['tsize']
self.request.user.freshman.save()
else:
freshman = form.save(commit=False)
freshman.user = self.request.user
freshman.save()
return super(TSizeView, self).form_valid(form)
def get_initial(self):
initial = super(TSizeView, self).get_initial()
if hasattr(self.request.user, 'freshman'):
initial['tsize'] = self.request.user.freshman.tsize
return initial
<|code_end|>
using the current file's imports:
from django.views.generic import FormView, TemplateView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, user_passes_test
from django.db.models import Count
from ..forms import TSizeForm
from ..util import vote_available, is_tester
from ..models.club import Club
from ..models.user import Freshman
and any relevant context from other files:
# Path: apps/ot/forms.py
# class TSizeForm(ModelForm):
# class Meta:
# model = Freshman
# fields = ("tsize",)
#
# def __init__(self, *args, **kwargs):
# super(TSizeForm, self).__init__(*args, **kwargs)
# self.fields['tsize'].widget.attrs.update({'class': 'form-control'})
#
# Path: apps/ot/util.py
# def vote_available(user):
# if is_vote_period():
# return is_freshman(user)
# else:
# return is_tester(user)
#
# def is_tester(user):
# if not user.is_authenticated or not hasattr(user, 'portal_info'):
# return False
#
# return user.portal_info.ku_std_no in (
# "20110208",
# "20180379",
# "20180419",
# "20180058",
# "20180634",
# "20180154",
# )
#
# Path: apps/ot/models/club.py
# class Club(models.Model):
# """
# λμ리 μ 보
# """
#
# class Meta:
# verbose_name = 'λμ리 μ 보'
# verbose_name_plural = 'λμ리 μ 보(λ€)'
#
# def __str__(self):
# return str(self.name)
#
# name = models.CharField(
# max_length=63,
# verbose_name='λμ리 μ΄λ¦',
# )
# pages = models.TextField(
# blank=True,
# verbose_name='λμ리 νμ΄μ€λΆ/μ νλΈ νμ΄μ§ μ£Όμ',
# )
# one_line_intro = models.TextField(
# blank=True,
# verbose_name='λμ리 ν μ€ μκ°(λ©μΈ νλ©΄)',
# )
# intro = models.TextField(
# verbose_name='λμ리 μκ°',
# )
# is_band = models.BooleanField(
# default=False,
# verbose_name='λ°΄λ μ¬λΆ',
# )
#
# video_url1 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #1',
# )
# video_url2 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #2',
# )
#
# Path: apps/ot/models/user.py
# class Freshman(models.Model):
# def __str__(self):
# return self.user.username
#
# user = models.OneToOneField(User, related_name='freshman')
# voted_clubs = models.ManyToManyField(Club, related_name='votes')
#
# sizes = (
# ('S', 'S'),
# ('M', 'M'),
# ('L', 'L'),
# ('XL', 'XL'),
# ('2XL', '2XL'),
# ('3XL', '3XL'),
# )
# tsize = models.CharField(null=False, max_length=5, choices=sizes)
#
# BAND_VOTE_LIMIT = 4
# NON_BAND_VOTE_LIMIT = 3
#
# def vote_limit_exceeded(self, is_band):
# return self.voted_clubs.filter(is_band=is_band).count() > 5
. Output only the next line. | @method_decorator(user_passes_test(is_tester), name='dispatch') |
Predict the next line for this snippet: <|code_start|> form_class = TSizeForm
success_url = '/ot/'
def form_valid(self, form):
if hasattr(self.request.user, 'freshman'):
self.request.user.freshman.tsize = form.cleaned_data['tsize']
self.request.user.freshman.save()
else:
freshman = form.save(commit=False)
freshman.user = self.request.user
freshman.save()
return super(TSizeView, self).form_valid(form)
def get_initial(self):
initial = super(TSizeView, self).get_initial()
if hasattr(self.request.user, 'freshman'):
initial['tsize'] = self.request.user.freshman.tsize
return initial
@method_decorator(user_passes_test(is_tester), name='dispatch')
class ResultView(TemplateView):
template_name = 'result.html'
def get_context_data(self, **kwargs):
context = super(ResultView, self).get_context_data(**kwargs)
<|code_end|>
with the help of current file imports:
from django.views.generic import FormView, TemplateView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, user_passes_test
from django.db.models import Count
from ..forms import TSizeForm
from ..util import vote_available, is_tester
from ..models.club import Club
from ..models.user import Freshman
and context from other files:
# Path: apps/ot/forms.py
# class TSizeForm(ModelForm):
# class Meta:
# model = Freshman
# fields = ("tsize",)
#
# def __init__(self, *args, **kwargs):
# super(TSizeForm, self).__init__(*args, **kwargs)
# self.fields['tsize'].widget.attrs.update({'class': 'form-control'})
#
# Path: apps/ot/util.py
# def vote_available(user):
# if is_vote_period():
# return is_freshman(user)
# else:
# return is_tester(user)
#
# def is_tester(user):
# if not user.is_authenticated or not hasattr(user, 'portal_info'):
# return False
#
# return user.portal_info.ku_std_no in (
# "20110208",
# "20180379",
# "20180419",
# "20180058",
# "20180634",
# "20180154",
# )
#
# Path: apps/ot/models/club.py
# class Club(models.Model):
# """
# λμ리 μ 보
# """
#
# class Meta:
# verbose_name = 'λμ리 μ 보'
# verbose_name_plural = 'λμ리 μ 보(λ€)'
#
# def __str__(self):
# return str(self.name)
#
# name = models.CharField(
# max_length=63,
# verbose_name='λμ리 μ΄λ¦',
# )
# pages = models.TextField(
# blank=True,
# verbose_name='λμ리 νμ΄μ€λΆ/μ νλΈ νμ΄μ§ μ£Όμ',
# )
# one_line_intro = models.TextField(
# blank=True,
# verbose_name='λμ리 ν μ€ μκ°(λ©μΈ νλ©΄)',
# )
# intro = models.TextField(
# verbose_name='λμ리 μκ°',
# )
# is_band = models.BooleanField(
# default=False,
# verbose_name='λ°΄λ μ¬λΆ',
# )
#
# video_url1 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #1',
# )
# video_url2 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #2',
# )
#
# Path: apps/ot/models/user.py
# class Freshman(models.Model):
# def __str__(self):
# return self.user.username
#
# user = models.OneToOneField(User, related_name='freshman')
# voted_clubs = models.ManyToManyField(Club, related_name='votes')
#
# sizes = (
# ('S', 'S'),
# ('M', 'M'),
# ('L', 'L'),
# ('XL', 'XL'),
# ('2XL', '2XL'),
# ('3XL', '3XL'),
# )
# tsize = models.CharField(null=False, max_length=5, choices=sizes)
#
# BAND_VOTE_LIMIT = 4
# NON_BAND_VOTE_LIMIT = 3
#
# def vote_limit_exceeded(self, is_band):
# return self.voted_clubs.filter(is_band=is_band).count() > 5
, which may contain function names, class names, or code. Output only the next line. | context['clubs'] = Club.objects.all().annotate(cnt=Count('votes')).order_by('-cnt') |
Given the code snippet: <|code_start|> if hasattr(self.request.user, 'freshman'):
self.request.user.freshman.tsize = form.cleaned_data['tsize']
self.request.user.freshman.save()
else:
freshman = form.save(commit=False)
freshman.user = self.request.user
freshman.save()
return super(TSizeView, self).form_valid(form)
def get_initial(self):
initial = super(TSizeView, self).get_initial()
if hasattr(self.request.user, 'freshman'):
initial['tsize'] = self.request.user.freshman.tsize
return initial
@method_decorator(user_passes_test(is_tester), name='dispatch')
class ResultView(TemplateView):
template_name = 'result.html'
def get_context_data(self, **kwargs):
context = super(ResultView, self).get_context_data(**kwargs)
context['clubs'] = Club.objects.all().annotate(cnt=Count('votes')).order_by('-cnt')
context['bands'] = Club.objects.filter(is_band=True).annotate(cnt=Count('votes')).order_by('-cnt')
context['non_bands'] = Club.objects.filter(is_band=False).annotate(cnt=Count('votes')).order_by('-cnt')
<|code_end|>
, generate the next line using the imports in this file:
from django.views.generic import FormView, TemplateView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, user_passes_test
from django.db.models import Count
from ..forms import TSizeForm
from ..util import vote_available, is_tester
from ..models.club import Club
from ..models.user import Freshman
and context (functions, classes, or occasionally code) from other files:
# Path: apps/ot/forms.py
# class TSizeForm(ModelForm):
# class Meta:
# model = Freshman
# fields = ("tsize",)
#
# def __init__(self, *args, **kwargs):
# super(TSizeForm, self).__init__(*args, **kwargs)
# self.fields['tsize'].widget.attrs.update({'class': 'form-control'})
#
# Path: apps/ot/util.py
# def vote_available(user):
# if is_vote_period():
# return is_freshman(user)
# else:
# return is_tester(user)
#
# def is_tester(user):
# if not user.is_authenticated or not hasattr(user, 'portal_info'):
# return False
#
# return user.portal_info.ku_std_no in (
# "20110208",
# "20180379",
# "20180419",
# "20180058",
# "20180634",
# "20180154",
# )
#
# Path: apps/ot/models/club.py
# class Club(models.Model):
# """
# λμ리 μ 보
# """
#
# class Meta:
# verbose_name = 'λμ리 μ 보'
# verbose_name_plural = 'λμ리 μ 보(λ€)'
#
# def __str__(self):
# return str(self.name)
#
# name = models.CharField(
# max_length=63,
# verbose_name='λμ리 μ΄λ¦',
# )
# pages = models.TextField(
# blank=True,
# verbose_name='λμ리 νμ΄μ€λΆ/μ νλΈ νμ΄μ§ μ£Όμ',
# )
# one_line_intro = models.TextField(
# blank=True,
# verbose_name='λμ리 ν μ€ μκ°(λ©μΈ νλ©΄)',
# )
# intro = models.TextField(
# verbose_name='λμ리 μκ°',
# )
# is_band = models.BooleanField(
# default=False,
# verbose_name='λ°΄λ μ¬λΆ',
# )
#
# video_url1 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #1',
# )
# video_url2 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #2',
# )
#
# Path: apps/ot/models/user.py
# class Freshman(models.Model):
# def __str__(self):
# return self.user.username
#
# user = models.OneToOneField(User, related_name='freshman')
# voted_clubs = models.ManyToManyField(Club, related_name='votes')
#
# sizes = (
# ('S', 'S'),
# ('M', 'M'),
# ('L', 'L'),
# ('XL', 'XL'),
# ('2XL', '2XL'),
# ('3XL', '3XL'),
# )
# tsize = models.CharField(null=False, max_length=5, choices=sizes)
#
# BAND_VOTE_LIMIT = 4
# NON_BAND_VOTE_LIMIT = 3
#
# def vote_limit_exceeded(self, is_band):
# return self.voted_clubs.filter(is_band=is_band).count() > 5
. Output only the next line. | context['cnt_voted'] = Freshman.objects.all().count() |
Predict the next line after this snippet: <|code_start|>
class PostPagination(LimitOffsetPagination):
default_limit = POST_PER_PAGE
class PostViewSet(
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
viewsets.GenericViewSet
):
permission_classes = (
permissions.IsAdminUser,
)
pagination_class = PostPagination
def get_queryset(self):
role = self.request.query_params.get('role', None)
search = self.request.query_params.get('search', None)
<|code_end|>
using the current file's imports:
from rest_framework import mixins, viewsets, permissions
from rest_framework.pagination import LimitOffsetPagination
from django.db.models import Q
from apps.board.models import Post
from apps.board.serializers import PostSerializer, CreatePostSerializer, RetrivePostSerializer
from apps.board.constants import POST_PER_PAGE, BOARD_ROLE
from apps.manager.constants import *
and any relevant context from other files:
# Path: apps/board/models.py
# class Post(BasePost):
# """
# κ²μκΈμ ꡬνν λͺ¨λΈ.
# """
#
# board = models.ForeignKey(
# Board,
# verbose_name=_("λ±λ‘ κ²μν"))
#
# board_tab = models.ManyToManyField(
# BoardTab,
# blank=True,
# verbose_name=_("λ±λ‘ ν"))
#
# title = models.CharField(
# _("μ λͺ©"),
# max_length=128)
#
# tag = models.ForeignKey(
# Tag,
# verbose_name=_("νκ·Έ"),
# null=True, blank=True)
#
# is_notice = models.BooleanField(
# _("곡μ§κΈ"),
# default=False)
#
# class Meta:
# verbose_name = _('ν¬μ€νΈ')
# verbose_name_plural = _('ν¬μ€νΈ(λ€)')
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# # return os.path.join(self.board.get_absolute_url(), str(self.id))
# return self.board.get_absolute_url()+'/'+str(self.id)
#
# def get_first_tab(self):
# return self.board_tab.all().first()
#
# def pre_permitted(self, user, permission):
# """
# κ²μκΈ κΆν νμΈ μ΄μ μ κ²μν μ κ·ΌκΆνμ νμΈνλ λ©μλ.
# """
# return self.board.is_permitted(user, PERM_ACCESS)
#
# def post_permitted(self, user, permission):
# """
# κ²μκΈ κΆν νμΈ μ΄νμ λμΌ κΆνμ΄ κ²μνμλ μλμ§ νμΈνλ λ©μλ.
# """
# return self.board.is_permitted(user, permission)
#
# Path: apps/board/serializers.py
# class PostSerializer(serializers.ModelSerializer):
#
# attachedfile_set = AttachedFileSerializer(many=True, read_only=True)
# is_permitted_to_read = serializers.SerializerMethodField()
# is_permitted_to_edit = serializers.SerializerMethodField()
# is_permitted_to_delete = serializers.SerializerMethodField()
# absolute_url = serializers.SerializerMethodField()
#
# def get_request_user(self):
# user = None
# request = self.context.get("request")
# if request and hasattr(request, "user"):
# user = request.user
# return user
#
# def get_is_permitted_to_read(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_READ)
#
# def get_is_permitted_to_edit(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_EDIT)
#
# def get_is_permitted_to_delete(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_DELETE)
#
# def get_absolute_url(self, obj):
# return obj.get_absolute_url()
#
# class Meta:
# model = Post
# fields = (
# 'id',
# 'title',
# 'content',
# 'is_deleted',
# 'is_secret',
# 'is_permitted_to_read',
# 'is_permitted_to_edit',
# 'is_permitted_to_delete',
# 'absolute_url',
# 'board',
# 'attachedfile_set'
# )
# depth = 2
#
# class CreatePostSerializer(serializers.ModelSerializer):
# class Meta:
# model = Post
# fields = (
# 'title',
# 'content',
# 'board',
# )
#
# class RetrivePostSerializer(serializers.ModelSerializer):
# class Meta:
# model = Post
# fields = (
# 'title',
# 'content',
# 'board',
# )
#
# Path: apps/board/constants.py
# POST_PER_PAGE = 15
#
# BOARD_ROLE = {
# 'DEFAULT': 'DEFAULT',
# 'PROJECT': 'PROJECT',
# 'DEBATE': 'DEBATE',
# 'PLANBOOK': 'PLANBOOK',
# 'ARCHIVING':'ARCHIVING',
# 'WORKHOUR': 'WORKHOUR',
# 'SPONSOR': 'SPONSOR',
# 'SWIPER':'SWIPER',
# 'STORE': 'STORE',
# 'CONTACT':'CONTACT',
# }
. Output only the next line. | posts = Post.objects.filter(is_deleted=False) |
Next line prediction: <|code_start|> )
pagination_class = PostPagination
def get_queryset(self):
role = self.request.query_params.get('role', None)
search = self.request.query_params.get('search', None)
posts = Post.objects.filter(is_deleted=False)
# remove not permitted posts
posts_copy = posts.all()
for post in posts:
if not post.is_permitted(self.request.user, PERM_READ):
posts_copy = posts_copy.exclude(post.pk)
posts = posts_copy
if role is not None:
posts = posts.filter(board__role__exact=role)
if search is not None:
posts = posts.filter(
Q(title__icontains=search) |
Q(content__icontains=search)
)
return posts
def get_serializer_class(self):
if self.action == 'create':
return CreatePostSerializer
elif self.action == 'retrive':
return RetrivePostSerializer
else:
<|code_end|>
. Use current file imports:
(from rest_framework import mixins, viewsets, permissions
from rest_framework.pagination import LimitOffsetPagination
from django.db.models import Q
from apps.board.models import Post
from apps.board.serializers import PostSerializer, CreatePostSerializer, RetrivePostSerializer
from apps.board.constants import POST_PER_PAGE, BOARD_ROLE
from apps.manager.constants import *)
and context including class names, function names, or small code snippets from other files:
# Path: apps/board/models.py
# class Post(BasePost):
# """
# κ²μκΈμ ꡬνν λͺ¨λΈ.
# """
#
# board = models.ForeignKey(
# Board,
# verbose_name=_("λ±λ‘ κ²μν"))
#
# board_tab = models.ManyToManyField(
# BoardTab,
# blank=True,
# verbose_name=_("λ±λ‘ ν"))
#
# title = models.CharField(
# _("μ λͺ©"),
# max_length=128)
#
# tag = models.ForeignKey(
# Tag,
# verbose_name=_("νκ·Έ"),
# null=True, blank=True)
#
# is_notice = models.BooleanField(
# _("곡μ§κΈ"),
# default=False)
#
# class Meta:
# verbose_name = _('ν¬μ€νΈ')
# verbose_name_plural = _('ν¬μ€νΈ(λ€)')
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# # return os.path.join(self.board.get_absolute_url(), str(self.id))
# return self.board.get_absolute_url()+'/'+str(self.id)
#
# def get_first_tab(self):
# return self.board_tab.all().first()
#
# def pre_permitted(self, user, permission):
# """
# κ²μκΈ κΆν νμΈ μ΄μ μ κ²μν μ κ·ΌκΆνμ νμΈνλ λ©μλ.
# """
# return self.board.is_permitted(user, PERM_ACCESS)
#
# def post_permitted(self, user, permission):
# """
# κ²μκΈ κΆν νμΈ μ΄νμ λμΌ κΆνμ΄ κ²μνμλ μλμ§ νμΈνλ λ©μλ.
# """
# return self.board.is_permitted(user, permission)
#
# Path: apps/board/serializers.py
# class PostSerializer(serializers.ModelSerializer):
#
# attachedfile_set = AttachedFileSerializer(many=True, read_only=True)
# is_permitted_to_read = serializers.SerializerMethodField()
# is_permitted_to_edit = serializers.SerializerMethodField()
# is_permitted_to_delete = serializers.SerializerMethodField()
# absolute_url = serializers.SerializerMethodField()
#
# def get_request_user(self):
# user = None
# request = self.context.get("request")
# if request and hasattr(request, "user"):
# user = request.user
# return user
#
# def get_is_permitted_to_read(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_READ)
#
# def get_is_permitted_to_edit(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_EDIT)
#
# def get_is_permitted_to_delete(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_DELETE)
#
# def get_absolute_url(self, obj):
# return obj.get_absolute_url()
#
# class Meta:
# model = Post
# fields = (
# 'id',
# 'title',
# 'content',
# 'is_deleted',
# 'is_secret',
# 'is_permitted_to_read',
# 'is_permitted_to_edit',
# 'is_permitted_to_delete',
# 'absolute_url',
# 'board',
# 'attachedfile_set'
# )
# depth = 2
#
# class CreatePostSerializer(serializers.ModelSerializer):
# class Meta:
# model = Post
# fields = (
# 'title',
# 'content',
# 'board',
# )
#
# class RetrivePostSerializer(serializers.ModelSerializer):
# class Meta:
# model = Post
# fields = (
# 'title',
# 'content',
# 'board',
# )
#
# Path: apps/board/constants.py
# POST_PER_PAGE = 15
#
# BOARD_ROLE = {
# 'DEFAULT': 'DEFAULT',
# 'PROJECT': 'PROJECT',
# 'DEBATE': 'DEBATE',
# 'PLANBOOK': 'PLANBOOK',
# 'ARCHIVING':'ARCHIVING',
# 'WORKHOUR': 'WORKHOUR',
# 'SPONSOR': 'SPONSOR',
# 'SWIPER':'SWIPER',
# 'STORE': 'STORE',
# 'CONTACT':'CONTACT',
# }
. Output only the next line. | return PostSerializer |
Given snippet: <|code_start|> ):
permission_classes = (
permissions.IsAdminUser,
)
pagination_class = PostPagination
def get_queryset(self):
role = self.request.query_params.get('role', None)
search = self.request.query_params.get('search', None)
posts = Post.objects.filter(is_deleted=False)
# remove not permitted posts
posts_copy = posts.all()
for post in posts:
if not post.is_permitted(self.request.user, PERM_READ):
posts_copy = posts_copy.exclude(post.pk)
posts = posts_copy
if role is not None:
posts = posts.filter(board__role__exact=role)
if search is not None:
posts = posts.filter(
Q(title__icontains=search) |
Q(content__icontains=search)
)
return posts
def get_serializer_class(self):
if self.action == 'create':
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from rest_framework import mixins, viewsets, permissions
from rest_framework.pagination import LimitOffsetPagination
from django.db.models import Q
from apps.board.models import Post
from apps.board.serializers import PostSerializer, CreatePostSerializer, RetrivePostSerializer
from apps.board.constants import POST_PER_PAGE, BOARD_ROLE
from apps.manager.constants import *
and context:
# Path: apps/board/models.py
# class Post(BasePost):
# """
# κ²μκΈμ ꡬνν λͺ¨λΈ.
# """
#
# board = models.ForeignKey(
# Board,
# verbose_name=_("λ±λ‘ κ²μν"))
#
# board_tab = models.ManyToManyField(
# BoardTab,
# blank=True,
# verbose_name=_("λ±λ‘ ν"))
#
# title = models.CharField(
# _("μ λͺ©"),
# max_length=128)
#
# tag = models.ForeignKey(
# Tag,
# verbose_name=_("νκ·Έ"),
# null=True, blank=True)
#
# is_notice = models.BooleanField(
# _("곡μ§κΈ"),
# default=False)
#
# class Meta:
# verbose_name = _('ν¬μ€νΈ')
# verbose_name_plural = _('ν¬μ€νΈ(λ€)')
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# # return os.path.join(self.board.get_absolute_url(), str(self.id))
# return self.board.get_absolute_url()+'/'+str(self.id)
#
# def get_first_tab(self):
# return self.board_tab.all().first()
#
# def pre_permitted(self, user, permission):
# """
# κ²μκΈ κΆν νμΈ μ΄μ μ κ²μν μ κ·ΌκΆνμ νμΈνλ λ©μλ.
# """
# return self.board.is_permitted(user, PERM_ACCESS)
#
# def post_permitted(self, user, permission):
# """
# κ²μκΈ κΆν νμΈ μ΄νμ λμΌ κΆνμ΄ κ²μνμλ μλμ§ νμΈνλ λ©μλ.
# """
# return self.board.is_permitted(user, permission)
#
# Path: apps/board/serializers.py
# class PostSerializer(serializers.ModelSerializer):
#
# attachedfile_set = AttachedFileSerializer(many=True, read_only=True)
# is_permitted_to_read = serializers.SerializerMethodField()
# is_permitted_to_edit = serializers.SerializerMethodField()
# is_permitted_to_delete = serializers.SerializerMethodField()
# absolute_url = serializers.SerializerMethodField()
#
# def get_request_user(self):
# user = None
# request = self.context.get("request")
# if request and hasattr(request, "user"):
# user = request.user
# return user
#
# def get_is_permitted_to_read(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_READ)
#
# def get_is_permitted_to_edit(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_EDIT)
#
# def get_is_permitted_to_delete(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_DELETE)
#
# def get_absolute_url(self, obj):
# return obj.get_absolute_url()
#
# class Meta:
# model = Post
# fields = (
# 'id',
# 'title',
# 'content',
# 'is_deleted',
# 'is_secret',
# 'is_permitted_to_read',
# 'is_permitted_to_edit',
# 'is_permitted_to_delete',
# 'absolute_url',
# 'board',
# 'attachedfile_set'
# )
# depth = 2
#
# class CreatePostSerializer(serializers.ModelSerializer):
# class Meta:
# model = Post
# fields = (
# 'title',
# 'content',
# 'board',
# )
#
# class RetrivePostSerializer(serializers.ModelSerializer):
# class Meta:
# model = Post
# fields = (
# 'title',
# 'content',
# 'board',
# )
#
# Path: apps/board/constants.py
# POST_PER_PAGE = 15
#
# BOARD_ROLE = {
# 'DEFAULT': 'DEFAULT',
# 'PROJECT': 'PROJECT',
# 'DEBATE': 'DEBATE',
# 'PLANBOOK': 'PLANBOOK',
# 'ARCHIVING':'ARCHIVING',
# 'WORKHOUR': 'WORKHOUR',
# 'SPONSOR': 'SPONSOR',
# 'SWIPER':'SWIPER',
# 'STORE': 'STORE',
# 'CONTACT':'CONTACT',
# }
which might include code, classes, or functions. Output only the next line. | return CreatePostSerializer |
Given the code snippet: <|code_start|> permission_classes = (
permissions.IsAdminUser,
)
pagination_class = PostPagination
def get_queryset(self):
role = self.request.query_params.get('role', None)
search = self.request.query_params.get('search', None)
posts = Post.objects.filter(is_deleted=False)
# remove not permitted posts
posts_copy = posts.all()
for post in posts:
if not post.is_permitted(self.request.user, PERM_READ):
posts_copy = posts_copy.exclude(post.pk)
posts = posts_copy
if role is not None:
posts = posts.filter(board__role__exact=role)
if search is not None:
posts = posts.filter(
Q(title__icontains=search) |
Q(content__icontains=search)
)
return posts
def get_serializer_class(self):
if self.action == 'create':
return CreatePostSerializer
elif self.action == 'retrive':
<|code_end|>
, generate the next line using the imports in this file:
from rest_framework import mixins, viewsets, permissions
from rest_framework.pagination import LimitOffsetPagination
from django.db.models import Q
from apps.board.models import Post
from apps.board.serializers import PostSerializer, CreatePostSerializer, RetrivePostSerializer
from apps.board.constants import POST_PER_PAGE, BOARD_ROLE
from apps.manager.constants import *
and context (functions, classes, or occasionally code) from other files:
# Path: apps/board/models.py
# class Post(BasePost):
# """
# κ²μκΈμ ꡬνν λͺ¨λΈ.
# """
#
# board = models.ForeignKey(
# Board,
# verbose_name=_("λ±λ‘ κ²μν"))
#
# board_tab = models.ManyToManyField(
# BoardTab,
# blank=True,
# verbose_name=_("λ±λ‘ ν"))
#
# title = models.CharField(
# _("μ λͺ©"),
# max_length=128)
#
# tag = models.ForeignKey(
# Tag,
# verbose_name=_("νκ·Έ"),
# null=True, blank=True)
#
# is_notice = models.BooleanField(
# _("곡μ§κΈ"),
# default=False)
#
# class Meta:
# verbose_name = _('ν¬μ€νΈ')
# verbose_name_plural = _('ν¬μ€νΈ(λ€)')
#
# def __str__(self):
# return self.title
#
# def get_absolute_url(self):
# # return os.path.join(self.board.get_absolute_url(), str(self.id))
# return self.board.get_absolute_url()+'/'+str(self.id)
#
# def get_first_tab(self):
# return self.board_tab.all().first()
#
# def pre_permitted(self, user, permission):
# """
# κ²μκΈ κΆν νμΈ μ΄μ μ κ²μν μ κ·ΌκΆνμ νμΈνλ λ©μλ.
# """
# return self.board.is_permitted(user, PERM_ACCESS)
#
# def post_permitted(self, user, permission):
# """
# κ²μκΈ κΆν νμΈ μ΄νμ λμΌ κΆνμ΄ κ²μνμλ μλμ§ νμΈνλ λ©μλ.
# """
# return self.board.is_permitted(user, permission)
#
# Path: apps/board/serializers.py
# class PostSerializer(serializers.ModelSerializer):
#
# attachedfile_set = AttachedFileSerializer(many=True, read_only=True)
# is_permitted_to_read = serializers.SerializerMethodField()
# is_permitted_to_edit = serializers.SerializerMethodField()
# is_permitted_to_delete = serializers.SerializerMethodField()
# absolute_url = serializers.SerializerMethodField()
#
# def get_request_user(self):
# user = None
# request = self.context.get("request")
# if request and hasattr(request, "user"):
# user = request.user
# return user
#
# def get_is_permitted_to_read(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_READ)
#
# def get_is_permitted_to_edit(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_EDIT)
#
# def get_is_permitted_to_delete(self, obj):
# return obj.is_permitted(self.get_request_user(), PERM_DELETE)
#
# def get_absolute_url(self, obj):
# return obj.get_absolute_url()
#
# class Meta:
# model = Post
# fields = (
# 'id',
# 'title',
# 'content',
# 'is_deleted',
# 'is_secret',
# 'is_permitted_to_read',
# 'is_permitted_to_edit',
# 'is_permitted_to_delete',
# 'absolute_url',
# 'board',
# 'attachedfile_set'
# )
# depth = 2
#
# class CreatePostSerializer(serializers.ModelSerializer):
# class Meta:
# model = Post
# fields = (
# 'title',
# 'content',
# 'board',
# )
#
# class RetrivePostSerializer(serializers.ModelSerializer):
# class Meta:
# model = Post
# fields = (
# 'title',
# 'content',
# 'board',
# )
#
# Path: apps/board/constants.py
# POST_PER_PAGE = 15
#
# BOARD_ROLE = {
# 'DEFAULT': 'DEFAULT',
# 'PROJECT': 'PROJECT',
# 'DEBATE': 'DEBATE',
# 'PLANBOOK': 'PLANBOOK',
# 'ARCHIVING':'ARCHIVING',
# 'WORKHOUR': 'WORKHOUR',
# 'SPONSOR': 'SPONSOR',
# 'SWIPER':'SWIPER',
# 'STORE': 'STORE',
# 'CONTACT':'CONTACT',
# }
. Output only the next line. | return RetrivePostSerializer |
Using the snippet: <|code_start|> class Meta:
verbose_name = _('κ·μ μΈνΈ')
verbose_name_plural = _('κ·μ μΈνΈ(λ€)')
def __str__(self):
first = self.rules.first()
if first:
return first.name
return _('(λΉμ΄μμ)')
def get_absolute_url(self):
return '/usc/rule/{}'.format(self.slug)
class Rule(models.Model):
"""
κ·μ .
"""
rule_set = models.ForeignKey(
RuleSet,
related_name='rules',
verbose_name=_('κ·μ μΈνΈ'))
name = models.CharField(
_('κ·μ λͺ
'),
max_length=64)
rule_type = models.CharField(
_('μ’
λ₯'),
<|code_end|>
, determine the next line of code. You have imports:
from django.db import models
from django.utils.translation import ugettext as _
from apps.rule.const import get_choices, get_verbose, RULE_TYPE, CHAPTER_TYPE, REVISION_TYPE
from itertools import chain
and context (class names, function names, or code) available:
# Path: apps/rule/const.py
# def get_choices(type_set):
# return list(type_set.values())
#
# def get_verbose(type_set, name):
# value_dict = dict(list(type_set.values()))
# return value_dict[name]
#
# RULE_TYPE = {
# 'CONSTITUTION': ('CONST', _('νμΉ')),
# 'BYLAW': ('BYLAW', _('μΈμΉ')),
# 'RULE': ('RULE', _('κ·μΉ')),
# 'ETC': ('ETC', _('κΈ°νκ·μ ')),
# }
#
# CHAPTER_TYPE = {
# 'PREAMBLE': ('PRE', _('μ λ¬Έ')),
# 'CHAPTER': ('CHAP', _('μ₯')),
# 'SECTION': ('SEC', _('μ ')),
# 'SUPPLEMENT': ('SUPPL', _('λΆμΉ')),
# }
#
# REVISION_TYPE = {
# 'ESTABLISH': ('ESTAB', _('μ μ ')),
# 'PARTIALLY': ('PART', _('μΌλΆκ°μ ')),
# 'FULLY': ('FULL', _('μ λΆκ°μ ')),
# }
. Output only the next line. | max_length=8, choices=get_choices(RULE_TYPE)) |
Here is a snippet: <|code_start|> def __str__(self):
first = self.rules.first()
if first:
return first.name
return _('(λΉμ΄μμ)')
def get_absolute_url(self):
return '/usc/rule/{}'.format(self.slug)
class Rule(models.Model):
"""
κ·μ .
"""
rule_set = models.ForeignKey(
RuleSet,
related_name='rules',
verbose_name=_('κ·μ μΈνΈ'))
name = models.CharField(
_('규정명'),
max_length=64)
rule_type = models.CharField(
_('종류'),
max_length=8, choices=get_choices(RULE_TYPE))
@property
def rule_type_v(self):
<|code_end|>
. Write the next line using the current file imports:
from django.db import models
from django.utils.translation import ugettext as _
from apps.rule.const import get_choices, get_verbose, RULE_TYPE, CHAPTER_TYPE, REVISION_TYPE
from itertools import chain
and context from other files:
# Path: apps/rule/const.py
# def get_choices(type_set):
# return list(type_set.values())
#
# def get_verbose(type_set, name):
# value_dict = dict(list(type_set.values()))
# return value_dict[name]
#
# RULE_TYPE = {
# 'CONSTITUTION': ('CONST', _('νμΉ')),
# 'BYLAW': ('BYLAW', _('μΈμΉ')),
# 'RULE': ('RULE', _('κ·μΉ')),
# 'ETC': ('ETC', _('κΈ°νκ·μ ')),
# }
#
# CHAPTER_TYPE = {
# 'PREAMBLE': ('PRE', _('μ λ¬Έ')),
# 'CHAPTER': ('CHAP', _('μ₯')),
# 'SECTION': ('SEC', _('μ ')),
# 'SUPPLEMENT': ('SUPPL', _('λΆμΉ')),
# }
#
# REVISION_TYPE = {
# 'ESTABLISH': ('ESTAB', _('μ μ ')),
# 'PARTIALLY': ('PART', _('μΌλΆκ°μ ')),
# 'FULLY': ('FULL', _('μ λΆκ°μ ')),
# }
, which may include functions, classes, or code. Output only the next line. | return get_verbose(RULE_TYPE, self.rule_type) |
Based on the snippet: <|code_start|> class Meta:
verbose_name = _('κ·μ μΈνΈ')
verbose_name_plural = _('κ·μ μΈνΈ(λ€)')
def __str__(self):
first = self.rules.first()
if first:
return first.name
return _('(λΉμ΄μμ)')
def get_absolute_url(self):
return '/usc/rule/{}'.format(self.slug)
class Rule(models.Model):
"""
κ·μ .
"""
rule_set = models.ForeignKey(
RuleSet,
related_name='rules',
verbose_name=_('κ·μ μΈνΈ'))
name = models.CharField(
_('규정명'),
max_length=64)
rule_type = models.CharField(
_('종류'),
<|code_end|>
, predict the immediate next line with the help of imports:
from django.db import models
from django.utils.translation import ugettext as _
from apps.rule.const import get_choices, get_verbose, RULE_TYPE, CHAPTER_TYPE, REVISION_TYPE
from itertools import chain
and context (classes, functions, sometimes code) from other files:
# Path: apps/rule/const.py
# def get_choices(type_set):
# return list(type_set.values())
#
# def get_verbose(type_set, name):
# value_dict = dict(list(type_set.values()))
# return value_dict[name]
#
# RULE_TYPE = {
# 'CONSTITUTION': ('CONST', _('νμΉ')),
# 'BYLAW': ('BYLAW', _('μΈμΉ')),
# 'RULE': ('RULE', _('κ·μΉ')),
# 'ETC': ('ETC', _('κΈ°νκ·μ ')),
# }
#
# CHAPTER_TYPE = {
# 'PREAMBLE': ('PRE', _('μ λ¬Έ')),
# 'CHAPTER': ('CHAP', _('μ₯')),
# 'SECTION': ('SEC', _('μ ')),
# 'SUPPLEMENT': ('SUPPL', _('λΆμΉ')),
# }
#
# REVISION_TYPE = {
# 'ESTABLISH': ('ESTAB', _('μ μ ')),
# 'PARTIALLY': ('PART', _('μΌλΆκ°μ ')),
# 'FULLY': ('FULL', _('μ λΆκ°μ ')),
# }
. Output only the next line. | max_length=8, choices=get_choices(RULE_TYPE)) |
Using the snippet: <|code_start|> _('제개정 종류'),
max_length=8, choices=get_choices(REVISION_TYPE))
@property
def revision_type_v(self):
return get_verbose(REVISION_TYPE, self.revision_type)
@revision_type_v.setter
def revision_type_verbose(self, value):
self.revision_type = value
date_resolved = models.DateField(
_('μκ²°μΌ'),
null=True, blank=True)
class Meta:
ordering = ['-date_resolved']
verbose_name = 'κ·μ '
verbose_name_plural = 'κ·μ (λ€)'
def __str__(self):
return "{} ({} {})".format(
self.name,
self.date_resolved,
get_verbose(REVISION_TYPE, self.revision_type))
@property
def d_chapters(self):
q = self.chapters.filter(parent_chapter=None)
ordering = ['PREAMBLE', 'CHAPTER', 'SECTION', 'SUPPLEMENT']
<|code_end|>
, determine the next line of code. You have imports:
from django.db import models
from django.utils.translation import ugettext as _
from apps.rule.const import get_choices, get_verbose, RULE_TYPE, CHAPTER_TYPE, REVISION_TYPE
from itertools import chain
and context (class names, function names, or code) available:
# Path: apps/rule/const.py
# def get_choices(type_set):
# return list(type_set.values())
#
# def get_verbose(type_set, name):
# value_dict = dict(list(type_set.values()))
# return value_dict[name]
#
# RULE_TYPE = {
# 'CONSTITUTION': ('CONST', _('νμΉ')),
# 'BYLAW': ('BYLAW', _('μΈμΉ')),
# 'RULE': ('RULE', _('κ·μΉ')),
# 'ETC': ('ETC', _('κΈ°νκ·μ ')),
# }
#
# CHAPTER_TYPE = {
# 'PREAMBLE': ('PRE', _('μ λ¬Έ')),
# 'CHAPTER': ('CHAP', _('μ₯')),
# 'SECTION': ('SEC', _('μ ')),
# 'SUPPLEMENT': ('SUPPL', _('λΆμΉ')),
# }
#
# REVISION_TYPE = {
# 'ESTABLISH': ('ESTAB', _('μ μ ')),
# 'PARTIALLY': ('PART', _('μΌλΆκ°μ ')),
# 'FULLY': ('FULL', _('μ λΆκ°μ ')),
# }
. Output only the next line. | chapter_type_list = [CHAPTER_TYPE[chapter_type][0] for chapter_type in ordering] |
Given the code snippet: <|code_start|>
class Rule(models.Model):
"""
κ·μ .
"""
rule_set = models.ForeignKey(
RuleSet,
related_name='rules',
verbose_name=_('κ·μ μΈνΈ'))
name = models.CharField(
_('규정명'),
max_length=64)
rule_type = models.CharField(
_('종류'),
max_length=8, choices=get_choices(RULE_TYPE))
@property
def rule_type_v(self):
return get_verbose(RULE_TYPE, self.rule_type)
@rule_type_v.setter
def rule_type_verbose(self, value):
self.rule_type = value
revision_type = models.CharField(
_('제개정 종류'),
<|code_end|>
, generate the next line using the imports in this file:
from django.db import models
from django.utils.translation import ugettext as _
from apps.rule.const import get_choices, get_verbose, RULE_TYPE, CHAPTER_TYPE, REVISION_TYPE
from itertools import chain
and context (functions, classes, or occasionally code) from other files:
# Path: apps/rule/const.py
# def get_choices(type_set):
# return list(type_set.values())
#
# def get_verbose(type_set, name):
# value_dict = dict(list(type_set.values()))
# return value_dict[name]
#
# RULE_TYPE = {
# 'CONSTITUTION': ('CONST', _('νμΉ')),
# 'BYLAW': ('BYLAW', _('μΈμΉ')),
# 'RULE': ('RULE', _('κ·μΉ')),
# 'ETC': ('ETC', _('κΈ°νκ·μ ')),
# }
#
# CHAPTER_TYPE = {
# 'PREAMBLE': ('PRE', _('μ λ¬Έ')),
# 'CHAPTER': ('CHAP', _('μ₯')),
# 'SECTION': ('SEC', _('μ ')),
# 'SUPPLEMENT': ('SUPPL', _('λΆμΉ')),
# }
#
# REVISION_TYPE = {
# 'ESTABLISH': ('ESTAB', _('μ μ ')),
# 'PARTIALLY': ('PART', _('μΌλΆκ°μ ')),
# 'FULLY': ('FULL', _('μ λΆκ°μ ')),
# }
. Output only the next line. | max_length=8, choices=get_choices(REVISION_TYPE)) |
Given the code snippet: <|code_start|>
router = routers.SimpleRouter()
# PostViewSet
router.register(
prefix=r'posts',
base_name='post',
<|code_end|>
, generate the next line using the imports in this file:
from rest_framework import routers
from apps.board.viewsets import PostViewSet
and context (functions, classes, or occasionally code) from other files:
# Path: apps/board/viewsets.py
# class PostViewSet(
# mixins.ListModelMixin,
# mixins.RetrieveModelMixin,
# mixins.CreateModelMixin,
# viewsets.GenericViewSet
# ):
#
# permission_classes = (
# permissions.IsAdminUser,
# )
# pagination_class = PostPagination
#
# def get_queryset(self):
# role = self.request.query_params.get('role', None)
# search = self.request.query_params.get('search', None)
# posts = Post.objects.filter(is_deleted=False)
#
# # remove not permitted posts
# posts_copy = posts.all()
# for post in posts:
# if not post.is_permitted(self.request.user, PERM_READ):
# posts_copy = posts_copy.exclude(post.pk)
# posts = posts_copy
#
# if role is not None:
# posts = posts.filter(board__role__exact=role)
# if search is not None:
# posts = posts.filter(
# Q(title__icontains=search) |
# Q(content__icontains=search)
# )
# return posts
#
# def get_serializer_class(self):
# if self.action == 'create':
# return CreatePostSerializer
# elif self.action == 'retrive':
# return RetrivePostSerializer
# else:
# return PostSerializer
. Output only the next line. | viewset=PostViewSet, |
Predict the next line for this snippet: <|code_start|>def vote_available(user):
if is_vote_period():
return is_freshman(user)
else:
return is_tester(user)
def is_freshman(user):
if not user.is_authenticated or not hasattr(user, 'portal_info'):
return False
portal_info = user.portal_info
return portal_info.ku_std_no[:4] == "2019" and portal_info.ku_acad_prog == "학사"
def is_tester(user):
if not user.is_authenticated or not hasattr(user, 'portal_info'):
return False
return user.portal_info.ku_std_no in (
"20110208",
"20180379",
"20180419",
"20180058",
"20180634",
"20180154",
)
def is_vote_period():
<|code_end|>
with the help of current file imports:
from django.utils import timezone
from .models.vote import VotePolicy
and context from other files:
# Path: apps/ot/models/vote.py
# class VotePolicy(models.Model):
# """
# 투표 시작/종료 시간, 서비스/테스트 여부 등을 코드배포 없이 수정하도록 db에 저장
# """
# is_test = models.BooleanField()
# start = models.DateTimeField()
# end = models.DateTimeField()
, which may contain function names, class names, or code. Output only the next line. | if VotePolicy.objects.count() == 0: |
Predict the next line after this snippet: <|code_start|>
class ClubListView(ListView):
template_name = 'club_list.html'
context_object_name = 'clubs'
def get_queryset(self):
if 'is_band' in self.request.GET:
if self.request.GET['is_band'] == '0':
<|code_end|>
using the current file's imports:
from django.views.generic import ListView, DetailView
from django.http import Http404
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from ..models.club import Club
from ..util import vote_available
and any relevant context from other files:
# Path: apps/ot/models/club.py
# class Club(models.Model):
# """
# λμ리 μ 보
# """
#
# class Meta:
# verbose_name = 'λμ리 μ 보'
# verbose_name_plural = 'λμ리 μ 보(λ€)'
#
# def __str__(self):
# return str(self.name)
#
# name = models.CharField(
# max_length=63,
# verbose_name='λμ리 μ΄λ¦',
# )
# pages = models.TextField(
# blank=True,
# verbose_name='λμ리 νμ΄μ€λΆ/μ νλΈ νμ΄μ§ μ£Όμ',
# )
# one_line_intro = models.TextField(
# blank=True,
# verbose_name='λμ리 ν μ€ μκ°(λ©μΈ νλ©΄)',
# )
# intro = models.TextField(
# verbose_name='λμ리 μκ°',
# )
# is_band = models.BooleanField(
# default=False,
# verbose_name='λ°΄λ μ¬λΆ',
# )
#
# video_url1 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #1',
# )
# video_url2 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #2',
# )
#
# Path: apps/ot/util.py
# def vote_available(user):
# if is_vote_period():
# return is_freshman(user)
# else:
# return is_tester(user)
. Output only the next line. | queryset = Club.objects.filter(is_band=False) |
Here is a snippet: <|code_start|>
def get_queryset(self):
if 'is_band' in self.request.GET:
if self.request.GET['is_band'] == '0':
queryset = Club.objects.filter(is_band=False)
elif self.request.GET['is_band'] == '1':
queryset = Club.objects.filter(is_band=True)
else:
raise Http404
else:
queryset = Club.objects.all()
sorted_queryset = list()
for pk in [20, 18, 15, 17, 9, 21, 16, 8, 23, 13, 11, 10, 14]:
sorted_queryset.append(queryset.get(pk=pk))
return sorted_queryset
# return queryset.order_by('?')
class ClubDetailView(DetailView):
template_name = 'club_detail.html'
model = Club
def get_context_data(self, **kwargs):
context = super(ClubDetailView, self).get_context_data(**kwargs)
user = self.request.user
<|code_end|>
. Write the next line using the current file imports:
from django.views.generic import ListView, DetailView
from django.http import Http404
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from ..models.club import Club
from ..util import vote_available
and context from other files:
# Path: apps/ot/models/club.py
# class Club(models.Model):
# """
# λμ리 μ 보
# """
#
# class Meta:
# verbose_name = 'λμ리 μ 보'
# verbose_name_plural = 'λμ리 μ 보(λ€)'
#
# def __str__(self):
# return str(self.name)
#
# name = models.CharField(
# max_length=63,
# verbose_name='λμ리 μ΄λ¦',
# )
# pages = models.TextField(
# blank=True,
# verbose_name='λμ리 νμ΄μ€λΆ/μ νλΈ νμ΄μ§ μ£Όμ',
# )
# one_line_intro = models.TextField(
# blank=True,
# verbose_name='λμ리 ν μ€ μκ°(λ©μΈ νλ©΄)',
# )
# intro = models.TextField(
# verbose_name='λμ리 μκ°',
# )
# is_band = models.BooleanField(
# default=False,
# verbose_name='λ°΄λ μ¬λΆ',
# )
#
# video_url1 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #1',
# )
# video_url2 = models.CharField(
# blank=True,
# max_length=63,
# verbose_name='λμ리 μκ° λΉλμ€ μ£Όμ #2',
# )
#
# Path: apps/ot/util.py
# def vote_available(user):
# if is_vote_period():
# return is_freshman(user)
# else:
# return is_tester(user)
, which may include functions, classes, or code. Output only the next line. | available = vote_available(user) |
Predict the next line for this snippet: <|code_start|> self.index_regex = index_regex
self.file_handler = self._open(path)
self.offset_dict = self.file_handler.offset_dict
def close(self):
"""Close the internal file handler."""
self.file_handler.close()
def _open(self, path_or_file):
"""
Open a file like object resp. a wrapper for a file like object.
Arguments:
path (str): path to the mzml file
Returns:
file_handler: instance of
:py:class:`~pymzml.file_classes.standardGzip.StandardGzip`,
:py:class:`~pymzml.file_classes.indexedGzip.IndexedGzip` or
:py:class:`~pymzml.file_classes.standardMzml.StandardMzml`,
based on the file ending of 'path'
"""
if isinstance(path_or_file, BytesIO):
return bytesMzml.BytesMzml(
path_or_file, self.encoding, self.build_index_from_scratch
)
if path_or_file.endswith(".gz"):
if self._indexed_gzip(path_or_file):
return indexedGzip.IndexedGzip(path_or_file, self.encoding)
else:
<|code_end|>
with the help of current file imports:
from io import BytesIO
from pymzml.file_classes import indexedGzip, standardGzip, standardMzml, bytesMzml
from pymzml.utils import GSGR
and context from other files:
# Path: pymzml/file_classes/standardGzip.py
# class StandardGzip(object):
# def __init__(self, path, encoding):
# def close(self):
# def _build_index(self):
# def read(self, size=-1):
# def __getitem__(self, identifier):
#
# Path: pymzml/file_classes/standardMzml.py
# class StandardMzml(object):
# def __init__(self, path, encoding, build_index_from_scratch=False, index_regex=None):
# def get_binary_file_handler(self):
# def get_file_handler(self, encoding):
# def __getitem__(self, identifier):
# def _binary_search(self, target_index):
# def _build_index(self, from_scratch=False):
# def _build_index_from_scratch(self, seeker):
# def get_data_indices(fh, chunksize=8192, lookback_size=100):
# def _interpol_search(self, target_index, chunk_size=8, fallback_cutoff=100):
# def _read_to_spec_end(self, seeker, chunks_to_read=8):
# def _read_extremes(self):
# def _search_linear(self, seeker, index, chunk_size=8):
# def _search_string_identifier(self, search_string, chunk_size=8):
# def _read_until_tag_end(self, seeker, max_search_len=12, byte_mode=False):
# def read(self, size=-1):
# def close(self):
, which may contain function names, class names, or code. Output only the next line. | return standardGzip.StandardGzip(path_or_file, self.encoding) |
Given snippet: <|code_start|> self.file_handler = self._open(path)
self.offset_dict = self.file_handler.offset_dict
def close(self):
"""Close the internal file handler."""
self.file_handler.close()
def _open(self, path_or_file):
"""
Open a file like object resp. a wrapper for a file like object.
Arguments:
path (str): path to the mzml file
Returns:
file_handler: instance of
:py:class:`~pymzml.file_classes.standardGzip.StandardGzip`,
:py:class:`~pymzml.file_classes.indexedGzip.IndexedGzip` or
:py:class:`~pymzml.file_classes.standardMzml.StandardMzml`,
based on the file ending of 'path'
"""
if isinstance(path_or_file, BytesIO):
return bytesMzml.BytesMzml(
path_or_file, self.encoding, self.build_index_from_scratch
)
if path_or_file.endswith(".gz"):
if self._indexed_gzip(path_or_file):
return indexedGzip.IndexedGzip(path_or_file, self.encoding)
else:
return standardGzip.StandardGzip(path_or_file, self.encoding)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from io import BytesIO
from pymzml.file_classes import indexedGzip, standardGzip, standardMzml, bytesMzml
from pymzml.utils import GSGR
and context:
# Path: pymzml/file_classes/standardGzip.py
# class StandardGzip(object):
# def __init__(self, path, encoding):
# def close(self):
# def _build_index(self):
# def read(self, size=-1):
# def __getitem__(self, identifier):
#
# Path: pymzml/file_classes/standardMzml.py
# class StandardMzml(object):
# def __init__(self, path, encoding, build_index_from_scratch=False, index_regex=None):
# def get_binary_file_handler(self):
# def get_file_handler(self, encoding):
# def __getitem__(self, identifier):
# def _binary_search(self, target_index):
# def _build_index(self, from_scratch=False):
# def _build_index_from_scratch(self, seeker):
# def get_data_indices(fh, chunksize=8192, lookback_size=100):
# def _interpol_search(self, target_index, chunk_size=8, fallback_cutoff=100):
# def _read_to_spec_end(self, seeker, chunks_to_read=8):
# def _read_extremes(self):
# def _search_linear(self, seeker, index, chunk_size=8):
# def _search_string_identifier(self, search_string, chunk_size=8):
# def _read_until_tag_end(self, seeker, max_search_len=12, byte_mode=False):
# def read(self, size=-1):
# def close(self):
which might include code, classes, or functions. Output only the next line. | return standardMzml.StandardMzml( |
Based on the snippet: <|code_start|> self.spec = self.Run[2548]
def test_scan_time(self):
scan_time = self.spec.scan_time_in_minutes()
scan_time2 = self.spec.scan_time_in_minutes()
self.assertIsNotNone(scan_time)
self.assertIsInstance(scan_time, float)
self.assertEqual(round(scan_time, 4), round(28.96722412109367, 4))
self.assertEqual(scan_time, scan_time2)
def test_select_precursors(self):
selected_precursor = self.spec.selected_precursors
self.assertIsInstance(selected_precursor[0], dict)
self.assertIsInstance(selected_precursor[0]["mz"], float)
self.assertIsInstance(selected_precursor[0]["i"], float)
self.assertIsInstance(selected_precursor[0]["charge"], int)
self.assertEqual(
selected_precursor, [{"mz": 443.711242675781, "i": 0.0, "charge": 2, 'precursor id': None}]
)
def test_ion_mode(self):
assert self.spec['positive scan'] is True
def test_ion_mode_non_existent(self):
assert self.spec['negative scan'] is None
@unittest.skipIf(pymzml.spec.DECON_DEP is False, "ms_deisotope was not installed")
def test_deconvolute_peaks(self):
charge = 3
test_mz = 430.313
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import os
import unittest
import pymzml
import pymzml.run as run
import test_file_paths
import numpy as np
from pymzml.spec import PROTON
and context (classes, functions, sometimes code) from other files:
# Path: pymzml/spec.py
# PROTON = 1.00727646677
. Output only the next line. | arr = np.array([(test_mz, 100), (test_mz + PROTON / charge, 49)]) |
Continue the code snippet: <|code_start|> # sys.exit(1)
if float_type == "32-bit float":
f_type = "f"
elif float_type == "64-bit float":
f_type = "d"
fmt = "{endian}{array_length}{float_type}".format(
endian="<", array_length=d_array_length, float_type=f_type
)
ret_data = unpack(fmt, dec_data)
else:
ret_data = []
return ret_data
def _decodeNumpress_to_array(self, data, compression):
"""
Decode golomb-rice encoded data (aka numpress encoded data).
Arguments:
data (str) : Encoded data string
compression (str) : Decompression algorithm to be used
(valid are 'ms-np-linear', 'ms-np-pic', 'ms-np-slof')
Returns:
array (list): Returns the unpacked data as an array of floats.
"""
result = []
comp_ms_tags = [self.calling_instance.OT[comp]["id"] for comp in compression]
data = np.frombuffer(data, dtype=np.uint8)
if "MS:1002312" in comp_ms_tags:
<|code_end|>
. Use current file imports:
import math
import re
import sys
import warnings
import xml.etree.ElementTree as ElementTree
import zlib
import numpy as np
from base64 import b64decode as b64dec
from collections import defaultdict as ddict
from functools import lru_cache
from operator import itemgetter as itemgetter
from struct import unpack
from ms_deisotope.deconvolution import deconvolute_peaks
from ms_peak_picker import simple_peak
from . import regex_patterns
from .decoder import MSDecoder
and context (classes, functions, or code) from other files:
# Path: pymzml/decoder.py
# def _decode(data, comp, d_array_length, f_type, d_type):
# def __init__(self, nb_workers=2):
# def pool_decode(self, data, callback):
# def _error_callback(self, result):
# class Decoder:
# ZE_POOL = Pool(processes=2)
. Output only the next line. | result = MSDecoder.decode_linear(data) |
Continue the code snippet: <|code_start|> """Remove all references."""
self.left = None
self.right = None
self.key = None
self.value = None
def height(node):
return node.balance if node is not None else -1
def jsw_single(root, direction):
other_side = 1 - direction
save = root[other_side]
root[other_side] = save[direction]
save[direction] = root
rlh = height(root.left)
rrh = height(root.right)
slh = height(save[other_side])
root.balance = max(rlh, rrh) + 1
save.balance = max(slh, root.balance) + 1
return save
def jsw_double(root, direction):
other_side = 1 - direction
root[other_side] = jsw_single(root[other_side], other_side)
return jsw_single(root, direction)
<|code_end|>
. Use current file imports:
from .abctree import ABCTree
from array import array
and context (classes, functions, or code) from other files:
# Path: cache-simulator/cache_model_evaluation/bintrees/abctree.py
# PYPY = hasattr(sys, 'pypy_version_info')
# class _ABCTree(object):
# class CPYTHON_ABCTree(_ABCTree):
# class PYPY_ABCTree(CPYTHON_ABCTree):
# def __repr__(self):
# def copy(self):
# def __contains__(self, key):
# def __len__(self):
# def __min__(self):
# def __max__(self):
# def __and__(self, other):
# def __or__(self, other):
# def __sub__(self, other):
# def __xor__(self, other):
# def discard(self, key):
# def is_empty(self):
# def keys(self, reverse=False):
# def __reversed__(self):
# def values(self, reverse=False):
# def items(self, reverse=False):
# def __getitem__(self, key):
# def __setitem__(self, key, value):
# def __delitem__(self, key):
# def remove_items(self, keys):
# def key_slice(self, start_key, end_key, reverse=False):
# def value_slice(self, start_key, end_key, reverse=False):
# def item_slice(self, start_key, end_key, reverse=False):
# def __getstate__(self):
# def __setstate__(self, state):
# def set_default(self, key, default=None):
# def update(self, *args):
# def from_keys(cls, iterable, value=None):
# def get(self, key, default=None):
# def pop(self, key, *args):
# def prev_key(self, key):
# def succ_key(self, key):
# def floor_key(self, key):
# def ceiling_key(self, key):
# def pop_min(self):
# def pop_max(self):
# def min_key(self):
# def max_key(self):
# def nsmallest(self, n, pop=False):
# def nlargest(self, n, pop=False):
# def intersection(self, *trees):
# def union(self, *trees):
# def difference(self, *trees):
# def symmetric_difference(self, tree):
# def is_subset(self, tree):
# def is_superset(self, tree):
# def is_disjoint(self, tree):
# def _build_sets(trees):
# def _multi_tree_get(trees, key):
# def __init__(self, items=None):
# def clear(self):
# def _clear(node):
# def count(self):
# def get_value(self, key):
# def pop_item(self):
# def foreach(self, func, order=0):
# def _traverse(node):
# def min_item(self):
# def max_item(self):
# def succ_item(self, key):
# def prev_item(self, key):
# def floor_item(self, key):
# def ceiling_item(self, key):
# def iter_items(self, start_key=None, end_key=None, reverse=False):
# def _iter_items_forward(self, start_key=None, end_key=None):
# def _iter_items_backward(self, start_key=None, end_key=None):
# def _iter_items(self, left=attrgetter("left"), right=attrgetter("right"), start_key=None, end_key=None):
# def _get_in_range_func(self, start_key, end_key):
# def iter_items(self, start_key=None, end_key=None, reverse=False):
. Output only the next line. | class AVLTree(ABCTree): |
Continue the code snippet: <|code_start|> """T.is_empty() -> False if T contains any items else True"""
return self.count == 0
def keys(self, reverse=False):
"""T.keys([reverse]) -> an iterator over the keys of T, in ascending
order if reverse is True, iterate in descending order, reverse defaults
to False
"""
return (item[0] for item in self.iter_items(reverse=reverse))
__iter__ = keys
def __reversed__(self):
return self.keys(reverse=True)
def values(self, reverse=False):
"""T.values([reverse]) -> an iterator over the values of T, in ascending order
if reverse is True, iterate in descending order, reverse defaults to False
"""
return (item[1] for item in self.iter_items(reverse=reverse))
def items(self, reverse=False):
"""T.items([reverse]) -> an iterator over the (key, value) items of T,
in ascending order if reverse is True, iterate in descending order,
reverse defaults to False
"""
return self.iter_items(reverse=reverse)
def __getitem__(self, key):
"""T.__getitem__(y) <==> x[y]"""
if isinstance(key, slice):
<|code_end|>
. Use current file imports:
import sys
from .treeslice import TreeSlice
from operator import attrgetter
and context (classes, functions, or code) from other files:
# Path: cache-simulator/cache_model_evaluation/bintrees/treeslice.py
# class TreeSlice(object):
# __slots__ = ['_tree', '_start', '_stop']
#
# def __init__(self, tree, start, stop):
# self._tree = tree
# self._start = start
# self._stop = stop
#
# def __repr__(self):
# tpl = "%s({%s})" % (self._tree.__class__.__name__, '%s')
# return tpl % ", ".join( ("%r: %r" % item for item in self.items()) )
#
# def __contains__(self, key):
# if self._is_in_range(key):
# return key in self._tree
# else:
# return False
#
# def _is_in_range(self, key):
# if self._start is not None and key < self._start:
# return False
# if self._stop is not None and key >= self._stop:
# return False
# return True
#
# def __getitem__(self, key):
# if isinstance(key, slice):
# return self._sub_slice(key.start, key.stop)
# if self._is_in_range(key):
# return self._tree[key]
# else:
# raise KeyError(key)
#
# def _sub_slice(self, start, stop):
# def newstart():
# if start is None:
# return self._start
# elif self._start is None:
# return start
# else:
# return max(start, self._start)
#
# def newstop():
# if stop is None:
# return self._stop
# elif self._stop is None:
# return stop
# else:
# return min(stop, self._stop)
#
# return TreeSlice(self._tree, newstart(), newstop())
#
# def keys(self):
# return self._tree.key_slice(self._start, self._stop)
# __iter__ = keys
#
# def values(self):
# return self._tree.value_slice(self._start, self._stop)
#
# def items(self):
# return self._tree.iter_items(self._start, self._stop)
. Output only the next line. | return TreeSlice(self, key.start, key.stop)
|
Predict the next line for this snippet: <|code_start|>
class Node(object):
"""Internal object, represents a tree node."""
__slots__ = ['key', 'value', 'left', 'right']
def __init__(self, key, value):
self.key = key
self.value = value
self.left = None
self.right = None
def __getitem__(self, key):
"""N.__getitem__(key) <==> x[key], where key is 0 (left) or 1 (right)."""
return self.left if key == 0 else self.right
def __setitem__(self, key, value):
"""N.__setitem__(key, value) <==> x[key]=value, where key is 0 (left) or 1 (right)."""
if key == 0:
self.left = value
else:
self.right = value
def free(self):
"""Set references to None."""
self.left = None
self.right = None
self.value = None
self.key = None
<|code_end|>
with the help of current file imports:
from .abctree import ABCTree
and context from other files:
# Path: cache-simulator/cache_model_evaluation/bintrees/abctree.py
# PYPY = hasattr(sys, 'pypy_version_info')
# class _ABCTree(object):
# class CPYTHON_ABCTree(_ABCTree):
# class PYPY_ABCTree(CPYTHON_ABCTree):
# def __repr__(self):
# def copy(self):
# def __contains__(self, key):
# def __len__(self):
# def __min__(self):
# def __max__(self):
# def __and__(self, other):
# def __or__(self, other):
# def __sub__(self, other):
# def __xor__(self, other):
# def discard(self, key):
# def is_empty(self):
# def keys(self, reverse=False):
# def __reversed__(self):
# def values(self, reverse=False):
# def items(self, reverse=False):
# def __getitem__(self, key):
# def __setitem__(self, key, value):
# def __delitem__(self, key):
# def remove_items(self, keys):
# def key_slice(self, start_key, end_key, reverse=False):
# def value_slice(self, start_key, end_key, reverse=False):
# def item_slice(self, start_key, end_key, reverse=False):
# def __getstate__(self):
# def __setstate__(self, state):
# def set_default(self, key, default=None):
# def update(self, *args):
# def from_keys(cls, iterable, value=None):
# def get(self, key, default=None):
# def pop(self, key, *args):
# def prev_key(self, key):
# def succ_key(self, key):
# def floor_key(self, key):
# def ceiling_key(self, key):
# def pop_min(self):
# def pop_max(self):
# def min_key(self):
# def max_key(self):
# def nsmallest(self, n, pop=False):
# def nlargest(self, n, pop=False):
# def intersection(self, *trees):
# def union(self, *trees):
# def difference(self, *trees):
# def symmetric_difference(self, tree):
# def is_subset(self, tree):
# def is_superset(self, tree):
# def is_disjoint(self, tree):
# def _build_sets(trees):
# def _multi_tree_get(trees, key):
# def __init__(self, items=None):
# def clear(self):
# def _clear(node):
# def count(self):
# def get_value(self, key):
# def pop_item(self):
# def foreach(self, func, order=0):
# def _traverse(node):
# def min_item(self):
# def max_item(self):
# def succ_item(self, key):
# def prev_item(self, key):
# def floor_item(self, key):
# def ceiling_item(self, key):
# def iter_items(self, start_key=None, end_key=None, reverse=False):
# def _iter_items_forward(self, start_key=None, end_key=None):
# def _iter_items_backward(self, start_key=None, end_key=None):
# def _iter_items(self, left=attrgetter("left"), right=attrgetter("right"), start_key=None, end_key=None):
# def _get_in_range_func(self, start_key, end_key):
# def iter_items(self, start_key=None, end_key=None, reverse=False):
, which may contain function names, class names, or code. Output only the next line. | class BinaryTree(ABCTree): |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test extensions"""
CREATE_STMT = "CREATE EXTENSION pg_trgm"
TRGM_COMMENT = "text similarity measurement and index searching based on " \
"trigrams"
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (classes, functions, sometimes code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class ExtensionToMapTestCase(DatabaseToMapTestCase): |
Predict the next line for this snippet: <|code_start|> dbmap = self.to_map(["CREATE EXTENSION plperl"])
assert dbmap['extension plperl'] == {
'schema': 'pg_catalog', 'version': '1.0',
'description': "PL/Perl procedural language"}
assert 'language plperl' not in dbmap
def test_map_extension_schema(self):
"Map an existing extension"
VERS = self.base_version()
dbmap = self.to_map(["CREATE SCHEMA s1", CREATE_STMT + " SCHEMA s1"])
assert dbmap['extension pg_trgm'] == {
'schema': 's1', 'version': VERS, 'description': TRGM_COMMENT}
def test_map_extension_plpython3u(self):
"Test a function created with extension other than plpgsql/plperl"
# See issue #103
dbmap = self.to_map(["CREATE EXTENSION plpython3u",
"CREATE FUNCTION test() RETURNS int AS "
"'return 1' LANGUAGE plpython3u"])
assert 'extension plpython3u' in dbmap
class ExtensionToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation for input extensions"""
def test_create_extension_simple(self):
"Create a extension that didn't exist"
inmap = self.std_map()
inmap.update({'extension pg_trgm': {'schema': 'sd'}})
sql = self.to_sql(inmap)
<|code_end|>
with the help of current file imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
, which may contain function names, class names, or code. Output only the next line. | assert fix_indent(sql[0]) == CREATE_STMT + " SCHEMA sd" |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test audit columns"""
CREATE_STMT = "CREATE TABLE t1 (c1 integer, c2 text)"
FUNC_SRC1 = """
BEGIN
NEW.modified_by_user = SESSION_USER;
NEW.modified_timestamp = CURRENT_TIMESTAMP;
RETURN NEW;
END"""
FUNC_SRC2 = """
BEGIN
NEW.updated = CURRENT_TIMESTAMP;
RETURN NEW;
END"""
<|code_end|>
with the help of current file imports:
import pytest
from pyrseas.testutils import AugmentToMapTestCase
and context from other files:
# Path: pyrseas/testutils.py
# class AugmentToMapTestCase(PyrseasTestCase):
#
# def to_map(self, stmts, augmap):
# """Apply an augment map and return a map of the updated database.
#
# :param stmts: list of SQL statements to execute
# :param augmap: dictionary describing the augmentations
# :return: dictionary of the updated database
# """
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# self.config_options(schemas=[], tables=[], no_owner=True,
# no_privs=True, multiple_files=False)
# db = AugmentDatabase(self.cfg)
# return db.apply(augmap)
, which may contain function names, class names, or code. Output only the next line. | class AuditColumnsTestCase(AugmentToMapTestCase): |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
"""Test rules"""
CREATE_TABLE_STMT = "CREATE TABLE sd.t1 (c1 integer, c2 text)"
CREATE_STMT = "CREATE RULE r1 AS ON %s TO sd.t1 DO %s"
COMMENT_STMT = "COMMENT ON RULE r1 ON sd.t1 IS 'Test rule r1'"
<|code_end|>
. Use current file imports:
(from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent)
and context including class names, function names, or small code snippets from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class RuleToMapTestCase(DatabaseToMapTestCase): |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test text search objects"""
CREATE_TSC_STMT = "CREATE TEXT SEARCH CONFIGURATION sd.tsc1 (PARSER = tsp1)"
CREATE_TSD_STMT = "CREATE TEXT SEARCH DICTIONARY sd.tsd1 (TEMPLATE = simple, "\
"stopwords = 'english')"
CREATE_TSP_STMT = "CREATE TEXT SEARCH PARSER sd.tsp1 (START = prsd_start, " \
"GETTOKEN = prsd_nexttoken, END = prsd_end, LEXTYPES = prsd_lextype, " \
"HEADLINE = prsd_headline)"
CREATE_TST_STMT = "CREATE TEXT SEARCH TEMPLATE sd.tst1 (INIT = dsimple_init, "\
"LEXIZE = dsimple_lexize)"
COMMENT_TSC_STMT = "COMMENT ON TEXT SEARCH CONFIGURATION sd.tsc1 IS " \
"'Test configuration tsc1'"
COMMENT_TSD_STMT = "COMMENT ON TEXT SEARCH DICTIONARY sd.tsd1 IS " \
"'Test dictionary tsd1'"
COMMENT_TSP_STMT = "COMMENT ON TEXT SEARCH PARSER sd.tsp1 IS " \
"'Test parser tsp1'"
COMMENT_TST_STMT = "COMMENT ON TEXT SEARCH TEMPLATE sd.tst1 IS " \
"'Test template tst1'"
<|code_end|>
. Write the next line using the current file imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
, which may include functions, classes, or code. Output only the next line. | class TextSearchConfigToMapTestCase(DatabaseToMapTestCase): |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
"""Test operators"""
CREATE_STMT = "CREATE OPERATOR sd.+ (PROCEDURE = textcat, LEFTARG = text, " \
"RIGHTARG = text)"
COMMENT_STMT = "COMMENT ON OPERATOR sd.+(text, text) IS 'Test operator +'"
<|code_end|>
, predict the next line using imports from the current file:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context including class names, function names, and sometimes code from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class OperatorToMapTestCase(DatabaseToMapTestCase): |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test dbtoyaml and yamltodb using autodoc schema but I/O to/from a directory
Same as test_autodoc.py but with directory tree instead of a single YAML file.
See http://cvs.pgfoundry.org/cgi-bin/cvsweb.cgi/~checkout~/autodoc/autodoc/
regressdatabase.sql?rev=1.2
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from pyrseas.testutils import DbMigrateTestCase
and context:
# Path: pyrseas/testutils.py
# class DbMigrateTestCase(TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.srcdb = _connect_clear(TEST_DBNAME_SRC)
# cls.db = _connect_clear(TEST_DBNAME)
# progdir = os.path.abspath(os.path.dirname(__file__))
# cls.dbtoyaml = os.path.join(progdir, 'dbtoyaml.py')
# cls.yamltodb = os.path.join(progdir, 'yamltodb.py')
# cls.tmpdir = TEST_DIR
# if not os.path.exists(cls.tmpdir):
# os.mkdir(cls.tmpdir)
#
# def add_public_schema(self, db):
# db.execute("CREATE SCHEMA IF NOT EXISTS public")
# db.execute("ALTER SCHEMA public OWNER TO postgres")
# db.execute("COMMENT ON SCHEMA public IS "
# "'standard public schema'")
# db.execute("DROP SCHEMA IF EXISTS sd")
# db.conn.commit()
#
# def remove_public_schema(self, db):
# db.execute("DROP SCHEMA IF EXISTS public CASCADE")
# db.conn.commit()
#
# @classmethod
# def remove_tempfiles(cls, prefix):
# remove_temp_files(cls.tmpdir, prefix)
#
# def execute_script(self, path, scriptname):
# scriptfile = os.path.join(os.path.abspath(os.path.dirname(path)),
# scriptname)
# lines = []
# with open(scriptfile, 'r') as fd:
# lines = [line.strip() for line in fd if line != '\n' and
# not line.startswith('--')]
# self.srcdb.execute_commit(' '.join(lines))
#
# def tempfile_path(self, filename):
# return os.path.join(self.tmpdir, filename)
#
# def _db_params(self):
# args = []
# if self.db.host is not None:
# args.append("--host=%s" % self.db.host)
# if self.db.port is not None:
# args.append("--port=%d " % self.db.port)
# if self.db.user is not None:
# args.append("--username=%s" % self.db.user)
# return args
#
# def lines(self, the_file):
# with open(the_file) as f:
# lines = f.readlines()
# return lines
#
# def run_pg_dump(self, dumpfile, srcdb=False, incldata=False):
# """Run pg_dump using special scripts or directly (on Travis-CI)
#
# :param dumpfile: path to the pg_dump output file
# :param srcdb: run against source database
# """
# if TRAVIS:
# pg_dumpver = 'pg_dump'
# else:
# v = self.srcdb._version
# pg_dumpver = "pg_dump%d%d" % (v // 10000,
# (v - v // 10000 * 10000) // 100)
# if sys.platform == 'win32':
# pg_dumpver += '.bat'
# dbname = self.srcdb.name if srcdb else self.db.name
# args = [pg_dumpver]
# args.extend(self._db_params())
# if not incldata:
# args.extend(['-s'])
# args.extend(['-f', dumpfile, dbname])
# subprocess.check_call(args)
#
# def invoke(self, args):
# args.insert(0, sys.executable)
# path = [os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))]
# path.append(os.path.abspath(os.path.join(os.path.dirname(
# yaml.__file__), '..')))
# env = os.environ.copy()
# env.update({'PYTHONPATH': os.pathsep.join(path)})
# subprocess.check_call(args, env=env)
#
# def create_yaml(self, yamlfile='', srcdb=False):
# dbname = self.srcdb.name if srcdb else self.db.name
# args = [self.dbtoyaml]
# args.extend(self._db_params())
# if yamlfile:
# args.extend(['-o', yamlfile, dbname])
# else:
# args.extend(['-r', TEST_DIR, '-m', dbname])
# self.invoke(args)
#
# def migrate_target(self, yamlfile, outfile):
# args = [self.yamltodb]
# args.extend(self._db_params())
# if yamlfile:
# args.extend(['-u', '-o', outfile, self.db.name, yamlfile])
# else:
# args.extend(['-u', '-o', outfile, '-r', TEST_DIR, '-m',
# self.db.name])
# self.invoke(args)
which might include code, classes, or functions. Output only the next line. | class AutodocTestCase(DbMigrateTestCase): |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test loading of data from and into static tables"""
CREATE_STMT = "CREATE TABLE t1 (c1 integer, c2 text)"
FILE_PATH = 'table.t1.data'
TABLE_DATA = [(1, 'abc'), (2, 'def'), (3, 'ghi')]
TABLE_DATA2 = [(1, 'abc', 'row 1'), (3, 'ghi', 'row 2'), (2, 'def', 'row 3'),
(3, 'def', 'row 4')]
<|code_end|>
, generate the next line using the imports in this file:
import os
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase
and context (functions, classes, or occasionally code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
. Output only the next line. | class StaticTableToMapTestCase(DatabaseToMapTestCase): |
Using the snippet: <|code_start|> recs = []
with open(os.path.join(self.cfg['files']['data_path'],
"schema.sd", FILE_PATH)) as f:
for line in f:
(c1, c2) = line.split(',')
recs.append((int(c1), c2.rstrip()))
assert recs == TABLE_DATA
def test_copy_static_table_pk(self):
"Copy a table that has a primary key"
self.db.execute("CREATE TABLE t1 (c1 integer, c2 char(3), c3 text,"
"PRIMARY KEY (c2, c1))")
for row in TABLE_DATA2:
self.db.execute("INSERT INTO t1 VALUES (%s, %s, %s)", row)
cfg = {'datacopy': {'schema sd': ['t1']}}
dbmap = self.to_map([], config=cfg)
assert dbmap['schema sd']['table t1'] == {
'columns': [{'c1': {'type': 'integer', 'not_null': True}},
{'c2': {'type': 'character(3)', 'not_null': True}},
{'c3': {'type': 'text'}}],
'primary_key': {'t1_pkey': {'columns': ['c2', 'c1']}}}
recs = []
with open(os.path.join(self.cfg['files']['data_path'],
"schema.sd", FILE_PATH)) as f:
for line in f:
(c1, c2, c3) = line.split(',')
recs.append((int(c1), c2, c3.rstrip()))
assert recs == sorted(TABLE_DATA2)
<|code_end|>
, determine the next line of code. You have imports:
import os
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase
and context (class names, function names, or code) available:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
. Output only the next line. | class StaticTableToSqlTestCase(InputMapToSqlTestCase): |
Given the following code snippet that comes immediately before the line to be predicted: <|code_start|># -*- coding: utf-8 -*-
"""Test operator families"""
CREATE_STMT = "CREATE OPERATOR FAMILY sd.of1 USING btree"
COMMENT_STMT = "COMMENT ON OPERATOR FAMILY sd.of1 USING btree IS " \
"'Test operator family of1'"
<|code_end|>
, predict the next line using imports from the current file:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context including class names, function names, and sometimes code from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class OperatorFamilyToMapTestCase(DatabaseToMapTestCase): |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test operator families"""
CREATE_STMT = "CREATE OPERATOR FAMILY sd.of1 USING btree"
COMMENT_STMT = "COMMENT ON OPERATOR FAMILY sd.of1 USING btree IS " \
"'Test operator family of1'"
class OperatorFamilyToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of existing operators"""
superuser = True
def test_map_operfam(self):
"Map an operator family"
dbmap = self.to_map([CREATE_STMT])
assert dbmap['schema sd']['operator family of1 using btree'] == {}
def test_map_operfam_comment(self):
"Map an operator family comment"
dbmap = self.to_map([CREATE_STMT, COMMENT_STMT])
assert dbmap['schema sd']['operator family of1 using btree'][
'description'] == 'Test operator family of1'
<|code_end|>
. Use current file imports:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (classes, functions, or code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class OperatorFamilyToSqlTestCase(InputMapToSqlTestCase): |
Predict the next line for this snippet: <|code_start|>CREATE_STMT = "CREATE OPERATOR FAMILY sd.of1 USING btree"
COMMENT_STMT = "COMMENT ON OPERATOR FAMILY sd.of1 USING btree IS " \
"'Test operator family of1'"
class OperatorFamilyToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of existing operators"""
superuser = True
def test_map_operfam(self):
"Map an operator family"
dbmap = self.to_map([CREATE_STMT])
assert dbmap['schema sd']['operator family of1 using btree'] == {}
def test_map_operfam_comment(self):
"Map an operator family comment"
dbmap = self.to_map([CREATE_STMT, COMMENT_STMT])
assert dbmap['schema sd']['operator family of1 using btree'][
'description'] == 'Test operator family of1'
class OperatorFamilyToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation from input operators"""
def test_create_operfam(self):
"Create an operator family"
inmap = self.std_map()
inmap['schema sd'].update({'operator family of1 using btree': {}})
sql = self.to_sql(inmap)
<|code_end|>
with the help of current file imports:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
, which may contain function names, class names, or code. Output only the next line. | assert fix_indent(sql[0]) == CREATE_STMT |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
"""Test materialized views"""
CREATE_TABLE = "CREATE TABLE t1 (c1 INTEGER, c2 TEXT, c3 INTEGER)"
VIEW_STMT = "SELECT c1, c3 * 2 AS mc3 FROM t1"
CREATE_STMT = "CREATE MATERIALIZED VIEW sd.mv1 AS " + VIEW_STMT
COMMENT_STMT = "COMMENT ON MATERIALIZED VIEW sd.mv1 IS 'Test matview mv1'"
VIEW_DEFN = " SELECT t1.c1,\n t1.c3 * 2 AS mc3\n FROM sd.t1;"
<|code_end|>
. Use current file imports:
(import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent)
and context including class names, function names, or small code snippets from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class MatViewToMapTestCase(DatabaseToMapTestCase): |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
"""Test object privileges
The majority of other tests exclude access privileges. These
explicitly request it. In addition, the roles 'user1' and 'user2'
are created if they don't exist.
"""
CREATE_TABLE = "CREATE TABLE t1 (c1 integer, c2 text)"
SOURCE1 = "SELECT 'dummy'::text"
CREATE_FUNC = "CREATE FUNCTION f1() RETURNS text LANGUAGE sql IMMUTABLE AS " \
"$_$%s$_$" % SOURCE1
CREATE_FDW = "CREATE FOREIGN DATA WRAPPER fdw1"
CREATE_FS = "CREATE SERVER fs1 FOREIGN DATA WRAPPER fdw1"
GRANT_SELECT = "GRANT SELECT ON TABLE sd.t1 TO %s"
GRANT_INSUPD = "GRANT INSERT, UPDATE ON TABLE sd.t1 TO %s"
def check_extra_users(db):
"Check existence of extra test users"
for user in ['user1', 'user2']:
row = db.fetchone("SELECT 1 FROM pg_roles WHERE rolname = %s", (user,))
if row is None:
db.execute_commit("CREATE ROLE %s" % user)
<|code_end|>
. Use current file imports:
(from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase)
and context including class names, function names, or small code snippets from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
. Output only the next line. | class PrivilegeToMapTestCase(DatabaseToMapTestCase): |
Given the code snippet: <|code_start|> {'PUBLIC': ['usage']}]})
assert dbmap['foreign data wrapper fdw1'] == expmap
def test_map_server(self):
"Map a foreign server with a GRANT"
stmts = [CREATE_FDW, CREATE_FS,
"GRANT USAGE ON FOREIGN SERVER fs1 TO user1"]
dbmap = self.to_map(stmts, no_privs=False, superuser=True)
expmap = {'privileges': [{self.db.user: ['usage']},
{'user1': ['usage']}]}
assert dbmap['foreign data wrapper fdw1']['server fs1'] == expmap
def test_map_foreign_table(self):
"Map a foreign table with various GRANTs"
if self.db.version < 90100:
self.skipTest('Only available on PG 9.1')
stmts = [CREATE_FDW, CREATE_FS,
"CREATE FOREIGN TABLE ft1 (c1 integer, c2 text) SERVER fs1",
"GRANT SELECT ON ft1 TO PUBLIC",
"GRANT INSERT, UPDATE ON ft1 TO user1"]
dbmap = self.to_map(stmts, no_privs=False, superuser=True)
expmap = {'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}], 'server': 'fs1',
'privileges': [{self.db.user: ['all']},
{'PUBLIC': ['select']},
{'user1': ['insert', 'update']}]}
assert dbmap['schema sd']['foreign table ft1'] == \
self.sort_privileges(expmap)
<|code_end|>
, generate the next line using the imports in this file:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase
and context (functions, classes, or occasionally code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
. Output only the next line. | class PrivilegeToSqlTestCase(InputMapToSqlTestCase): |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
"""Test indexes"""
CREATE_TABLE_STMT = "CREATE TABLE t1 (c1 integer, c2 text)"
CREATE_STMT = "CREATE INDEX t1_idx ON t1 (c1)"
COMMENT_STMT = "COMMENT ON INDEX sd.t1_idx IS 'Test index t1_idx'"
<|code_end|>
. Use current file imports:
(import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent)
and context including class names, function names, or small code snippets from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class IndexToMapTestCase(DatabaseToMapTestCase): |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test text search objects"""
CREATE_FDW_STMT = "CREATE FOREIGN DATA WRAPPER fdw1"
CREATE_FS_STMT = "CREATE SERVER fs1 FOREIGN DATA WRAPPER fdw1"
CREATE_UM_STMT = "CREATE USER MAPPING FOR PUBLIC SERVER fs1"
CREATE_FT_STMT = "CREATE FOREIGN TABLE sd.ft1 (c1 integer, c2 text) SERVER fs1"
DROP_FDW_STMT = "DROP FOREIGN DATA WRAPPER IF EXISTS fdw1"
DROP_FS_STMT = "DROP SERVER IF EXISTS fs1"
DROP_UM_STMT = "DROP USER MAPPING IF EXISTS FOR PUBLIC SERVER fs1"
COMMENT_FDW_STMT = "COMMENT ON FOREIGN DATA WRAPPER fdw1 IS " \
"'Test foreign data wrapper fdw1'"
COMMENT_FS_STMT = "COMMENT ON SERVER fs1 IS 'Test server fs1'"
<|code_end|>
. Use current file imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (classes, functions, or code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class ForeignDataWrapperToMapTestCase(DatabaseToMapTestCase): |
Continue the code snippet: <|code_start|> """Test mapping of existing foreign data wrappers"""
superuser = True
def test_map_fd_wrapper(self):
"Map an existing foreign data wrapper"
dbmap = self.to_map([CREATE_FDW_STMT])
assert dbmap['foreign data wrapper fdw1'] == {}
def test_map_wrapper_validator(self):
"Map a foreign data wrapper with a validator function"
dbmap = self.to_map(["CREATE FOREIGN DATA WRAPPER fdw1 "
"VALIDATOR postgresql_fdw_validator"])
assert dbmap['foreign data wrapper fdw1'] == {
'validator': 'postgresql_fdw_validator'}
def test_map_wrapper_options(self):
"Map a foreign data wrapper with options"
dbmap = self.to_map(["CREATE FOREIGN DATA WRAPPER fdw1 "
"OPTIONS (debug 'true')"])
assert dbmap['foreign data wrapper fdw1'] == {
'options': ['debug=true']}
def test_map_fd_wrapper_comment(self):
"Map a foreign data wrapper with a comment"
dbmap = self.to_map([CREATE_FDW_STMT, COMMENT_FDW_STMT])
assert dbmap['foreign data wrapper fdw1']['description'] == \
'Test foreign data wrapper fdw1'
<|code_end|>
. Use current file imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (classes, functions, or code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class ForeignDataWrapperToSqlTestCase(InputMapToSqlTestCase): |
Given the code snippet: <|code_start|>
def test_map_wrapper_validator(self):
"Map a foreign data wrapper with a validator function"
dbmap = self.to_map(["CREATE FOREIGN DATA WRAPPER fdw1 "
"VALIDATOR postgresql_fdw_validator"])
assert dbmap['foreign data wrapper fdw1'] == {
'validator': 'postgresql_fdw_validator'}
def test_map_wrapper_options(self):
"Map a foreign data wrapper with options"
dbmap = self.to_map(["CREATE FOREIGN DATA WRAPPER fdw1 "
"OPTIONS (debug 'true')"])
assert dbmap['foreign data wrapper fdw1'] == {
'options': ['debug=true']}
def test_map_fd_wrapper_comment(self):
"Map a foreign data wrapper with a comment"
dbmap = self.to_map([CREATE_FDW_STMT, COMMENT_FDW_STMT])
assert dbmap['foreign data wrapper fdw1']['description'] == \
'Test foreign data wrapper fdw1'
class ForeignDataWrapperToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation for input foreign data wrappers"""
def test_create_fd_wrapper(self):
"Create a foreign data wrapper that didn't exist"
inmap = self.std_map()
inmap.update({'foreign data wrapper fdw1': {}})
sql = self.to_sql(inmap)
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (functions, classes, or occasionally code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | assert fix_indent(sql[0]) == CREATE_FDW_STMT |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test sequences"""
CREATE_STMT = "CREATE SEQUENCE seq1"
CREATE_STMT_FULL = "CREATE SEQUENCE sd.seq1 %sSTART WITH 1 INCREMENT BY 1 " \
"NO MINVALUE NO MAXVALUE CACHE 1"
COMMENT_STMT = "COMMENT ON SEQUENCE sd.seq1 IS 'Test sequence seq1'"
<|code_end|>
using the current file's imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and any relevant context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class SequenceToMapTestCase(DatabaseToMapTestCase): |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test sequences"""
CREATE_STMT = "CREATE SEQUENCE seq1"
CREATE_STMT_FULL = "CREATE SEQUENCE sd.seq1 %sSTART WITH 1 INCREMENT BY 1 " \
"NO MINVALUE NO MAXVALUE CACHE 1"
COMMENT_STMT = "COMMENT ON SEQUENCE sd.seq1 IS 'Test sequence seq1'"
class SequenceToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of created sequences"""
def test_map_sequence_simple(self):
"Map a created sequence"
dbmap = self.to_map([CREATE_STMT])
expmap = {'start_value': 1, 'increment_by': 1, 'max_value': None,
'min_value': None, 'cache_value': 1}
assert dbmap['schema sd']['sequence seq1'] == expmap
def test_map_sequence_comment(self):
"Map a sequence with a comment"
dbmap = self.to_map([CREATE_STMT, COMMENT_STMT])
assert dbmap['schema sd']['sequence seq1']['description'] == \
'Test sequence seq1'
<|code_end|>
. Write the next line using the current file imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
, which may include functions, classes, or code. Output only the next line. | class SequenceToSqlTestCase(InputMapToSqlTestCase): |
Using the snippet: <|code_start|>
class SequenceToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of created sequences"""
def test_map_sequence_simple(self):
"Map a created sequence"
dbmap = self.to_map([CREATE_STMT])
expmap = {'start_value': 1, 'increment_by': 1, 'max_value': None,
'min_value': None, 'cache_value': 1}
assert dbmap['schema sd']['sequence seq1'] == expmap
def test_map_sequence_comment(self):
"Map a sequence with a comment"
dbmap = self.to_map([CREATE_STMT, COMMENT_STMT])
assert dbmap['schema sd']['sequence seq1']['description'] == \
'Test sequence seq1'
class SequenceToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation from input sequences"""
def test_create_sequence_simple(self):
"Create a sequence"
inmap = self.std_map()
inmap['schema sd'].update({'sequence seq1': {
'start_value': 1, 'increment_by': 1, 'max_value': None,
'min_value': None, 'cache_value': 1, 'data_type': 'integer'}})
sql = self.to_sql(inmap)
mod = 'AS integer ' if self.db.version >= 100000 else ''
<|code_end|>
, determine the next line of code. You have imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (class names, function names, or code) available:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | assert fix_indent(sql[0]) == CREATE_STMT_FULL % mod |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test functions"""
SOURCE1 = "SELECT 'dummy'::text"
CREATE_STMT1 = "CREATE FUNCTION sd.f1() RETURNS text LANGUAGE sql IMMUTABLE " \
"AS $_$%s$_$" % SOURCE1
SOURCE2 = "SELECT GREATEST($1, $2)"
CREATE_STMT2 = "CREATE FUNCTION sd.f1(integer, integer) RETURNS integer " \
"LANGUAGE sql IMMUTABLE AS $_$%s$_$" % SOURCE2
COMMENT_STMT = "COMMENT ON FUNCTION sd.f1(integer, integer) IS " \
"'Test function f1'"
SOURCE3 = "SELECT * FROM generate_series($1, $2)"
CREATE_STMT3 = "CREATE FUNCTION f2(integer, integer) RETURNS SETOF integer " \
"ROWS 20 LANGUAGE sql IMMUTABLE AS $_$%s$_$" % SOURCE3
SOURCE4 = "SELECT $1 + $2"
CREATE_STMT4 = "CREATE FUNCTION fadd(integer, integer) RETURNS integer " \
"LANGUAGE sql IMMUTABLE AS $_$%s$_$" % SOURCE4
SOURCE5 = "SELECT $1 - $2"
CREATE_STMT5 = "CREATE FUNCTION fsub(integer, integer) RETURNS integer " \
"LANGUAGE sql IMMUTABLE AS $_$%s$_$" % SOURCE5
<|code_end|>
. Write the next line using the current file imports:
import pytest
from inspect import cleandoc
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
, which may include functions, classes, or code. Output only the next line. | class FunctionToMapTestCase(DatabaseToMapTestCase): |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test triggers"""
FUNC_SRC = "BEGIN NEW.c3 := CURRENT_DATE; RETURN NEW; END"
FUNC_INSTEAD_SRC = "BEGIN INSERT INTO t1 VALUES (NEW.c1, NEW.c2, now()); " \
"RETURN NULL; END"
CREATE_TABLE_STMT = "CREATE TABLE sd.t1 (c1 integer, c2 text, " \
"c3 date)"
CREATE_TABLE_STMT2 = "CREATE TABLE t1 (c1 integer, c2 text, " \
"c3 text, tsidx tsvector)"
CREATE_FUNC_STMT = "CREATE FUNCTION sd.f1() RETURNS trigger LANGUAGE plpgsql" \
" AS $_$%s$_$" % FUNC_SRC
CREATE_STMT = "CREATE TRIGGER tr1 BEFORE INSERT OR UPDATE ON sd.t1 " \
"FOR EACH ROW EXECUTE PROCEDURE sd.f1()"
COMMENT_STMT = "COMMENT ON TRIGGER tr1 ON sd.t1 IS 'Test trigger tr1'"
<|code_end|>
. Use current file imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (classes, functions, or code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class TriggerToMapTestCase(DatabaseToMapTestCase): |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test tables"""
CREATE_STMT = "CREATE TABLE sd.t1 (c1 integer, c2 text)"
COMMENT_STMT = "COMMENT ON TABLE sd.t1 IS 'Test table t1'"
CREATE_STOR_PARAMS = CREATE_STMT + \
" WITH (fillfactor=90, autovacuum_enabled=false)"
<|code_end|>
using the current file's imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and any relevant context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class TableToMapTestCase(DatabaseToMapTestCase): |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test domains"""
CREATEFUNC_STMT = ("CREATE FUNCTION sd.dc1(integer) RETURNS bool LANGUAGE sql "
"IMMUTABLE AS $_$select true$_$")
CREATE_STMT = "CREATE DOMAIN sd.d1 AS integer"
DROP_STMT = "DROP DOMAIN IF EXISTS d1"
COMMENT_STMT = "COMMENT ON DOMAIN d1 IS 'Test domain d1'"
<|code_end|>
. Use current file imports:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (classes, functions, or code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class DomainToMapTestCase(DatabaseToMapTestCase): |
Based on the snippet: <|code_start|> 'expression': 'sd.dc1(VALUE)',
'depends_on': ['function dc1(integer)']}}}
assert dbmap['schema sd']['domain d1'] == expmap
class DomainToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation from input domains"""
def test_create_domain_simple(self):
"Create a simple domain"
inmap = self.std_map()
inmap['schema sd'].update({'domain d1': {'type': 'integer'}})
sql = self.to_sql(inmap)
assert sql == [CREATE_STMT]
def test_create_domain_default(self):
"Create a domain with a DEFAULT and NOT NULL"
inmap = self.std_map()
inmap['schema sd'].update({'domain d1': {
'type': 'integer', 'not_null': True, 'default': 0}})
sql = self.to_sql(inmap)
assert sql == [CREATE_STMT + " NOT NULL DEFAULT 0"]
def test_create_domain_check(self):
"Create a domain with a CHECK constraint"
inmap = self.std_map()
inmap['schema sd'].update({'domain d1': {
'type': 'integer', 'check_constraints': {'d1_check': {
'expression': '(VALUE >= 1888)'}}}})
sql = self.to_sql(inmap)
<|code_end|>
, predict the immediate next line with the help of imports:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (classes, functions, sometimes code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | assert fix_indent(sql[0]) == CREATE_STMT |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test collations
These tests require that the locale fr_FR.utf8 (or equivalent) be installed.
"""
if sys.platform == 'win32':
COLL = 'French_France.1252'
else:
COLL = 'fr_FR.UTF-8'
CREATE_STMT = "CREATE COLLATION sd.c1 (LC_COLLATE = '%s', LC_CTYPE = '%s')" % (
COLL, COLL)
COMMENT_STMT = "COMMENT ON COLLATION sd.c1 IS 'Test collation c1'"
<|code_end|>
, generate the next line using the imports in this file:
import sys
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (functions, classes, or occasionally code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace('   ', ' ').replace('  ', ' ').replace('\n    ', ' '). \
# replace('( ', '(')
. Output only the next line. | class CollationToMapTestCase(DatabaseToMapTestCase): |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test views"""
CREATE_STMT = "CREATE VIEW sd.v1 AS SELECT now()::date AS today"
CREATE_TBL = "CREATE TABLE sd.t1 (c1 integer, c2 text, c3 integer)"
CREATE_STMT2 = "CREATE VIEW sd.v1 AS SELECT c1, c3 * 2 AS c2 FROM t1"
COMMENT_STMT = "COMMENT ON VIEW sd.v1 IS 'Test view v1'"
VIEW_DEFN = " SELECT now()::date AS today;"
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace('   ', ' ').replace('  ', ' ').replace('\n    ', ' '). \
# replace('( ', '(')
which might include code, classes, or functions. Output only the next line. | class ViewToMapTestCase(DatabaseToMapTestCase): |
Based on the snippet: <|code_start|>"""Test schemas"""
CREATE_STMT = "CREATE SCHEMA s1"
COMMENT_STMT = "COMMENT ON SCHEMA s1 IS 'Test schema s1'"
class SchemaToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of created schemas"""
def test_map_schema(self):
"Map a created schema"
dbmap = self.to_map([CREATE_STMT])
assert dbmap['schema s1'] == {}
def test_map_schema_comment(self):
"Map a schema comment"
dbmap = self.to_map([CREATE_STMT, COMMENT_STMT])
assert dbmap['schema s1'] == {'description': 'Test schema s1'}
def test_map_select_schema(self):
"Map a single schema when three schemas exist"
stmts = [CREATE_STMT, "CREATE SCHEMA s2", "CREATE SCHEMA s3"]
dbmap = self.to_map(stmts, schemas=['s2'])
assert 'schema s1' not in dbmap
assert dbmap['schema s2'] == {}
assert 'schema s3' not in dbmap
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase
and context (classes, functions, sometimes code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
. Output only the next line. | class SchemaToSqlTestCase(InputMapToSqlTestCase): |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test tablespaces
These tests require the existence of tablespaces ts1 and ts2.
They should be owned by the user running the tests or the user should
have been granted CREATE (or ALL) privileges on the tablespaces.
"""
CREATE_TABLE = "CREATE TABLE sd.t1 (c1 integer, c2 text) TABLESPACE ts1"
<|code_end|>
using the current file's imports:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and any relevant context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace('   ', ' ').replace('  ', ' ').replace('\n    ', ' '). \
# replace('( ', '(')
. Output only the next line. | class ToMapTestCase(DatabaseToMapTestCase): |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test object ownership
The majority of other tests exclude owner information. These
explicitly request it.
"""
CREATE_TABLE = "CREATE TABLE sd.t1 (c1 integer, c2 text)"
SOURCE1 = "SELECT 'dummy'::text"
SOURCE2 = "SELECT $1 * $2"
CREATE_FUNC = "CREATE FUNCTION sd.f1() RETURNS text LANGUAGE sql IMMUTABLE " \
"AS $_$%s$_$" % SOURCE1
CREATE_TYPE = "CREATE TYPE sd.t1 AS (x integer, y integer)"
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace('   ', ' ').replace('  ', ' ').replace('\n    ', ' '). \
# replace('( ', '(')
which might include code, classes, or functions. Output only the next line. | class OwnerToMapTestCase(DatabaseToMapTestCase): |
Given the code snippet: <|code_start|>CREATE_TYPE = "CREATE TYPE sd.t1 AS (x integer, y integer)"
class OwnerToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of object owner information"""
def test_map_type(self):
"Map a composite type"
dbmap = self.to_map([CREATE_TYPE], no_owner=False)
expmap = {'attributes': [{'x': {'type': 'integer'}},
{'y': {'type': 'integer'}}],
'owner': self.db.user}
assert dbmap['schema sd']['type t1'] == expmap
def test_map_table(self):
"Map a table"
dbmap = self.to_map([CREATE_TABLE], no_owner=False)
expmap = {'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}],
'owner': self.db.user}
assert dbmap['schema sd']['table t1'] == expmap
def test_map_function(self):
"Map a function"
dbmap = self.to_map([CREATE_FUNC], no_owner=False)
expmap = {'language': 'sql', 'returns': 'text', 'owner': self.db.user,
'source': SOURCE1, 'volatility': 'immutable'}
assert dbmap['schema sd']['function f1()'] == expmap
<|code_end|>
, generate the next line using the imports in this file:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (functions, classes, or occasionally code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace('   ', ' ').replace('  ', ' ').replace('\n    ', ' '). \
# replace('( ', '(')
. Output only the next line. | class OwnerToSqlTestCase(InputMapToSqlTestCase): |
Predict the next line for this snippet: <|code_start|> 'owner': self.db.user}
assert dbmap['schema sd']['type t1'] == expmap
def test_map_table(self):
"Map a table"
dbmap = self.to_map([CREATE_TABLE], no_owner=False)
expmap = {'columns': [{'c1': {'type': 'integer'}},
{'c2': {'type': 'text'}}],
'owner': self.db.user}
assert dbmap['schema sd']['table t1'] == expmap
def test_map_function(self):
"Map a function"
dbmap = self.to_map([CREATE_FUNC], no_owner=False)
expmap = {'language': 'sql', 'returns': 'text', 'owner': self.db.user,
'source': SOURCE1, 'volatility': 'immutable'}
assert dbmap['schema sd']['function f1()'] == expmap
class OwnerToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation of owner object information"""
def test_create_type(self):
"Create a composite type"
inmap = self.std_map()
inmap['schema sd'].update({'type t1': {
'attributes': [{'x': {'type': 'integer'}},
{'y': {'type': 'integer'}}],
'owner': self.db.user}})
sql = self.to_sql(inmap)
<|code_end|>
with the help of current file imports:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace('   ', ' ').replace('  ', ' ').replace('\n    ', ' '). \
# replace('( ', '(')
, which may contain function names, class names, or code. Output only the next line. | assert fix_indent(sql[0]) == CREATE_TYPE |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
"""Test event triggers"""
FUNC_SRC = "BEGIN RAISE NOTICE 'Command % executed', tg_tag; END"
CREATE_FUNC_STMT = "CREATE FUNCTION sd.f1() RETURNS event_trigger " \
"LANGUAGE plpgsql AS $_$%s$_$" % FUNC_SRC
CREATE_STMT = "CREATE EVENT TRIGGER et1 ON ddl_command_end %s" \
"EXECUTE PROCEDURE sd.f1()"
DROP_TABLE_STMT = "DROP TABLE IF EXISTS t1"
DROP_FUNC_STMT = "DROP FUNCTION IF EXISTS f1()"
COMMENT_STMT = "COMMENT ON EVENT TRIGGER et1 IS 'Test event trigger et1'"
<|code_end|>
. Use current file imports:
(from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent)
and context including class names, function names, or small code snippets from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class EventTriggerToMapTestCase(DatabaseToMapTestCase): |
Based on the snippet: <|code_start|> 'enabled': True, 'event': 'ddl_command_end',
'tags': ['CREATE TABLE', 'CREATE VIEW'], 'procedure': 'sd.f1()'}
def test_map_event_trigger_comment(self):
"Map a trigger comment"
stmts = [CREATE_FUNC_STMT, CREATE_STMT % '', COMMENT_STMT]
dbmap = self.to_map(stmts)
assert dbmap['event trigger et1']['description'] == \
'Test event trigger et1'
class EventTriggerToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation from input triggers"""
def setUp(self):
super(self.__class__, self).setUp()
if self.db.version < 90000:
if not self.db.is_plpgsql_installed():
self.db.execute_commit("CREATE LANGUAGE plpgsql")
def test_create_event_trigger_simple(self):
"Create a simple event trigger"
inmap = self.std_map(plpgsql_installed=True)
inmap['schema sd'].update({'function f1()': {
'language': 'plpgsql', 'returns': 'event_trigger',
'source': FUNC_SRC}})
inmap.update({'event trigger et1': {
'enabled': True, 'event': 'ddl_command_end',
'procedure': 'sd.f1()'}})
sql = self.to_sql(inmap)
<|code_end|>
, predict the immediate next line with the help of imports:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (classes, functions, sometimes code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | assert fix_indent(sql[0]) == CREATE_FUNC_STMT |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test enums and other types"""
CREATE_COMPOSITE_STMT = "CREATE TYPE sd.t1 AS " \
"(x integer, y integer, z integer)"
CREATE_ENUM_STMT = "CREATE TYPE sd.t1 AS ENUM ('red', 'green', 'blue')"
CREATE_SHELL_STMT = "CREATE TYPE sd.t1"
CREATE_RANGE_STMT = "CREATE TYPE sd.t1 AS RANGE (SUBTYPE = smallint)"
CREATE_FUNC_IN = "CREATE FUNCTION sd.t1textin(cstring) RETURNS t1 " \
"LANGUAGE internal IMMUTABLE STRICT AS $$textin$$"
CREATE_FUNC_OUT = "CREATE FUNCTION sd.t1textout(sd.t1) RETURNS cstring " \
"LANGUAGE internal IMMUTABLE STRICT AS $$textout$$"
CREATE_TYPE_STMT = "CREATE TYPE t1 (INPUT = t1textin, OUTPUT = t1textout)"
COMMENT_STMT = "COMMENT ON TYPE t1 IS 'Test type t1'"
<|code_end|>
with the help of current file imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
, which may contain function names, class names, or code. Output only the next line. | class CompositeToMapTestCase(DatabaseToMapTestCase): |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test languages"""
CREATE_STMT = "CREATE LANGUAGE plperl"
DROP_STMT = "DROP LANGUAGE IF EXISTS plperl CASCADE"
COMMENT_STMT = "COMMENT ON LANGUAGE plperl IS 'Test language PL/Perl'"
<|code_end|>
, determine the next line of code. You have imports:
import pytest
import psycopg2
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase
and context (class names, function names, or code) available:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
. Output only the next line. | class LanguageToMapTestCase(DatabaseToMapTestCase): |
Based on the snippet: <|code_start|> """Test mapping of existing languages"""
def test_map_language(self):
"Map an existing language"
if self.db.version >= 90100:
self.skipTest('Only available before PG 9.1')
dbmap = self.to_map([DROP_STMT, CREATE_STMT])
assert dbmap['language plperl'] == {'trusted': True}
def test_map_language_comment(self):
"Map a language with a comment"
if self.db.version >= 90100:
self.skipTest('Only available before PG 9.1')
dbmap = self.to_map([DROP_STMT, CREATE_STMT, COMMENT_STMT],
superuser=True)
assert dbmap['language plperl']['description'] == \
'Test language PL/Perl'
def test_map_language_bug_103(self):
"Test a function created with language other than plpgsql/plperl"
try:
self.to_map(["CREATE OR REPLACE LANGUAGE plpython3u"])
except psycopg2.OperationalError as e:
self.skipTest("plpython3 installation failed: %s" % e)
m = self.to_map(["CREATE FUNCTION test103() RETURNS int AS "
"'return 1' LANGUAGE plpython3u"])
self.to_map(["DROP LANGUAGE plpython3u CASCADE"])
assert 'language plpython3u' in m
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
import psycopg2
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase
and context (classes, functions, sometimes code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
. Output only the next line. | class LanguageToSqlTestCase(InputMapToSqlTestCase): |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test casts"""
SOURCE = "SELECT CAST($1::int AS boolean)"
CREATE_FUNC = "CREATE FUNCTION int2_bool(smallint) RETURNS boolean " \
"LANGUAGE sql IMMUTABLE AS $_$%s$_$" % SOURCE
CREATE_DOMAIN = "CREATE DOMAIN d1 AS integer"
CREATE_STMT1 = "CREATE CAST (smallint AS boolean) WITH FUNCTION " \
"sd.int2_bool(smallint)"
CREATE_STMT3 = "CREATE CAST (d1 AS integer) WITH INOUT AS IMPLICIT"
DROP_STMT = "DROP CAST IF EXISTS (smallint AS boolean)"
COMMENT_STMT = "COMMENT ON CAST (smallint AS boolean) IS 'Test cast 1'"
<|code_end|>
, determine the next line of code. You have imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (class names, function names, or code) available:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class CastToMapTestCase(DatabaseToMapTestCase): |
Using the snippet: <|code_start|>CREATE_STMT3 = "CREATE CAST (d1 AS integer) WITH INOUT AS IMPLICIT"
DROP_STMT = "DROP CAST IF EXISTS (smallint AS boolean)"
COMMENT_STMT = "COMMENT ON CAST (smallint AS boolean) IS 'Test cast 1'"
class CastToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of existing casts"""
def test_map_cast_function(self):
"Map a cast with a function"
dbmap = self.to_map([CREATE_FUNC, CREATE_STMT1], superuser=True)
expmap = {'function': 'sd.int2_bool(smallint)', 'context': 'explicit',
'method': 'function'}
assert dbmap['cast (smallint as boolean)'] == expmap
def test_map_cast_inout(self):
"Map a cast with INOUT"
dbmap = self.to_map([CREATE_DOMAIN, CREATE_STMT3])
expmap = {'context': 'implicit', 'method': 'inout',
'depends_on': ['domain d1']}
assert dbmap['cast (sd.d1 as integer)'] == expmap
def test_map_cast_comment(self):
"Map a cast comment"
dbmap = self.to_map([CREATE_FUNC, CREATE_STMT1, COMMENT_STMT],
superuser=True)
assert dbmap['cast (smallint as boolean)']['description'] == \
'Test cast 1'
<|code_end|>
, determine the next line of code. You have imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (class names, function names, or code) available:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class CastToSqlTestCase(InputMapToSqlTestCase): |
Given the following code snippet before the placeholder: <|code_start|> expmap = {'function': 'sd.int2_bool(smallint)', 'context': 'explicit',
'method': 'function'}
assert dbmap['cast (smallint as boolean)'] == expmap
def test_map_cast_inout(self):
"Map a cast with INOUT"
dbmap = self.to_map([CREATE_DOMAIN, CREATE_STMT3])
expmap = {'context': 'implicit', 'method': 'inout',
'depends_on': ['domain d1']}
assert dbmap['cast (sd.d1 as integer)'] == expmap
def test_map_cast_comment(self):
"Map a cast comment"
dbmap = self.to_map([CREATE_FUNC, CREATE_STMT1, COMMENT_STMT],
superuser=True)
assert dbmap['cast (smallint as boolean)']['description'] == \
'Test cast 1'
class CastToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation from input casts"""
def test_create_cast_function(self):
"Create a cast with a function"
stmts = [DROP_STMT, CREATE_FUNC]
inmap = self.std_map()
inmap.update({'cast (smallint as boolean)': {
'function': 'sd.int2_bool(smallint)', 'context': 'explicit',
'method': 'function'}})
sql = self.to_sql(inmap, stmts)
<|code_end|>
, predict the next line using imports from the current file:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context including class names, function names, and sometimes code from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | assert fix_indent(sql[0]) == CREATE_STMT1 |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test conversions"""
CREATE_STMT = "CREATE CONVERSION sd.c1 FOR 'LATIN1' TO 'UTF8' " \
"FROM iso8859_1_to_utf8"
DROP_STMT = "DROP CONVERSION IF EXISTS sd.c1"
COMMENT_STMT = "COMMENT ON CONVERSION sd.c1 IS 'Test conversion c1'"
<|code_end|>
, determine the next line of code. You have imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (class names, function names, or code) available:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class ConversionToMapTestCase(DatabaseToMapTestCase): |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test conversions"""
CREATE_STMT = "CREATE CONVERSION sd.c1 FOR 'LATIN1' TO 'UTF8' " \
"FROM iso8859_1_to_utf8"
DROP_STMT = "DROP CONVERSION IF EXISTS sd.c1"
COMMENT_STMT = "COMMENT ON CONVERSION sd.c1 IS 'Test conversion c1'"
class ConversionToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of existing conversions"""
def test_map_conversion1(self):
"Map a conversion"
dbmap = self.to_map([CREATE_STMT])
expmap = {'source_encoding': 'LATIN1', 'dest_encoding': 'UTF8',
'function': 'iso8859_1_to_utf8'}
assert dbmap['schema sd']['conversion c1'] == expmap
def test_map_conversion_comment(self):
"Map a conversion comment"
dbmap = self.to_map([CREATE_STMT, COMMENT_STMT])
assert dbmap['schema sd']['conversion c1']['description'] == \
'Test conversion c1'
<|code_end|>
. Write the next line using the current file imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
, which may include functions, classes, or code. Output only the next line. | class ConversionToSqlTestCase(InputMapToSqlTestCase): |
Using the snippet: <|code_start|>COMMENT_STMT = "COMMENT ON CONVERSION sd.c1 IS 'Test conversion c1'"
class ConversionToMapTestCase(DatabaseToMapTestCase):
"""Test mapping of existing conversions"""
def test_map_conversion1(self):
"Map a conversion"
dbmap = self.to_map([CREATE_STMT])
expmap = {'source_encoding': 'LATIN1', 'dest_encoding': 'UTF8',
'function': 'iso8859_1_to_utf8'}
assert dbmap['schema sd']['conversion c1'] == expmap
def test_map_conversion_comment(self):
"Map a conversion comment"
dbmap = self.to_map([CREATE_STMT, COMMENT_STMT])
assert dbmap['schema sd']['conversion c1']['description'] == \
'Test conversion c1'
class ConversionToSqlTestCase(InputMapToSqlTestCase):
"""Test SQL generation from input conversions"""
def test_create_conversion(self):
"Create a conversion"
inmap = self.std_map()
inmap['schema sd'].update({'conversion c1': {
'source_encoding': 'LATIN1', 'dest_encoding': 'UTF8',
'function': 'iso8859_1_to_utf8'}})
sql = self.to_sql(inmap)
<|code_end|>
, determine the next line of code. You have imports:
import pytest
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (class names, function names, or code) available:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | assert fix_indent(sql[0]) == CREATE_STMT |
Given the code snippet: <|code_start|> ('INTERVAL MINUTE TO SECOND', 'interval minute to second'),
('INTERVAL SECOND(3)', 'interval second(3)'),
('INTERVAL HOUR TO SECOND(5)', 'interval hour to second(5)'),
('BOOLEAN', 'boolean'),
('POINT', 'point'),
('LINE', 'line'),
('LSEG', 'lseg'),
('BOX', 'box'),
('PATH', 'path'),
('POLYGON', 'polygon'),
('CIRCLE', 'circle'),
('cidr', 'cidr'),
('inet', 'inet'),
('macaddr', 'macaddr'),
('BIT(2)', 'bit(2)'),
('BIT VARYING(100)', 'bit varying(100)'),
('BIT', 'bit(1)'),
('BIT VARYING', 'bit varying'),
('tsvector', 'tsvector'),
('tsquery', 'tsquery'),
('UUID', 'uuid'),
('XML', 'xml'),
('JSON', 'json')]
CREATE_STMT1 = "CREATE TABLE t1 (c1 integer, c2 text)"
CREATE_STMT2 = "CREATE TABLE t1 (c1 integer, c2 text, c3 date)"
CREATE_STMT3 = "CREATE TABLE t1 (c1 integer, c2 text, c3 date, c4 text)"
DROP_COL_STMT = "ALTER TABLE t1 DROP COLUMN c3"
<|code_end|>
, generate the next line using the imports in this file:
from pyrseas.testutils import DatabaseToMapTestCase
from pyrseas.testutils import InputMapToSqlTestCase, fix_indent
and context (functions, classes, or occasionally code) from other files:
# Path: pyrseas/testutils.py
# class DatabaseToMapTestCase(PyrseasTestCase):
# """Base class for "database to map" test cases"""
#
# superuser = False
#
# def to_map(self, stmts, config={}, schemas=[], tables=[], no_owner=True,
# no_privs=True, superuser=False, multiple_files=False):
# """Execute statements and return a database map.
#
# :param stmts: list of SQL statements to execute
# :param config: dictionary of configuration information
# :param schemas: list of schemas to map
# :param tables: list of tables to map
# :param no_owner: exclude object owner information
# :param no_privs: exclude privilege information
# :param superuser: must be superuser to run
# :param multiple_files: emulate --multiple_files option
# :return: possibly trimmed map of database
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
# if multiple_files:
# self.cfg.merge({'files': {'metadata_path': os.path.join(
# TEST_DIR, self.cfg['repository']['metadata'])}})
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, tables=tables, no_owner=no_owner,
# no_privs=no_privs, multiple_files=multiple_files)
# self.cfg.merge(config)
# return self.database().to_map()
#
# def yaml_load(self, filename, subdir=None):
# """Read a file in the metadata_path and process it with YAML load
#
# :param filename: name of the file
# :param subdir: name of a subdirectory where the file is located
# :return: YAML dictionary
# """
# with open(os.path.join(self.cfg['files']['metadata_path'],
# subdir or '', filename), 'r') as f:
# inmap = f.read()
# return yaml.safe_load(inmap)
#
# def remove_tempfiles(self):
# remove_temp_files(TEST_DIR)
#
# @staticmethod
# def sort_privileges(data):
# try:
# sorted_privlist = []
# for sortedItem in sorted([list(i.keys())[0]
# for i in data['privileges']]):
# sorted_privlist.append(
# [item for item in data['privileges']
# if list(item.keys())[0] == sortedItem][0])
# data['privileges'] = sorted_privlist
# finally:
# return data
#
# Path: pyrseas/testutils.py
# class InputMapToSqlTestCase(PyrseasTestCase):
# """Base class for "input map to SQL" test cases"""
#
# superuser = False
#
# def to_sql(self, inmap, stmts=None, config={}, superuser=False, schemas=[],
# revert=False, quote_reserved=False):
# """Execute statements and compare database to input map.
#
# :param inmap: dictionary defining target database
# :param stmts: list of SQL database setup statements
# :param config: dictionary of configuration information
# :param superuser: indicates test requires superuser privilege
# :param schemas: list of schemas to diff
# :param revert: generate statements to back out changes
# :param quote_reserved: fetch reserved words
# :return: list of SQL statements
# """
# if (self.superuser or superuser) and not self.db.is_superuser():
# self.skipTest("Must be a superuser to run this test")
# if stmts:
# for stmt in stmts:
# self.db.execute(stmt)
# self.db.conn.commit()
#
# if 'datacopy' in config:
# self.cfg.merge({'files': {'data_path': os.path.join(
# TEST_DIR, self.cfg['repository']['data'])}})
# self.config_options(schemas=schemas, revert=revert),
# self.cfg.merge(config)
# return self.database().diff_map(inmap, quote_reserved=quote_reserved)
#
# def std_map(self, plpgsql_installed=False):
# "Return a standard schema map for the default database"
# base = {'schema sd': {
# 'owner': self.db.user,
# 'privileges': []}}
# base.update({'extension plpgsql': {
# 'schema': 'pg_catalog', 'owner': PG_OWNER,
# 'description': "PL/pgSQL procedural language"}})
# return base
#
# def fix_indent(stmt):
# "Fix specifications which are in a new line with indentation"
# return stmt.replace(' ', ' ').replace(' ', ' ').replace('\n ', ' '). \
# replace('( ', '(')
. Output only the next line. | class ColumnToMapTestCase(DatabaseToMapTestCase): |
Predict the next line after this snippet: <|code_start|>LEAP_CYCLE_DAYS = 1461
YEAR_DAYS = 365
HAVE_30_DAYS = (4, 6, 9, 11)
HAVE_31_DAYS = (1, 3, 5, 7, 8, 10, 12)
def legal_date(year, month, day):
'''Check if this is a legal date in the Gregorian calendar'''
if month == 2:
daysinmonth = 29 if isleap(year) else 28
else:
daysinmonth = 30 if month in HAVE_30_DAYS else 31
if not 0 < day <= daysinmonth:
raise ValueError("Month {} doesn't have a day {}".format(month, day))
return True
def to_jd2(year, month, day):
'''Gregorian to Julian Day Count for years between 1801-2099'''
# http://quasar.as.utexas.edu/BillInfo/JulianDatesG.html
legal_date(year, month, day)
if month <= 2:
year = year - 1
month = month + 12
<|code_end|>
using the current file's imports:
from calendar import isleap, monthrange
from datetime import date
from .utils import floor, jwday, monthcalendarhelper
and any relevant context from other files:
# Path: src/convertdate/utils.py
# def floor(x):
# return int(math.floor(x))
#
# def jwday(j):
# '''Calculate day of week from Julian day. Consider using ``calendar.weekday``!'''
# return math.trunc((j + 0.5)) % 7
#
# def monthcalendarhelper(start_weekday, month_length):
# end_weekday = start_weekday + (month_length - 1) % 7
#
# lpad = (start_weekday + 1) % 7
# rpad = (5 - end_weekday % 7) % 6
#
# days = [None] * lpad + list(range(1, 1 + month_length)) + rpad * [None]
#
# return [days[i : i + 7] for i in range(0, len(days), 7)]
. Output only the next line. | a = floor(year / 100) |
Given snippet: <|code_start|> yearday = wjd - to_jd(year, 1, 1)
leap = isleap(year)
if yearday < 58 + leap:
leap_adj = 0
elif leap:
leap_adj = 1
else:
leap_adj = 2
month = floor((((yearday + leap_adj) * 12) + 373) / 367)
day = int(wjd - to_jd(year, month, 1)) + 1
return (year, month, day)
def month_length(year, month):
'''Calculate the length of a month in the Gregorian calendar'''
return monthrange(year, month)[1]
def monthcalendar(year, month):
'''
Return a list of lists that describe the calender for one month. Each inner
list have 7 items, one for each weekday, starting with Sunday. These items
are either ``None`` or an integer, counting from 1 to the number of days in
the month.
For Gregorian, this is very similiar to the built-in :meth:``calendar.monthcalendar``.
'''
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from calendar import isleap, monthrange
from datetime import date
from .utils import floor, jwday, monthcalendarhelper
and context:
# Path: src/convertdate/utils.py
# def floor(x):
# return int(math.floor(x))
#
# def jwday(j):
# '''Calculate day of week from Julian day. Consider using ``calendar.weekday``!'''
# return math.trunc((j + 0.5)) % 7
#
# def monthcalendarhelper(start_weekday, month_length):
# end_weekday = start_weekday + (month_length - 1) % 7
#
# lpad = (start_weekday + 1) % 7
# rpad = (5 - end_weekday % 7) % 6
#
# days = [None] * lpad + list(range(1, 1 + month_length)) + rpad * [None]
#
# return [days[i : i + 7] for i in range(0, len(days), 7)]
which might include code, classes, or functions. Output only the next line. | start_weekday = jwday(to_jd(year, month, 1)) |
Based on the snippet: <|code_start|>
if yearday < 58 + leap:
leap_adj = 0
elif leap:
leap_adj = 1
else:
leap_adj = 2
month = floor((((yearday + leap_adj) * 12) + 373) / 367)
day = int(wjd - to_jd(year, month, 1)) + 1
return (year, month, day)
def month_length(year, month):
'''Calculate the length of a month in the Gregorian calendar'''
return monthrange(year, month)[1]
def monthcalendar(year, month):
'''
    Return a list of lists that describe the calendar for one month. Each inner
    list has 7 items, one for each weekday, starting with Sunday. These items
are either ``None`` or an integer, counting from 1 to the number of days in
the month.
    For Gregorian, this is very similar to the built-in :meth:``calendar.monthcalendar``.
'''
start_weekday = jwday(to_jd(year, month, 1))
monthlen = month_length(year, month)
<|code_end|>
, predict the immediate next line with the help of imports:
from calendar import isleap, monthrange
from datetime import date
from .utils import floor, jwday, monthcalendarhelper
and context (classes, functions, sometimes code) from other files:
# Path: src/convertdate/utils.py
# def floor(x):
# return int(math.floor(x))
#
# def jwday(j):
# '''Calculate day of week from Julian day. Consider using ``calendar.weekday``!'''
# return math.trunc((j + 0.5)) % 7
#
# def monthcalendarhelper(start_weekday, month_length):
# end_weekday = start_weekday + (month_length - 1) % 7
#
# lpad = (start_weekday + 1) % 7
# rpad = (5 - end_weekday % 7) % 6
#
# days = [None] * lpad + list(range(1, 1 + month_length)) + rpad * [None]
#
# return [days[i : i + 7] for i in range(0, len(days), 7)]
. Output only the next line. | return monthcalendarhelper(start_weekday, monthlen) |
Continue the code snippet: <|code_start|> month = floor(e - 1)
else:
month = floor(e - 13)
if month > 2:
year = floor(c - 4716)
else:
year = floor(c - 4715)
day = b - d - floor(30.6001 * e)
return (year, month, day)
def to_jd(year, month, day):
'''Convert to Julian day using astronomical years (0 = 1 BC, -1 = 2 BC)'''
legal_date(year, month, day)
# Algorithm as given in Meeus, Astronomical Algorithms, Chapter 7, page 61
if month <= 2:
year -= 1
month += 12
return (floor((365.25 * (year + 4716))) + floor((30.6001 * (month + 1))) + day) - 1524.5
def from_gregorian(year, month, day):
'''Convert a Gregorian date to a Julian date.'''
return from_jd(gregorian_to_jd(year, month, day))
def to_gregorian(year, month, day):
'''Convert a Julian date to a Gregorian date.'''
<|code_end|>
. Use current file imports:
from .gregorian import from_jd as gregorian_from_jd
from .gregorian import to_jd as gregorian_to_jd
from .utils import floor, jwday, monthcalendarhelper
and context (classes, functions, or code) from other files:
# Path: src/convertdate/gregorian.py
# def from_jd(jd):
# '''Return Gregorian date in a (Y, M, D) tuple'''
# wjd = floor(jd - 0.5) + 0.5
# depoch = wjd - EPOCH
#
# quadricent = floor(depoch / INTERCALATION_CYCLE_DAYS)
# dqc = depoch % INTERCALATION_CYCLE_DAYS
#
# cent = floor(dqc / LEAP_SUPPRESSION_DAYS)
# dcent = dqc % LEAP_SUPPRESSION_DAYS
#
# quad = floor(dcent / LEAP_CYCLE_DAYS)
# dquad = dcent % LEAP_CYCLE_DAYS
#
# yindex = floor(dquad / YEAR_DAYS)
# year = quadricent * INTERCALATION_CYCLE_YEARS + cent * LEAP_SUPPRESSION_YEARS + quad * LEAP_CYCLE_YEARS + yindex
#
# if not (cent == 4 or yindex == 4):
# year += 1
#
# yearday = wjd - to_jd(year, 1, 1)
#
# leap = isleap(year)
#
# if yearday < 58 + leap:
# leap_adj = 0
# elif leap:
# leap_adj = 1
# else:
# leap_adj = 2
#
# month = floor((((yearday + leap_adj) * 12) + 373) / 367)
# day = int(wjd - to_jd(year, month, 1)) + 1
#
# return (year, month, day)
#
# Path: src/convertdate/gregorian.py
# def to_jd(year, month, day):
# '''Convert gregorian date to julian day count.'''
# legal_date(year, month, day)
#
# if month <= 2:
# leap_adj = 0
# elif isleap(year):
# leap_adj = -1
# else:
# leap_adj = -2
#
# return (
# EPOCH
# - 1
# + (YEAR_DAYS * (year - 1))
# + floor((year - 1) / LEAP_CYCLE_YEARS)
# + (-floor((year - 1) / LEAP_SUPPRESSION_YEARS))
# + floor((year - 1) / INTERCALATION_CYCLE_YEARS)
# + floor((((367 * month) - 362) / 12) + leap_adj + day)
# )
#
# Path: src/convertdate/utils.py
# def floor(x):
# return int(math.floor(x))
#
# def jwday(j):
# '''Calculate day of week from Julian day. Consider using ``calendar.weekday``!'''
# return math.trunc((j + 0.5)) % 7
#
# def monthcalendarhelper(start_weekday, month_length):
# end_weekday = start_weekday + (month_length - 1) % 7
#
# lpad = (start_weekday + 1) % 7
# rpad = (5 - end_weekday % 7) % 6
#
# days = [None] * lpad + list(range(1, 1 + month_length)) + rpad * [None]
#
# return [days[i : i + 7] for i in range(0, len(days), 7)]
. Output only the next line. | return gregorian_from_jd(to_jd(year, month, day)) |
Given the code snippet: <|code_start|> b = a + 1524
c = floor((b - 122.1) / 365.25)
d = floor(365.25 * c)
e = floor((b - d) / 30.6001)
if e < 14:
month = floor(e - 1)
else:
month = floor(e - 13)
if month > 2:
year = floor(c - 4716)
else:
year = floor(c - 4715)
day = b - d - floor(30.6001 * e)
return (year, month, day)
def to_jd(year, month, day):
'''Convert to Julian day using astronomical years (0 = 1 BC, -1 = 2 BC)'''
legal_date(year, month, day)
# Algorithm as given in Meeus, Astronomical Algorithms, Chapter 7, page 61
if month <= 2:
year -= 1
month += 12
return (floor((365.25 * (year + 4716))) + floor((30.6001 * (month + 1))) + day) - 1524.5
def from_gregorian(year, month, day):
'''Convert a Gregorian date to a Julian date.'''
<|code_end|>
, generate the next line using the imports in this file:
from .gregorian import from_jd as gregorian_from_jd
from .gregorian import to_jd as gregorian_to_jd
from .utils import floor, jwday, monthcalendarhelper
and context (functions, classes, or occasionally code) from other files:
# Path: src/convertdate/gregorian.py
# def from_jd(jd):
# '''Return Gregorian date in a (Y, M, D) tuple'''
# wjd = floor(jd - 0.5) + 0.5
# depoch = wjd - EPOCH
#
# quadricent = floor(depoch / INTERCALATION_CYCLE_DAYS)
# dqc = depoch % INTERCALATION_CYCLE_DAYS
#
# cent = floor(dqc / LEAP_SUPPRESSION_DAYS)
# dcent = dqc % LEAP_SUPPRESSION_DAYS
#
# quad = floor(dcent / LEAP_CYCLE_DAYS)
# dquad = dcent % LEAP_CYCLE_DAYS
#
# yindex = floor(dquad / YEAR_DAYS)
# year = quadricent * INTERCALATION_CYCLE_YEARS + cent * LEAP_SUPPRESSION_YEARS + quad * LEAP_CYCLE_YEARS + yindex
#
# if not (cent == 4 or yindex == 4):
# year += 1
#
# yearday = wjd - to_jd(year, 1, 1)
#
# leap = isleap(year)
#
# if yearday < 58 + leap:
# leap_adj = 0
# elif leap:
# leap_adj = 1
# else:
# leap_adj = 2
#
# month = floor((((yearday + leap_adj) * 12) + 373) / 367)
# day = int(wjd - to_jd(year, month, 1)) + 1
#
# return (year, month, day)
#
# Path: src/convertdate/gregorian.py
# def to_jd(year, month, day):
# '''Convert gregorian date to julian day count.'''
# legal_date(year, month, day)
#
# if month <= 2:
# leap_adj = 0
# elif isleap(year):
# leap_adj = -1
# else:
# leap_adj = -2
#
# return (
# EPOCH
# - 1
# + (YEAR_DAYS * (year - 1))
# + floor((year - 1) / LEAP_CYCLE_YEARS)
# + (-floor((year - 1) / LEAP_SUPPRESSION_YEARS))
# + floor((year - 1) / INTERCALATION_CYCLE_YEARS)
# + floor((((367 * month) - 362) / 12) + leap_adj + day)
# )
#
# Path: src/convertdate/utils.py
# def floor(x):
# return int(math.floor(x))
#
# def jwday(j):
# '''Calculate day of week from Julian day. Consider using ``calendar.weekday``!'''
# return math.trunc((j + 0.5)) % 7
#
# def monthcalendarhelper(start_weekday, month_length):
# end_weekday = start_weekday + (month_length - 1) % 7
#
# lpad = (start_weekday + 1) % 7
# rpad = (5 - end_weekday % 7) % 6
#
# days = [None] * lpad + list(range(1, 1 + month_length)) + rpad * [None]
#
# return [days[i : i + 7] for i in range(0, len(days), 7)]
. Output only the next line. | return from_jd(gregorian_to_jd(year, month, day)) |
Given the following code snippet before the placeholder: <|code_start|>HAVE_30_DAYS = (4, 6, 9, 11)
HAVE_31_DAYS = (1, 3, 5, 7, 8, 10, 12)
def leap(year):
return year % 4 == 0
def month_length(year, month):
if month == 2:
daysinmonth = 29 if leap(year) else 28
else:
daysinmonth = 30 if month in HAVE_30_DAYS else 31
return daysinmonth
def legal_date(year, month, day):
'''Check if this is a legal date in the Julian calendar'''
daysinmonth = month_length(year, month)
if not 0 < day <= daysinmonth:
raise ValueError("Month {} doesn't have a day {}".format(month, day))
return True
def from_jd(jd):
'''Calculate Julian calendar date from Julian day'''
jd += 0.5
<|code_end|>
, predict the next line using imports from the current file:
from .gregorian import from_jd as gregorian_from_jd
from .gregorian import to_jd as gregorian_to_jd
from .utils import floor, jwday, monthcalendarhelper
and context including class names, function names, and sometimes code from other files:
# Path: src/convertdate/gregorian.py
# def from_jd(jd):
# '''Return Gregorian date in a (Y, M, D) tuple'''
# wjd = floor(jd - 0.5) + 0.5
# depoch = wjd - EPOCH
#
# quadricent = floor(depoch / INTERCALATION_CYCLE_DAYS)
# dqc = depoch % INTERCALATION_CYCLE_DAYS
#
# cent = floor(dqc / LEAP_SUPPRESSION_DAYS)
# dcent = dqc % LEAP_SUPPRESSION_DAYS
#
# quad = floor(dcent / LEAP_CYCLE_DAYS)
# dquad = dcent % LEAP_CYCLE_DAYS
#
# yindex = floor(dquad / YEAR_DAYS)
# year = quadricent * INTERCALATION_CYCLE_YEARS + cent * LEAP_SUPPRESSION_YEARS + quad * LEAP_CYCLE_YEARS + yindex
#
# if not (cent == 4 or yindex == 4):
# year += 1
#
# yearday = wjd - to_jd(year, 1, 1)
#
# leap = isleap(year)
#
# if yearday < 58 + leap:
# leap_adj = 0
# elif leap:
# leap_adj = 1
# else:
# leap_adj = 2
#
# month = floor((((yearday + leap_adj) * 12) + 373) / 367)
# day = int(wjd - to_jd(year, month, 1)) + 1
#
# return (year, month, day)
#
# Path: src/convertdate/gregorian.py
# def to_jd(year, month, day):
# '''Convert gregorian date to julian day count.'''
# legal_date(year, month, day)
#
# if month <= 2:
# leap_adj = 0
# elif isleap(year):
# leap_adj = -1
# else:
# leap_adj = -2
#
# return (
# EPOCH
# - 1
# + (YEAR_DAYS * (year - 1))
# + floor((year - 1) / LEAP_CYCLE_YEARS)
# + (-floor((year - 1) / LEAP_SUPPRESSION_YEARS))
# + floor((year - 1) / INTERCALATION_CYCLE_YEARS)
# + floor((((367 * month) - 362) / 12) + leap_adj + day)
# )
#
# Path: src/convertdate/utils.py
# def floor(x):
# return int(math.floor(x))
#
# def jwday(j):
# '''Calculate day of week from Julian day. Consider using ``calendar.weekday``!'''
# return math.trunc((j + 0.5)) % 7
#
# def monthcalendarhelper(start_weekday, month_length):
# end_weekday = start_weekday + (month_length - 1) % 7
#
# lpad = (start_weekday + 1) % 7
# rpad = (5 - end_weekday % 7) % 6
#
# days = [None] * lpad + list(range(1, 1 + month_length)) + rpad * [None]
#
# return [days[i : i + 7] for i in range(0, len(days), 7)]
. Output only the next line. | a = floor(jd) |
Given snippet: <|code_start|> return (year, month, day)
def to_jd(year, month, day):
'''Convert to Julian day using astronomical years (0 = 1 BC, -1 = 2 BC)'''
legal_date(year, month, day)
# Algorithm as given in Meeus, Astronomical Algorithms, Chapter 7, page 61
if month <= 2:
year -= 1
month += 12
return (floor((365.25 * (year + 4716))) + floor((30.6001 * (month + 1))) + day) - 1524.5
def from_gregorian(year, month, day):
'''Convert a Gregorian date to a Julian date.'''
return from_jd(gregorian_to_jd(year, month, day))
def to_gregorian(year, month, day):
'''Convert a Julian date to a Gregorian date.'''
return gregorian_from_jd(to_jd(year, month, day))
def monthcalendar(year, month):
'''
    Returns a matrix representing a month's calendar. Each row represents a week;
days outside of the month are represented by zeros. Each week begins with Sunday.
'''
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .gregorian import from_jd as gregorian_from_jd
from .gregorian import to_jd as gregorian_to_jd
from .utils import floor, jwday, monthcalendarhelper
and context:
# Path: src/convertdate/gregorian.py
# def from_jd(jd):
# '''Return Gregorian date in a (Y, M, D) tuple'''
# wjd = floor(jd - 0.5) + 0.5
# depoch = wjd - EPOCH
#
# quadricent = floor(depoch / INTERCALATION_CYCLE_DAYS)
# dqc = depoch % INTERCALATION_CYCLE_DAYS
#
# cent = floor(dqc / LEAP_SUPPRESSION_DAYS)
# dcent = dqc % LEAP_SUPPRESSION_DAYS
#
# quad = floor(dcent / LEAP_CYCLE_DAYS)
# dquad = dcent % LEAP_CYCLE_DAYS
#
# yindex = floor(dquad / YEAR_DAYS)
# year = quadricent * INTERCALATION_CYCLE_YEARS + cent * LEAP_SUPPRESSION_YEARS + quad * LEAP_CYCLE_YEARS + yindex
#
# if not (cent == 4 or yindex == 4):
# year += 1
#
# yearday = wjd - to_jd(year, 1, 1)
#
# leap = isleap(year)
#
# if yearday < 58 + leap:
# leap_adj = 0
# elif leap:
# leap_adj = 1
# else:
# leap_adj = 2
#
# month = floor((((yearday + leap_adj) * 12) + 373) / 367)
# day = int(wjd - to_jd(year, month, 1)) + 1
#
# return (year, month, day)
#
# Path: src/convertdate/gregorian.py
# def to_jd(year, month, day):
# '''Convert gregorian date to julian day count.'''
# legal_date(year, month, day)
#
# if month <= 2:
# leap_adj = 0
# elif isleap(year):
# leap_adj = -1
# else:
# leap_adj = -2
#
# return (
# EPOCH
# - 1
# + (YEAR_DAYS * (year - 1))
# + floor((year - 1) / LEAP_CYCLE_YEARS)
# + (-floor((year - 1) / LEAP_SUPPRESSION_YEARS))
# + floor((year - 1) / INTERCALATION_CYCLE_YEARS)
# + floor((((367 * month) - 362) / 12) + leap_adj + day)
# )
#
# Path: src/convertdate/utils.py
# def floor(x):
# return int(math.floor(x))
#
# def jwday(j):
# '''Calculate day of week from Julian day. Consider using ``calendar.weekday``!'''
# return math.trunc((j + 0.5)) % 7
#
# def monthcalendarhelper(start_weekday, month_length):
# end_weekday = start_weekday + (month_length - 1) % 7
#
# lpad = (start_weekday + 1) % 7
# rpad = (5 - end_weekday % 7) % 6
#
# days = [None] * lpad + list(range(1, 1 + month_length)) + rpad * [None]
#
# return [days[i : i + 7] for i in range(0, len(days), 7)]
which might include code, classes, or functions. Output only the next line. | start_weekday = jwday(to_jd(year, month, 1)) |
Predict the next line after this snippet: <|code_start|>
def to_jd(year, month, day):
'''Convert to Julian day using astronomical years (0 = 1 BC, -1 = 2 BC)'''
legal_date(year, month, day)
# Algorithm as given in Meeus, Astronomical Algorithms, Chapter 7, page 61
if month <= 2:
year -= 1
month += 12
return (floor((365.25 * (year + 4716))) + floor((30.6001 * (month + 1))) + day) - 1524.5
def from_gregorian(year, month, day):
'''Convert a Gregorian date to a Julian date.'''
return from_jd(gregorian_to_jd(year, month, day))
def to_gregorian(year, month, day):
'''Convert a Julian date to a Gregorian date.'''
return gregorian_from_jd(to_jd(year, month, day))
def monthcalendar(year, month):
'''
    Returns a matrix representing a month's calendar. Each row represents a week;
days outside of the month are represented by zeros. Each week begins with Sunday.
'''
start_weekday = jwday(to_jd(year, month, 1))
monthlen = month_length(year, month)
<|code_end|>
using the current file's imports:
from .gregorian import from_jd as gregorian_from_jd
from .gregorian import to_jd as gregorian_to_jd
from .utils import floor, jwday, monthcalendarhelper
and any relevant context from other files:
# Path: src/convertdate/gregorian.py
# def from_jd(jd):
# '''Return Gregorian date in a (Y, M, D) tuple'''
# wjd = floor(jd - 0.5) + 0.5
# depoch = wjd - EPOCH
#
# quadricent = floor(depoch / INTERCALATION_CYCLE_DAYS)
# dqc = depoch % INTERCALATION_CYCLE_DAYS
#
# cent = floor(dqc / LEAP_SUPPRESSION_DAYS)
# dcent = dqc % LEAP_SUPPRESSION_DAYS
#
# quad = floor(dcent / LEAP_CYCLE_DAYS)
# dquad = dcent % LEAP_CYCLE_DAYS
#
# yindex = floor(dquad / YEAR_DAYS)
# year = quadricent * INTERCALATION_CYCLE_YEARS + cent * LEAP_SUPPRESSION_YEARS + quad * LEAP_CYCLE_YEARS + yindex
#
# if not (cent == 4 or yindex == 4):
# year += 1
#
# yearday = wjd - to_jd(year, 1, 1)
#
# leap = isleap(year)
#
# if yearday < 58 + leap:
# leap_adj = 0
# elif leap:
# leap_adj = 1
# else:
# leap_adj = 2
#
# month = floor((((yearday + leap_adj) * 12) + 373) / 367)
# day = int(wjd - to_jd(year, month, 1)) + 1
#
# return (year, month, day)
#
# Path: src/convertdate/gregorian.py
# def to_jd(year, month, day):
# '''Convert gregorian date to julian day count.'''
# legal_date(year, month, day)
#
# if month <= 2:
# leap_adj = 0
# elif isleap(year):
# leap_adj = -1
# else:
# leap_adj = -2
#
# return (
# EPOCH
# - 1
# + (YEAR_DAYS * (year - 1))
# + floor((year - 1) / LEAP_CYCLE_YEARS)
# + (-floor((year - 1) / LEAP_SUPPRESSION_YEARS))
# + floor((year - 1) / INTERCALATION_CYCLE_YEARS)
# + floor((((367 * month) - 362) / 12) + leap_adj + day)
# )
#
# Path: src/convertdate/utils.py
# def floor(x):
# return int(math.floor(x))
#
# def jwday(j):
# '''Calculate day of week from Julian day. Consider using ``calendar.weekday``!'''
# return math.trunc((j + 0.5)) % 7
#
# def monthcalendarhelper(start_weekday, month_length):
# end_weekday = start_weekday + (month_length - 1) % 7
#
# lpad = (start_weekday + 1) % 7
# rpad = (5 - end_weekday % 7) % 6
#
# days = [None] * lpad + list(range(1, 1 + month_length)) + rpad * [None]
#
# return [days[i : i + 7] for i in range(0, len(days), 7)]
. Output only the next line. | return monthcalendarhelper(start_weekday, monthlen) |
Using the snippet: <|code_start|># django.contrib.aderit.generic_utils.models -- django models extensions
#
# Copyright (C) 2012 Aderit srl
#
# Author: Marco Bardelli <marco.bardelli@aderit.it>, <bardelli.marco@gmail.com>
#
# This file is part of DjangoContribAderit.
#
# DjangoContribAderit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DjangoContribAderit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DjangoContribAderit. If not, see <http://www.gnu.org/licenses/>.
'''Generic model fields'''
class GenericPhoneField(CharField):
description = _("Phone number")
def formfield(self, **kwargs):
defaults = {
<|code_end|>
, determine the next line of code. You have imports:
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import CharField
from django.contrib.aderit.generic_utils.forms import fields as forms_fields
and context (class names, function names, or code) available:
# Path: django/contrib/aderit/generic_utils/forms/fields.py
# PHONE_REGEX = r'^\+?[0-9]{3}[0-9 ]{5,18}$'
# class GenericPhoneField(RegexField):
# def __init__(self, regex=PHONE_REGEX, max_length=None, min_length=None,
# error_message=None, *args, **kwargs):
. Output only the next line. | 'form_class': forms_fields.GenericPhoneField |
Predict the next line after this snippet: <|code_start|>
PAGINATION = getattr(settings, 'NEWS_PAGINATED_BY', 5)
urlpatterns = patterns(
'',
url(r'^$',
<|code_end|>
using the current file's imports:
from django.conf.urls.defaults import url, patterns
from django.conf import settings
from django.views.generic import ListView, DetailView
from django.contrib.aderit.news.models import NewsItem
and any relevant context from other files:
# Path: django/contrib/aderit/news/models.py
# class NewsItem(models.Model):
# title = models.CharField(max_length=100)
# slug = models.SlugField(help_text=_('A slug is used as part of the URL '
# 'for this article. '
# 'It is recommended to use the '
# 'default value if possible.'))
# pub_date = models.DateField(blank=True, null=True,
# help_text=_('YYYY-MM-DD -- Leave blank '
# 'if you don\'t want the article '
# 'to appear on the site yet.'))
# snippet = models.TextField(blank=True,
# help_text=_('Snippets are used as '
# 'a preview for this '
# 'article (in sidebars, etc).'))
# body = models.TextField(blank=True)
#
# def __unicode__(self):
# return self.title
#
# class Meta:
# verbose_name = _("News")
# verbose_name_plural = _("News")
# ordering = ['-pub_date']
# unique_together = (('slug', 'pub_date'), )
. Output only the next line. | ListView.as_view(model=NewsItem, |
Given the following code snippet before the placeholder: <|code_start|> with pytest.raises(ValueError) as excinfo:
tbmodels.Model.from_wannier_files(
hr_file=sample("silicon_hr.dat"),
win_file=sample("silicon.win"),
xyz_file=sample("silicon_centres.xyz"),
pos_kind="whatever",
)
assert "Invalid value 'whatever' for 'pos_kind'" in str(excinfo.value)
def test_wsvec_blocks_missing(sample):
"""
Check that a wsvec file with entire missing entries raises KeyError.
In this case, the individual blocks in the wsvec file are complete,
but entire blocks are missing.
"""
with pytest.raises(KeyError):
tbmodels.Model.from_wannier_files(
hr_file=sample("bi_hr.dat"),
wsvec_file=sample("bi_wsvec_blocks_missing.dat"),
xyz_file=sample("bi_centres.xyz"),
win_file=sample("bi.win"),
)
def test_wsvec_blocks_incomplete(sample):
"""
Check that a wsvec file with incomplete blocks raises an error.
"""
<|code_end|>
, predict the next line using imports from the current file:
import pytest
import numpy as np
import tbmodels
from parameters import KPT
from tbmodels.exceptions import TbmodelsException
and context including class names, function names, and sometimes code from other files:
# Path: tbmodels/exceptions.py
# class TbmodelsException(click.ClickException):
# """
# Custom exception class for TBmodels errors. This class should be
# used only for exception with a well-known cause, not for unexpected
# "crashes". For example, it can be used for malformed or incompatible
# inputs, truncated or missing input files, and similar errors.
#
# Errors which use this exception class will be formatted in the CLI as::
#
# Error: [<exception marker name>] <error message>
# """
#
# exit_code = 3
#
# def __init__(self, message: str, exception_marker: ExceptionMarker):
# super().__init__(message)
# self.exception_marker = exception_marker
#
# def format_message(self):
# return f"[{self.exception_marker.name}] {super().format_message()}"
. Output only the next line. | with pytest.raises(TbmodelsException) as excinfo: |
Given snippet: <|code_start|>
@pytest.mark.parametrize("pos_kind", ["wannier", "nearest_atom"])
@pytest.mark.parametrize(
"prefix, distance_ratio_threshold", [("silicon", 2.0), ("bi", 1)]
)
def test_cli_parse( # pylint: disable=too-many-arguments
models_equal,
prefix,
sample,
pos_kind,
cli_sparsity_arguments,
cli_verbosity_argument,
modify_reference_model_sparsity,
distance_ratio_threshold,
):
"""Test the 'parse' command with different 'prefix' and 'pos_kind'."""
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
if distance_ratio_threshold is None:
distance_ratio_arguments = []
distance_ratio_kwargs = {}
else:
distance_ratio_arguments = [
"--distance-ratio-threshold",
str(distance_ratio_threshold),
]
distance_ratio_kwargs = {
"distance_ratio_threshold": distance_ratio_threshold
}
run = runner.invoke(
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import tempfile
import pytest
import tbmodels
from click.testing import CliRunner
from tbmodels._cli import cli
and context:
# Path: tbmodels/_cli.py
# @click.group()
# @click.version_option(version=tbmodels_version)
# def cli():
# pass
which might include code, classes, or functions. Output only the next line. | cli, |
Given the code snippet: <|code_start|>#!/usr/bin/env python
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Tests for the 'eigenvals' command.
"""
@pytest.mark.parametrize(
"kpoints_file_name", ["kpoints.hdf5", "silicon_eigenvals.hdf5"]
)
def test_cli_eigenvals(sample, cli_verbosity_argument, kpoints_file_name):
"""
Test the 'eigenvals' command.
"""
samples_dir = sample("cli_eigenvals")
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
run = runner.invoke(
<|code_end|>
, generate the next line using the imports in this file:
import os
import tempfile
import pytest
import numpy as np
import bands_inspect as bi
from click.testing import CliRunner
from tbmodels._cli import cli
and context (functions, classes, or occasionally code) from other files:
# Path: tbmodels/_cli.py
# @click.group()
# @click.version_option(version=tbmodels_version)
# def cli():
# pass
. Output only the next line. | cli, |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""Tests the 'slice' CLI command."""
@pytest.mark.parametrize(
"slice_idx", [(3, 1, 2), (0, 1, 4, 2, 3, 5, 6, 8, 7, 9, 10, 11, 12)]
)
def test_cli_slice(
models_equal,
slice_idx,
sample,
cli_sparsity_arguments,
cli_verbosity_argument,
modify_reference_model_sparsity,
):
"""
Check that using the CLI to slice a tight-binding model produces
the same result as using the `slice_orbitals` method.
"""
runner = CliRunner()
input_file = sample("InAs_nosym.hdf5")
with tempfile.NamedTemporaryFile() as out_file:
run = runner.invoke(
<|code_end|>
. Use current file imports:
import tempfile
import pytest
import tbmodels
from click.testing import CliRunner
from tbmodels._cli import cli
and context (classes, functions, or code) from other files:
# Path: tbmodels/_cli.py
# @click.group()
# @click.version_option(version=tbmodels_version)
# def cli():
# pass
. Output only the next line. | cli, |
Here is a snippet: <|code_start|>
# --------------------------FIXTURES-------------------------------------#
@pytest.fixture
def test_name(request):
"""Returns (module_name, function_name[args]) for a given test"""
return (
request.module.__name__,
request._parent_request._pyfuncitem.name, # pylint: disable=protected-access
)
@pytest.fixture
def compare_data(test_name):
"""Returns a function which either saves some data to a file or (if that file exists already) compares it to pre-existing data using a given comparison function."""
def inner(compare_fct, data, tag=None):
dir_name, file_name = test_name
file_name += tag or ""
cache_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "regression_data", dir_name
)
os.makedirs(cache_dir, exist_ok=True)
file_name_full = os.path.join(cache_dir, file_name)
try:
val = load(file_name_full)
except OSError as exc:
<|code_end|>
. Write the next line using the current file imports:
import os
import operator
import itertools
import pytest
import numpy as np
import tbmodels
from functools import partial
from collections import ChainMap
from numpy.testing import assert_allclose, assert_equal
from tbmodels.io import save, load
and context from other files:
# Path: tbmodels/io.py
# def load(file_path):
, which may include functions, classes, or code. Output only the next line. | save(data, file_name_full) |
Using the snippet: <|code_start|># pylint: disable=redefined-outer-name
# --------------------------FIXTURES-------------------------------------#
@pytest.fixture
def test_name(request):
"""Returns (module_name, function_name[args]) for a given test"""
return (
request.module.__name__,
request._parent_request._pyfuncitem.name, # pylint: disable=protected-access
)
@pytest.fixture
def compare_data(test_name):
"""Returns a function which either saves some data to a file or (if that file exists already) compares it to pre-existing data using a given comparison function."""
def inner(compare_fct, data, tag=None):
dir_name, file_name = test_name
file_name += tag or ""
cache_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "regression_data", dir_name
)
os.makedirs(cache_dir, exist_ok=True)
file_name_full = os.path.join(cache_dir, file_name)
try:
<|code_end|>
, determine the next line of code. You have imports:
import os
import operator
import itertools
import pytest
import numpy as np
import tbmodels
from functools import partial
from collections import ChainMap
from numpy.testing import assert_allclose, assert_equal
from tbmodels.io import save, load
and context (class names, function names, or code) available:
# Path: tbmodels/io.py
# def load(file_path):
. Output only the next line. | val = load(file_name_full) |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Tests for the 'symmetrize' CLI command.
"""
def test_cli_symmetrize(
models_close,
sample,
cli_sparsity_arguments,
cli_verbosity_argument,
modify_reference_model_sparsity,
):
"""
Test the 'symmetrize' command.
"""
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
run = runner.invoke(
<|code_end|>
. Use current file imports:
import tempfile
import tbmodels
from click.testing import CliRunner
from tbmodels._cli import cli
and context (classes, functions, or code) from other files:
# Path: tbmodels/_cli.py
# @click.group()
# @click.version_option(version=tbmodels_version)
# def cli():
# pass
. Output only the next line. | cli, |
Predict the next line after this snippet: <|code_start|>
class DevPlot():
def add_axes(self, plot, label):
xaxis = LinearAxis()
yaxis = LinearAxis()
yaxis.axis_label = label
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
return plot
def plot(self, job, typename):
<|code_end|>
using the current file's imports:
import sys
import numpy
from tacc_stats.analysis.gen import utils
from bokeh.palettes import d3
from bokeh.layouts import gridplot
from bokeh.models import HoverTool, ColumnDataSource, Plot, Grid, DataRange1d, LinearAxis
from bokeh.models.glyphs import Step
and any relevant context from other files:
# Path: tacc_stats/analysis/gen/utils.py
# class utils():
# def __init__(self, job):
# freq_list = {"intel_snb" : 2.7, "intel_ivb" : 2.8, "intel_hsw" : 2.3,
# "intel_bdw" : 2.6, "intel_knl" : 1.4, "intel_skx" : 2.1,
# "intel_8pmc3" : 2.7, "intel_4pmc3" : 2.7}
# imc_list = ["intel_snb_imc", "intel_ivb_imc", "intel_hsw_imc",
# "intel_bdw_imc", "intel_knl_mc_dclk", "intel_skx_imc"]
# cha_list = ["intel_knl_cha", "intel_skx_cha"]
# self.job = job
# self.nhosts = len(job.hosts.keys())
# self.hostnames = sorted(job.hosts.keys())
# self.wayness = int(job.acct['cores'])/int(job.acct['nodes'])
# self.hours = ((job.times[:] - job.times[0])/3600.).astype(float)
# self.t = job.times
# self.nt = len(job.times)
# self.dt = (job.times[-1] - job.times[0]).astype(float)
# for typename in job.schemas.keys():
# if typename in freq_list:
# self.pmc = typename
# self.freq = freq_list[typename]
# if typename in imc_list:
# self.imc = typename
# if typename in cha_list:
# self.cha = typename
#
# def get_type(self, typename, aggregate = True):
# if typename == "imc": typename = self.imc
# if typename == "pmc": typename = self.pmc
# if typename == "cha": typename = self.cha
# if not typename: return
#
# schema = self.job.schemas[typename]
# stats = {}
# for hostname, host in self.job.hosts.items():
# if aggregate:
# stats[hostname] = 0
# for devname in host.stats[typename]:
# stats[hostname] += host.stats[typename][devname].astype(float)
# else:
# stats[hostname] = {}
# for devname in host.stats[typename]:
# stats[hostname][devname] = host.stats[typename][devname].astype(float)
# return schema, stats
. Output only the next line. | u = utils.utils(job) |
Given snippet: <|code_start|> for i in range(0, m):
A[i, j] *= e.mult
if "MSR_DRAM_ENERGY_STATUS" == schema.keys()[j]:
for i in range(0, m):
A[i, j] *= 0.0153/0.06104
return A
def logoverflow(self, host_name, type_name, dev_name, key_name):
if type_name not in self.overflows:
self.overflows[type_name] = dict()
if dev_name not in self.overflows[type_name]:
self.overflows[type_name][dev_name] = dict()
if key_name not in self.overflows[type_name][dev_name]:
self.overflows[type_name][dev_name][key_name] = []
self.overflows[type_name][dev_name][key_name].append(host_name)
def process_stats(self):
for host in self.hosts.values():
host.stats = {}
for type_name, raw_type_stats in host.raw_stats.items():
stats = host.stats[type_name] = {}
schema = self.schemas[type_name]
for dev_name, raw_dev_stats in raw_type_stats.items():
try:
stats[dev_name] = self.process_dev_stats(host, type_name, schema,
dev_name, raw_dev_stats)
except:
continue
del host.raw_stats
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import datetime, errno, glob, numpy, os, sys, time, gzip
import re
import string
from tacc_stats.pickler import amd64_pmc, intel_process
and context:
# Path: tacc_stats/pickler/amd64_pmc.py
# def perf_event(event_select, unit_mask):
# def core_to_sock(c):
# def process_host(host, times):
# def process_job(job):
#
# Path: tacc_stats/pickler/intel_process.py
# def CORE_PERF_EVENT(event_select, unit_mask):
# def CORE_PERF_EVENT1(event_select, unit_mask):
# def CBOX_PERF_EVENT(event, umask):
# def HAU_PERF_EVENT(event, umask):
# def IMC_PERF_EVENT(event, umask):
# def IMC_PERF_EVENT_SKX(event, umask):
# def PCU_PERF_EVENT(event):
# def QPI_PERF_EVENT(event, umask):
# def R2PCI_PERF_EVENT(event, umask):
# def WTM_PERF_EVENT(event, umask):
# def WTMUNC_PERF_EVENT(event, umask):
# def KNL_EDC_UCLK_PERF_EVENT(event, umask):
# def KNL_EDC_ECLK_PERF_EVENT(event, umask):
# def KNL_MC_UCLK_PERF_EVENT(event, umask):
# def KNL_MC_DCLK_PERF_EVENT(event, umask):
# def __init__(self, job, name, event_map):
# def register(self,host):
# def format_knl(job, typename):
# def process_job(job):
# class reformat_counters:
which might include code, classes, or functions. Output only the next line. | amd64_pmc.process_job(self) |
Given the code snippet: <|code_start|> A[i, j] *= e.mult
if "MSR_DRAM_ENERGY_STATUS" == schema.keys()[j]:
for i in range(0, m):
A[i, j] *= 0.0153/0.06104
return A
def logoverflow(self, host_name, type_name, dev_name, key_name):
if type_name not in self.overflows:
self.overflows[type_name] = dict()
if dev_name not in self.overflows[type_name]:
self.overflows[type_name][dev_name] = dict()
if key_name not in self.overflows[type_name][dev_name]:
self.overflows[type_name][dev_name][key_name] = []
self.overflows[type_name][dev_name][key_name].append(host_name)
def process_stats(self):
for host in self.hosts.values():
host.stats = {}
for type_name, raw_type_stats in host.raw_stats.items():
stats = host.stats[type_name] = {}
schema = self.schemas[type_name]
for dev_name, raw_dev_stats in raw_type_stats.items():
try:
stats[dev_name] = self.process_dev_stats(host, type_name, schema,
dev_name, raw_dev_stats)
except:
continue
del host.raw_stats
amd64_pmc.process_job(self)
<|code_end|>
, generate the next line using the imports in this file:
import datetime, errno, glob, numpy, os, sys, time, gzip
import re
import string
from tacc_stats.pickler import amd64_pmc, intel_process
and context (functions, classes, or occasionally code) from other files:
# Path: tacc_stats/pickler/amd64_pmc.py
# def perf_event(event_select, unit_mask):
# def core_to_sock(c):
# def process_host(host, times):
# def process_job(job):
#
# Path: tacc_stats/pickler/intel_process.py
# def CORE_PERF_EVENT(event_select, unit_mask):
# def CORE_PERF_EVENT1(event_select, unit_mask):
# def CBOX_PERF_EVENT(event, umask):
# def HAU_PERF_EVENT(event, umask):
# def IMC_PERF_EVENT(event, umask):
# def IMC_PERF_EVENT_SKX(event, umask):
# def PCU_PERF_EVENT(event):
# def QPI_PERF_EVENT(event, umask):
# def R2PCI_PERF_EVENT(event, umask):
# def WTM_PERF_EVENT(event, umask):
# def WTMUNC_PERF_EVENT(event, umask):
# def KNL_EDC_UCLK_PERF_EVENT(event, umask):
# def KNL_EDC_ECLK_PERF_EVENT(event, umask):
# def KNL_MC_UCLK_PERF_EVENT(event, umask):
# def KNL_MC_DCLK_PERF_EVENT(event, umask):
# def __init__(self, job, name, event_map):
# def register(self,host):
# def format_knl(job, typename):
# def process_job(job):
# class reformat_counters:
. Output only the next line. | intel_process.process_job(self) |
Based on the snippet: <|code_start|>
class MasterPlot():
def add_axes(self, plot, label):
xaxis = LinearAxis()
yaxis = LinearAxis()
yaxis.axis_label = label
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
return plot
def plot(self, job):
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import numpy
import time
from tacc_stats.analysis.gen import utils
from bokeh.palettes import d3
from bokeh.layouts import gridplot
from bokeh.models import HoverTool, ColumnDataSource, Plot, Grid, DataRange1d, LinearAxis
from bokeh.models.glyphs import Step
and context (classes, functions, sometimes code) from other files:
# Path: tacc_stats/analysis/gen/utils.py
# class utils():
# def __init__(self, job):
# freq_list = {"intel_snb" : 2.7, "intel_ivb" : 2.8, "intel_hsw" : 2.3,
# "intel_bdw" : 2.6, "intel_knl" : 1.4, "intel_skx" : 2.1,
# "intel_8pmc3" : 2.7, "intel_4pmc3" : 2.7}
# imc_list = ["intel_snb_imc", "intel_ivb_imc", "intel_hsw_imc",
# "intel_bdw_imc", "intel_knl_mc_dclk", "intel_skx_imc"]
# cha_list = ["intel_knl_cha", "intel_skx_cha"]
# self.job = job
# self.nhosts = len(job.hosts.keys())
# self.hostnames = sorted(job.hosts.keys())
# self.wayness = int(job.acct['cores'])/int(job.acct['nodes'])
# self.hours = ((job.times[:] - job.times[0])/3600.).astype(float)
# self.t = job.times
# self.nt = len(job.times)
# self.dt = (job.times[-1] - job.times[0]).astype(float)
# for typename in job.schemas.keys():
# if typename in freq_list:
# self.pmc = typename
# self.freq = freq_list[typename]
# if typename in imc_list:
# self.imc = typename
# if typename in cha_list:
# self.cha = typename
#
# def get_type(self, typename, aggregate = True):
# if typename == "imc": typename = self.imc
# if typename == "pmc": typename = self.pmc
# if typename == "cha": typename = self.cha
# if not typename: return
#
# schema = self.job.schemas[typename]
# stats = {}
# for hostname, host in self.job.hosts.items():
# if aggregate:
# stats[hostname] = 0
# for devname in host.stats[typename]:
# stats[hostname] += host.stats[typename][devname].astype(float)
# else:
# stats[hostname] = {}
# for devname in host.stats[typename]:
# stats[hostname][devname] = host.stats[typename][devname].astype(float)
# return schema, stats
. Output only the next line. | u = utils.utils(job) |
Given the code snippet: <|code_start|> def run(self, filelist):
if not filelist:
print("Please specify a job file list.")
sys.exit()
pool = multiprocessing.Pool(processes = self.processes)
metrics = pool.map(_unwrap, zip([self]*len(filelist), filelist))
#metrics = map(_unwrap, zip([self]*len(filelist), filelist))
return metrics
# Compute metric
def compute_metrics(self, jobpath):
try:
with open(jobpath, 'rb') as fd:
try: job = p.load(fd)
except UnicodeDecodeError as e:
try:
job = p.load(fd, encoding = "latin1") # Python2 Compatibility
except: return jobpath, None
except MemoryError as e:
print('File ' + jobpath + ' to large to load')
return jobpath, None
except IOError as e:
print('File ' + jobpath + ' not found')
return jobpath, None
except EOFError as e:
print('End of file error for: ' + jobpath)
return jobpath, None
except:
return jobpath, None
<|code_end|>
, generate the next line using the imports in this file:
import sys
import operator, traceback
import pickle as p
import multiprocessing
from tacc_stats.analysis.gen import utils
from numpy import diff, amax, zeros, maximum, mean, isnan, trapz
and context (functions, classes, or occasionally code) from other files:
# Path: tacc_stats/analysis/gen/utils.py
# class utils():
# def __init__(self, job):
# freq_list = {"intel_snb" : 2.7, "intel_ivb" : 2.8, "intel_hsw" : 2.3,
# "intel_bdw" : 2.6, "intel_knl" : 1.4, "intel_skx" : 2.1,
# "intel_8pmc3" : 2.7, "intel_4pmc3" : 2.7}
# imc_list = ["intel_snb_imc", "intel_ivb_imc", "intel_hsw_imc",
# "intel_bdw_imc", "intel_knl_mc_dclk", "intel_skx_imc"]
# cha_list = ["intel_knl_cha", "intel_skx_cha"]
# self.job = job
# self.nhosts = len(job.hosts.keys())
# self.hostnames = sorted(job.hosts.keys())
# self.wayness = int(job.acct['cores'])/int(job.acct['nodes'])
# self.hours = ((job.times[:] - job.times[0])/3600.).astype(float)
# self.t = job.times
# self.nt = len(job.times)
# self.dt = (job.times[-1] - job.times[0]).astype(float)
# for typename in job.schemas.keys():
# if typename in freq_list:
# self.pmc = typename
# self.freq = freq_list[typename]
# if typename in imc_list:
# self.imc = typename
# if typename in cha_list:
# self.cha = typename
#
# def get_type(self, typename, aggregate = True):
# if typename == "imc": typename = self.imc
# if typename == "pmc": typename = self.pmc
# if typename == "cha": typename = self.cha
# if not typename: return
#
# schema = self.job.schemas[typename]
# stats = {}
# for hostname, host in self.job.hosts.items():
# if aggregate:
# stats[hostname] = 0
# for devname in host.stats[typename]:
# stats[hostname] += host.stats[typename][devname].astype(float)
# else:
# stats[hostname] = {}
# for devname in host.stats[typename]:
# stats[hostname][devname] = host.stats[typename][devname].astype(float)
# return schema, stats
. Output only the next line. | u = utils.utils(job) |
Here is a snippet: <|code_start|>
gcd_value = 0
n = dataflow.get_task_count()
div = numpy.random.exponential(0.25)
div = n + int(div * n)
rv_list = numpy.random.multinomial(sum_rv, numpy.ones(n) / div)
__get_non_zero_list(rv_list)
# Modify the two last integers of the list to get a gcd equal to 1
if gcd_value != 1:
logging.info("recalculate GCD")
while reduce(gcd, [gcd_value, rv_list[-1], rv_list[-2]]) != 1:
rv_list[-1] -= 1
rv_list[-2] += 1
shuffle(rv_list)
for task in dataflow.get_task_list():
dataflow.set_repetition_factor(task, rv_list[task])
return
def __generate_rates(dataflow, c_param):
"""Generate weights of the dataflow.
"""
logging.info("Generate task wight")
k = 0
lcm_value = 1
for task in dataflow.get_task_list():
<|code_end|>
. Write the next line using the current file imports:
from fractions import gcd
from Turbine.calc.lcm import lcm
from random import shuffle, randint, sample
import logging
import numpy
and context from other files:
# Path: Turbine/calc/lcm.py
# def lcm(a, b):
# return abs(a * b) / gcd(a, b)
, which may include functions, classes, or code. Output only the next line. | lcm_value = lcm(lcm_value, dataflow.get_repetition_factor(task)) |
Predict the next line after this snippet: <|code_start|> if not __test_coef(coef, [dataflow.get_initial_marking(arc)]):
dataflow.set_initial_marking(arc, int(ceil(dataflow.get_initial_marking(arc) * coef)))
else:
dataflow.set_initial_marking(arc, int(dataflow.get_initial_marking(arc) * coef))
if dataflow.is_sdf:
dataflow.set_prod_rate(arc, int(dataflow.get_prod_rate(arc) * coef))
dataflow.set_cons_rate(arc, int(dataflow.get_cons_rate(arc) * coef))
if dataflow.is_csdf:
dataflow.set_prod_rate_list(arc, [int(x * coef) for x in dataflow.get_prod_rate_list(arc)])
dataflow.set_cons_rate_list(arc, [int(x * coef) for x in dataflow.get_cons_rate_list(arc)])
if dataflow.is_pcg:
dataflow.set_ini_prod_rate_list(arc, [int(x * coef) for x in dataflow.get_ini_prod_rate_list(arc)])
dataflow.set_ini_cons_rate_list(arc, [int(x * coef) for x in dataflow.get_ini_cons_rate_list(arc)])
dataflow.set_threshold_list(arc, [int(x * coef) for x in dataflow.get_threshold_list(arc)])
dataflow.set_ini_threshold_list(arc, [int(x * coef) for x in dataflow.get_ini_threshold_list(arc)])
def get_normalized_vector(dataflow):
"""Compute the normalization vector of an un-normalize graph.
Return
------
Return the the vector of coefficient for normalize the graph.
"""
coef_list = {}
lcm_rf = 1
lcm_post_mult = 1
for task in dataflow.get_task_list():
<|code_end|>
using the current file's imports:
from fractions import Fraction
from math import ceil
from numpy.random.mtrand import randint
from Turbine.calc.lcm import lcm
and any relevant context from other files:
# Path: Turbine/calc/lcm.py
# def lcm(a, b):
# return abs(a * b) / gcd(a, b)
. Output only the next line. | lcm_rf = lcm(lcm_rf, dataflow.get_repetition_factor(task)) |
Given snippet: <|code_start|> self._dotGraph.add_node(dot_buf)
if self.dataflow.is_sdf:
self._dotGraph.add_edge(pydot.Edge(dot_prod, dot_buf, headlabel=self.dataflow.get_prod_rate(arc)))
self._dotGraph.add_edge(pydot.Edge(dot_buf, dot_cons, headlabel=self.dataflow.get_cons_rate(arc)))
if self.dataflow.is_csdf:
self._dotGraph.add_edge(
pydot.Edge(dot_prod, dot_buf, headlabel=str(self.dataflow.get_prod_rate_list(arc))))
self._dotGraph.add_edge(
pydot.Edge(dot_buf, dot_cons, headlabel=str(self.dataflow.get_cons_rate_list(arc))))
def write_dot(self, name=None):
if name is None:
name = self.dataflow.name
self._dotGraph.write_dot(name + '.dot')
def write_pdf(self, name=None):
if name is None:
name = self.dataflow.name
self._dotGraph.write_pdf(name + '.pdf')
def write_jpeg(self, name=None):
if name is None:
name = self.dataflow.name
self._dotGraph.write_jpeg(name + '.jpeg')
if __name__ == "__main__":
file_path_bs = "simpl_BlackScholes.tur"
# file_path_jpeg = "../../experimentations/indus/JPEG2000.tur"
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pydot
from Turbine.file_parser.turbine_parser import read_tur_file
and context:
# Path: Turbine/file_parser/turbine_parser.py
# def read_tur_file(filename):
# open_file = open(filename, "r")
# name, dataflow_type = __readline(open_file).replace("\n", "").split(" ")
# if dataflow_type == "SDF" or dataflow_type == "SDFG":
# dataflow = SDF(name)
# elif dataflow_type == "CSDF" or dataflow_type == "CSDFG":
# dataflow = CSDF(name)
# elif dataflow_type == "PCG":
# dataflow = PCG(name)
#
# task_nb, arc_nb = __readline(open_file).split(" ")
# for i in xrange(int(task_nb)):
# line = __readline(open_file).replace("\n", "")
# task_name, repetition_factor, str_duration = line.split(" ")
# task = dataflow.add_task(task_name)
# dataflow.set_repetition_factor(task, int(repetition_factor))
# if ";" in str_duration:
# str_ini_duration, str_duration = str_duration.split(";")
# ini_duration_list = [float(i) for i in str_ini_duration.split(",")]
# dataflow.set_ini_phase_count(task, len(ini_duration_list))
# dataflow.set_ini_phase_duration_list(task, ini_duration_list)
#
# duration_list = [float(i) for i in str_duration.split(",")]
# if isinstance(dataflow, SDF):
# dataflow.set_task_duration(task, duration_list[0])
# if isinstance(dataflow, CSDF):
# dataflow.set_phase_count(task, len(duration_list))
# dataflow.set_phase_duration_list(task, duration_list)
#
# for i in xrange(int(arc_nb)):
# line = __readline(open_file).replace("\n", "")
# str_arc, str_m0, str_prod, str_cons = line.split(" ")
# source = dataflow.get_task_by_name(str_arc.split(",")[0][1:])
# target = dataflow.get_task_by_name(str_arc.split(",")[1][:-1])
# m0 = int(str_m0)
# arc = dataflow.add_arc(source, target)
# dataflow.set_initial_marking(arc, m0)
#
# if "PCG" in dataflow_type:
# if dataflow.get_ini_phase_count(source) == 0:
# dataflow.set_ini_prod_rate_list(arc, [])
# if dataflow.get_ini_phase_count(target) == 0:
# dataflow.set_ini_cons_rate_list(arc, [])
# dataflow.set_ini_threshold_list(arc, [])
#
# if ";" in str_prod:
# str_prod_ini, str_prod = str_prod.split(";")
# prod_ini = [int(i) for i in str_prod_ini.split(",")]
# dataflow.set_ini_prod_rate_list(arc, prod_ini)
#
# prod = [int(i) for i in str_prod.split(",")]
# if isinstance(dataflow, SDF):
# dataflow.set_prod_rate(arc, prod[0])
# if isinstance(dataflow, CSDF):
# dataflow.set_prod_rate_list(arc, prod)
#
# if ";" in str_cons:
# str_cons_ini, str_cons = str_cons.split(";")
# str_cons_ini = str_cons_ini.split(",")
# cons_ini = []
# ini_threshold = []
# for value in str_cons_ini:
# if ":" in value:
# value, threshold = value.split(":")
# ini_threshold.append(int(threshold))
# else:
# ini_threshold.append(int(value))
# cons_ini.append(int(value))
#
# dataflow.set_ini_cons_rate_list(arc, cons_ini)
# if isinstance(dataflow, PCG):
# dataflow.set_ini_threshold_list(arc, ini_threshold)
#
# str_cons = str_cons.split(",")
# cons = []
# threshold = []
# for value in str_cons:
# if ":" in value:
# value, threshold_str = value.split(":")
# threshold.append(int(threshold_str))
# else:
# threshold.append(int(value))
# cons.append(int(value))
#
# if isinstance(dataflow, SDF):
# dataflow.set_cons_rate(arc, cons[0])
# if isinstance(dataflow, CSDF):
# dataflow.set_cons_rate_list(arc, cons)
# if isinstance(dataflow, PCG):
# dataflow.set_threshold_list(arc, threshold)
#
# return dataflow
which might include code, classes, or functions. Output only the next line. | dataflow = read_tur_file(file_path_bs) |
Using the snippet: <|code_start|> )
property_names = cfml_format.find_by_selector(
"source.cfml.script meta.tag.property.name.cfml"
)
if len(properties) != len(property_names):
return
sorted_properties = [
r
for r, name in sorted(
zip(properties, property_names), key=lambda x: cfml_format.view.substr(x[1])
)
]
replacements = [
(r, cfml_format.view.substr(sorted_r))
for r, sorted_r in zip(properties, sorted_properties)
]
return replacements
def normalize_builtin_functions(cfml_format):
setting = cfml_format.get_setting("normalize_builtin_functions")
substitutions = []
if setting is None:
return substitutions
function_name_map = {
<|code_end|>
, determine the next line of code. You have imports:
import sublime
from ..cfml_plugins import basecompletions
and context (class names, function names, or code) available:
# Path: src/cfml_plugins.py
# def _plugin_loaded():
. Output only the next line. | funct.lower(): funct for funct in basecompletions.basecompletions.function_names |
Based on the snippet: <|code_start|>
directory = [
"applicationcfc",
"basecompletions",
"cfcs",
"cfdocs",
"custom_tags",
"dotpaths",
"entities",
"fw1",
"in_file_completions",
"testbox",
]
plugins = []
for p in directory:
m = importlib.import_module(".plugins_." + p, __package__)
globals()[p] = m
for a in dir(m):
v = m.__dict__[a]
if a.endswith("Command"):
globals()[a] = v
elif a == "CFMLPlugin":
try:
<|code_end|>
, predict the immediate next line with the help of imports:
import importlib
from .plugins_.plugin import CFMLPlugin
and context (classes, functions, sometimes code) from other files:
# Path: src/plugins_/plugin.py
# class CFMLPlugin:
# def get_completion_docs(self, cfml_view):
# return None
#
# def get_completions(self, cfml_view):
# return None
#
# def get_goto_cfml_file(self, cfml_view):
# return None
#
# def get_inline_documentation(self, cfml_view, doc_type):
# return None
#
# def get_method_preview(self, cfml_view):
# return None
. Output only the next line. | if v.__bases__ and issubclass(v, CFMLPlugin): |
Predict the next line after this snippet: <|code_start|># Copyright (C) 2013-2016 Julien Vitay <julien.vitay@gmail.com>,
# Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ANNarchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
def extract_randomdist(description):
" Extracts RandomDistribution objects from all variables"
rk_rand = 0
random_objects = []
for variable in description['variables']:
# Equation
eq = variable['eq']
# Dependencies
dependencies = []
# Search for all distributions
<|code_end|>
using the current file's imports:
import ANNarchy.core.Global as Global
import re
import sympy
from ANNarchy.core.Random import available_distributions, distributions_arguments, distributions_equivalents
from ANNarchy.parser.Equation import Equation
from ANNarchy.parser.Function import FunctionParser
from ANNarchy.parser.StringManipulation import *
from ANNarchy.parser.ITE import *
from inspect import getmembers
and any relevant context from other files:
# Path: ANNarchy/core/Random.py
# class RandomDistribution(object):
# class Uniform(RandomDistribution):
# class DiscreteUniform(RandomDistribution):
# class Normal(RandomDistribution):
# class LogNormal(RandomDistribution):
# class Exponential(RandomDistribution):
# class Gamma(RandomDistribution):
# def get_values(self, shape):
# def get_list_values(self, size):
# def get_value(self):
# def keywords(self):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, min, max):
# def get_values(self, shape):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, min, max):
# def get_values(self, shape):
# def latex(self):
# def __init__(self, mu, sigma, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, mu, sigma, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, Lambda, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
# def __init__(self, alpha, beta=1.0, seed=-1, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
. Output only the next line. | for dist in available_distributions: |
Using the snippet: <|code_start|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
def extract_randomdist(description):
" Extracts RandomDistribution objects from all variables"
rk_rand = 0
random_objects = []
for variable in description['variables']:
# Equation
eq = variable['eq']
# Dependencies
dependencies = []
# Search for all distributions
for dist in available_distributions:
matches = re.findall('(?P<pre>[^\w.])'+dist+'\(([^()]+)\)', eq)
if matches == ' ':
continue
for l, v in matches:
# Check the arguments
arguments = v.split(',')
# Check the number of provided arguments
<|code_end|>
, determine the next line of code. You have imports:
import ANNarchy.core.Global as Global
import re
import sympy
from ANNarchy.core.Random import available_distributions, distributions_arguments, distributions_equivalents
from ANNarchy.parser.Equation import Equation
from ANNarchy.parser.Function import FunctionParser
from ANNarchy.parser.StringManipulation import *
from ANNarchy.parser.ITE import *
from inspect import getmembers
and context (class names, function names, or code) available:
# Path: ANNarchy/core/Random.py
# class RandomDistribution(object):
# class Uniform(RandomDistribution):
# class DiscreteUniform(RandomDistribution):
# class Normal(RandomDistribution):
# class LogNormal(RandomDistribution):
# class Exponential(RandomDistribution):
# class Gamma(RandomDistribution):
# def get_values(self, shape):
# def get_list_values(self, size):
# def get_value(self):
# def keywords(self):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, min, max):
# def get_values(self, shape):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, min, max):
# def get_values(self, shape):
# def latex(self):
# def __init__(self, mu, sigma, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, mu, sigma, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, Lambda, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
# def __init__(self, alpha, beta=1.0, seed=-1, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
. Output only the next line. | if len(arguments) < distributions_arguments[dist]: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.