Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Here is a snippet: <|code_start|>
recursion_warning = '''\
WARNING: The peru module '{}' doesn't specify the 'recursive' field,
but its contents include a peru.yaml file. Peru's behavior here changed
<|code_end|>
. Write the next line using the current file imports:
import json
import os
import textwrap
from .cache import compute_key
from .error import PrintableError, error_context
from .edit_yaml import set_module_field_in_file
from . import imports
from .plugin import plugin_fetch, plugin_get_reup_fields
from . import scope
from . import parser # avoid circular imports
and context from other files:
# Path: peru/cache.py
# def compute_key(data):
# # To hash this dictionary of fields, serialize it as a JSON string, and
# # take the SHA1 of that string. Dictionary key order is unspecified, so
# # "sort_keys" keeps our hash stable. Specifying separators makes the
# # JSON slightly more compact, and protects us against changes in the
# # default. "ensure_ascii" defaults to true, so specifying it just
# # protects us from changes in the default.
# json_representation = json.dumps(
# data, sort_keys=True, ensure_ascii=True, separators=(',', ':'))
# sha1 = hashlib.sha1()
# sha1.update(json_representation.encode("utf8"))
# return sha1.hexdigest()
#
# Path: peru/edit_yaml.py
# def set_module_field_in_file(yaml_file_path, module_name, field_name, new_val):
# with open(yaml_file_path) as f:
# yaml_text = f.read()
# new_yaml_text = set_module_field(yaml_text, module_name, field_name,
# new_val)
# with open(yaml_file_path, "w") as f:
# f.write(new_yaml_text)
#
# Path: peru/plugin.py
# async def plugin_fetch(plugin_context, module_type, module_fields, dest,
# display_handle):
# env = {'PERU_SYNC_DEST': dest}
# await _plugin_job(plugin_context, module_type, module_fields, 'sync', env,
# display_handle)
#
# async def plugin_get_reup_fields(plugin_context, module_type, module_fields,
# display_handle):
# with tmp_dir(plugin_context) as output_file_dir:
# output_path = os.path.join(output_file_dir, 'reup_output')
# env = {'PERU_REUP_OUTPUT': output_path}
# await _plugin_job(plugin_context, module_type, module_fields, 'reup',
# env, display_handle)
# with open(output_path) as output_file:
# fields = yaml.safe_load(output_file) or {}
#
# for key, value in fields.items():
# if not isinstance(key, str):
# raise PluginModuleFieldError(
# 'reup field name must be a string: {}'.format(key))
# if not isinstance(value, str):
# raise PluginModuleFieldError(
# 'reup field value must be a string: {}'.format(value))
#
# return fields
, which may include functions, classes, or code. Output only the next line. | in version 0.4: modules with peru.yaml files are *no longer* recursive |
Given snippet: <|code_start|>
recursion_warning = '''\
WARNING: The peru module '{}' doesn't specify the 'recursive' field,
but its contents include a peru.yaml file. Peru's behavior here changed
in version 0.4: modules with peru.yaml files are *no longer* recursive
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json
import os
import textwrap
from .cache import compute_key
from .error import PrintableError, error_context
from .edit_yaml import set_module_field_in_file
from . import imports
from .plugin import plugin_fetch, plugin_get_reup_fields
from . import scope
from . import parser # avoid circular imports
and context:
# Path: peru/cache.py
# def compute_key(data):
# # To hash this dictionary of fields, serialize it as a JSON string, and
# # take the SHA1 of that string. Dictionary key order is unspecified, so
# # "sort_keys" keeps our hash stable. Specifying separators makes the
# # JSON slightly more compact, and protects us against changes in the
# # default. "ensure_ascii" defaults to true, so specifying it just
# # protects us from changes in the default.
# json_representation = json.dumps(
# data, sort_keys=True, ensure_ascii=True, separators=(',', ':'))
# sha1 = hashlib.sha1()
# sha1.update(json_representation.encode("utf8"))
# return sha1.hexdigest()
#
# Path: peru/edit_yaml.py
# def set_module_field_in_file(yaml_file_path, module_name, field_name, new_val):
# with open(yaml_file_path) as f:
# yaml_text = f.read()
# new_yaml_text = set_module_field(yaml_text, module_name, field_name,
# new_val)
# with open(yaml_file_path, "w") as f:
# f.write(new_yaml_text)
#
# Path: peru/plugin.py
# async def plugin_fetch(plugin_context, module_type, module_fields, dest,
# display_handle):
# env = {'PERU_SYNC_DEST': dest}
# await _plugin_job(plugin_context, module_type, module_fields, 'sync', env,
# display_handle)
#
# async def plugin_get_reup_fields(plugin_context, module_type, module_fields,
# display_handle):
# with tmp_dir(plugin_context) as output_file_dir:
# output_path = os.path.join(output_file_dir, 'reup_output')
# env = {'PERU_REUP_OUTPUT': output_path}
# await _plugin_job(plugin_context, module_type, module_fields, 'reup',
# env, display_handle)
# with open(output_path) as output_file:
# fields = yaml.safe_load(output_file) or {}
#
# for key, value in fields.items():
# if not isinstance(key, str):
# raise PluginModuleFieldError(
# 'reup field name must be a string: {}'.format(key))
# if not isinstance(value, str):
# raise PluginModuleFieldError(
# 'reup field value must be a string: {}'.format(value))
#
# return fields
which might include code, classes, or functions. Output only the next line. | by default. Add 'recursive: true' to the module definition to re-enable |
Given snippet: <|code_start|>class ArticlePipeline(object):
def get_media_request(self,item,info):
for url in item['file_urls']:
yield scrapy.Request(url)
def process_item(self,item,spider):
try:
self._mysqldb_insertitem(item)
logging.info(item)
return item
except:
raise DropItem('Cannot Insert %s into mysql database'% item)
logging.info('db error')
def _parse_sourceandhtml(self,images,files,html):
source=[]
htmlreplace=html
for image in images:
logging.info(image)
imagename = image['path'].split('/')[-1]
htmlreplace = htmlreplace.replace(str(image['url']), str(imagename))
source.append(imagename)
#hfs.UploadStreamXImages(imagename)
for file in files:
logging.info(file)
filename = file['path'].split('/')[-1]
htmlreplace = htmlreplace.replace(str(file['url']),str(filename))
source.append(filename)
#hfs.UploadStreamXFiles(filename)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from scrapy.exceptions import DropItem
from scrapywork.models import mysqldb
from urllib.request import urlretrieve
from scrapy.pipelines.images import ImagesPipeline
from PIL import Image
from io import BytesIO
from scrapy.pipelines.files import FileException
import scrapy
import six
import logging
import tempfile
import cv2
and context:
# Path: scrapywork/models.py
# class ArticleRule():
# class CrawlRule(ArticleRule):
# class BaseRule(ArticleRule):
# class SpiderRulesSingletion(object):
# class ItemParser(object):
# class Mail(object):
# class DatabaseSingleton(object):
# class IPProxy(object):
# def __init__(self):
# def _parsexml(self,xmlpath):
# def _parseCrawlrules(self,rule):
# def _parseBaserules(self,rule):
# def __init__(self,rule,response):
# def parse_item(self):
# def _fixhtml(self,html):
# def _formatetime(self,time):
# def __init__(self,sender,passwd,receiver):
# def sendmail(self,subject,contents):
# def get_maile_content(self):
# def get_allitem_count(self):
# def __init__(self):
# def __del__(self):
# def ensuretableexist(self):
# def insertItem(self,url,site,title,time,type,publish,html,text,xml,sources):
# def insertItem(self,item,source):
# def queryItem(self,url):
# def queryItemCount(self,site=None):
# def __init__(self):
# def _initipproxy(self,types,inlandcount,foreigncount):
# def getinlandipproxy(self):
# def getforeignipproxy(self):
which might include code, classes, or functions. Output only the next line. | return '|'.join(source),htmlreplace |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
class AutoSpider(scrapy.Spider):
name = 'autoSpider'
def __init__(self,rule):
self.rule=rule
self.allowed_domains = rule.allow_domains.split(',')
self.urllist=[]
super(AutoSpider,self).__init__()
def start_requests(self):
try:
for i in range(1,int(self.rule.pages),int(self.rule.pagestep)):
url=self.rule.next_page_url.format(str(i))
yield scrapy.Request(url,callback=self.parse_url)
except:
print('autospider request failed')
def parse_url(self,response):
try:
print(response.text)
host_url=self.rule.host_url
urls=re.findall(self.rule.url_regex,response.text)
for url in urls:
self.urllist.append(host_url+url)
except:
print('auto spider urllist error.')
<|code_end|>
, predict the next line using imports from the current file:
import scrapy
import re
from scrapywork.items import articleItem
and context including class names, function names, and sometimes code from other files:
# Path: scrapywork/items.py
# class articleItem(scrapy.Item):
# #文章的链接
# url = scrapy.Field()
# #站点
# site=scrapy.Field()
# #文章标题
# title=scrapy.Field()
# #文章发布时间
# time=scrapy.Field()
# #文章类型
# type=scrapy.Field()
# #文章发布方
# publish=scrapy.Field()
# html =scrapy.Field()
# text=scrapy.Field()
# xml=scrapy.Field()
#
# #文件下载
# file_urls=scrapy.Field()
# files=scrapy.Field()
# #图片下载
# image_urls=scrapy.Field()
# images=scrapy.Field()
. Output only the next line. | finally: |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
class ArticleSpider(CrawlSpider):
name = 'articleSpider'
def __init__(self,rule):
self.rule=rule
self.start_urls=rule.start_urls.split(',')
self.allowed_domains=rule.allow_domains.split(',')
rule_list=[]
# 添加`下一页`的规则
if rule.next_page:
rule_list.append(Rule(LinkExtractor(restrict_xpaths=rule.next_page,unique=True),follow=True))
#添加 '列表'的规则
if rule.list_page:
rule_list.append(Rule(LinkExtractor(allow=rule.list_page,unique=True),callback='parse_url',follow=True))
#添加抽取文章链接的规则
rule_list.append(Rule(LinkExtractor(
allow=rule.allow_url,unique=True),
callback='parse_item',follow=True)
)
self.rules=tuple(rule_list)
super(ArticleSpider,self).__init__()
def parse_url(self,response):
yield response
<|code_end|>
with the help of current file imports:
import scrapy
import re
import logging
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapywork.items import articleItem
from scrapy.http import Request
and context from other files:
# Path: scrapywork/items.py
# class articleItem(scrapy.Item):
# #文章的链接
# url = scrapy.Field()
# #站点
# site=scrapy.Field()
# #文章标题
# title=scrapy.Field()
# #文章发布时间
# time=scrapy.Field()
# #文章类型
# type=scrapy.Field()
# #文章发布方
# publish=scrapy.Field()
# html =scrapy.Field()
# text=scrapy.Field()
# xml=scrapy.Field()
#
# #文件下载
# file_urls=scrapy.Field()
# files=scrapy.Field()
# #图片下载
# image_urls=scrapy.Field()
# images=scrapy.Field()
, which may contain function names, class names, or code. Output only the next line. | self.log('this is a list page %s'% response.url) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
class BasespiderTemplate(scrapy.Spider):
def __init__(self,rule):
self.rule=rule
self.name = rule.name
self.allowed_domains = rule.allow_domains.split(',')
self.urllist=[]
super(BasespiderTemplate,self).__init__()
def start_requests(self):
try:
for i in range(2,int(self.rule.pages),int(self.rule.pagestep)):
url=self.rule.next_page_url.format(str(i))
yield scrapy.Request(url,callback=self.parse_url)
<|code_end|>
, predict the next line using imports from the current file:
import scrapy
import re
import logging
from scrapywork.models import ItemParser
and context including class names, function names, and sometimes code from other files:
# Path: scrapywork/models.py
# class ItemParser(object):
# def __init__(self,rule,response):
# self.rule=rule
# self.response=response
#
# def parse_item(self):
# articleInfo=articleItem()
# articleInfo['url'] = self.response.url
# articleInfo['site'] = self.rule.allow_domains
#
# if self.rule.title_xpath:
# title = self.response.xpath(self.rule.title_xpath).extract_first().strip()
# articleInfo['title'] = title
# else:
# articleInfo['title'] = ''
#
# if self.rule.time_xpath:
# time = self.response.xpath(self.rule.time_xpath).extract_first().split('/')[-1].strip()
# articleInfo['time'] = self._formatetime(time)
# else:
# articleInfo['time'] = ''
#
# if self.rule.type_xpath:
# type = self.response.xpath(self.rule.type_xpath).extract()
# articleInfo['type'] = ' '.join(type).strip()
# else:
# articleInfo['type'] = ''
#
# if self.rule.publish_xpath:
# publish = self.response.xpath(self.rule.publish_xpath).extract_first()
# if publish:
# articleInfo['publish'] = publish.strip()
# else:
# articleInfo['publish'] = ''
# else:
# articleInfo['publish'] = ''
#
# if self.rule.html_xpath:
# htmls = self.response.xpath(self.rule.html_xpath).extract()
# html = r'\r\n'.join(htmls)
# if self.rule.html_regexexculde:
# re_exclude = re.compile(self.rule.html_regexexculde, re.S)
# html = re_exclude.sub('', html)
# articleInfo['html'] = self._fixhtml(html)
# else:
# articleInfo['html'] = ''
#
# xml = html
# articleInfo['xml'] = xml
#
# if self.rule.text_xpath:
# texts = self.response.xpath(self.rule.text_xpath).extract()
# articleInfo['text'] = ''.join(texts).strip()
# else:
# articleInfo['text']=''
#
# if self.rule.imagelink_xpath:
# articleInfo['image_urls'] = self.response.xpath(self.rule.imagelink_xpath).extract()
# if self.rule.filelink_xpath:
# articleInfo['file_urls']= [self.rule.host_url + url for url in self.response.xpath(self.rule.filelink_xpath).extract()]
# print(articleInfo['file_urls'])
#
# return articleInfo
#
#
# '''''
# Created on 2017-7-28
# html代码补全
# "<tag>xxx</tag>"正常
# "<tag/>"正常
# "<tag>xxx"异常-没有关闭标签
# "xxx</tag>"异常-没有开始标签
# @author: bean
# '''
# def _fixhtml(self,html):
# soup = BeautifulSoup(html, 'lxml')
# fixed_html = soup.prettify()
# return fixed_html.replace('\\r\\n', '')
#
# '''
# 格式化时间
# '''
# def _formatetime(self,time):
# regextime = '\d+? [A-Z]{3} \d{4}'
# timeres = re.match(re.compile(regextime), time)
#
# if timeres:
# month = ''
# timetmp = time.split(' ')
# if timetmp[1] == 'JAN':
# month = str('01')
# elif timetmp[1] == 'FEB':
# month = str('02')
# elif timetmp[1] == 'MAR':
# month = str('03')
# elif timetmp[1] == 'APR':
# month = str('04')
# elif timetmp[1] == 'MAY':
# month = str('05')
# elif timetmp[1] == 'JUN':
# month = str('06')
# elif timetmp[1] == 'JUL':
# month = str('07')
# elif timetmp[1] == 'AUG':
# month = str('08')
# elif timetmp[1] == 'SEP':
# month = str('09')
# elif timetmp[1] == 'OCT':
# month = str('10')
# elif timetmp[1] == 'NOV':
# month = str('11')
# elif timetmp[1] == 'DEC':
# month = str('12')
# return '{}-{}-{}'.format(timetmp[2], month, timetmp[0])
#
# else:
# return time
. Output only the next line. | except: |
Using the snippet: <|code_start|> logger = logging.getLogger(gen_restricted_reference.__name__)
reference_handle = pysam.Fastafile(reference)
regions_bedtool = pybedtools.BedTool(regions_bed)
with open(out_reference, "w") as out_fasta:
for region_index, region in enumerate(regions_bedtool, start=1):
sequence = reference_handle.fetch(reference=str(region.chrom), start=region.start, end=region.end)
region_name = str(region_index) if use_short_contigs_names else ("%s_%d_%d" % (str(region.chrom), region.start, region.end) )
if region_index == 1:
out_fasta.write(">{}\n{}".format(region_name, sequence))
else: out_fasta.write("\n>{}\n{}".format(region_name, sequence))
pysam.faidx(out_reference)
logger.info("Lifted over the reference to {}".format(out_reference))
reference_handle.close()
return out_reference
def gen_restricted_vcf(in_vcf, regions_bed, out_vcf, restricted_reference, targeted_samples, flank=0, use_short_contig_names=False):
logger = logging.getLogger(gen_restricted_vcf.__name__)
if not in_vcf:
return None
if not os.path.isfile(in_vcf):
logger.error("%s not found" % in_vcf)
return None
<|code_end|>
, determine the next line of code. You have imports:
import os
import subprocess
import sys
import numpy
import argparse
import pysam
import vcf
import pybedtools
import logging
from collections import defaultdict, OrderedDict
from utils import makedirs
and context (class names, function names, or code) available:
# Path: utils.py
# def makedirs(dirs):
# if type(dirs) == list:
# for d in dirs:
# if not os.path.exists(d):
# os.makedirs(d)
# else:
# if not os.path.exists(dirs):
# os.makedirs(dirs)
. Output only the next line. | reference_handle = pysam.Fastafile(restricted_reference) |
Given the code snippet: <|code_start|>
targets = []
for x in rtorrent_config:
try:
info = parse_config_part(rtorrent_config[x], x)
except RTorrentConfigException as e:
print('Invalid config: ', e)
sys.exit(1)
targets.append(info)
for x in targets:
r = RTorrent(x)
<|code_end|>
, generate the next line using the imports in this file:
from model.rtorrent import RTorrent
from config import rtorrent_config
from lib.config_parser import parse_config_part, RTorrentConfigException
import socket
import sys
and context (functions, classes, or occasionally code) from other files:
# Path: model/rtorrent.py
# class RTorrent(object):
# """
# RTorrent class. This wraps most of the RTorrent *main* functionality
# (read: global functionality) in a class. Think of, current upload and
# download, libTorrent version.
#
# Methods specific to a Torrent can be found in the :ref:`torrent-class`
# class.
# """
#
# # FIXME: If we leave URL at '' xmlrpclib will default to /RPC2 as well.
# def __init__(self, target):
# """
# Initialise the RTorrent object.
# ``target`` is target dict as parsed by parse_config (pyrotorrent.py).
# """
# self.target = target
# self.s = RTorrentXMLRPC(target)
#
# self.hacks()
#
# def hacks(self):
# # Hack in all the methods in _rpc_methods!
# for x, y in _rpc_methods.items():
#
# # caller = create_caller(y[0], create_argcheck(y[2])) # belongs to the
# # argument checking
#
# caller = (lambda name: lambda self, *args: getattr(self.s, name)(*args))(y[0])
# caller.__doc__ = y[1] + '\nOriginal libTorrent method: ``%s``' % y[0]
# setattr(RTorrent, x, types.MethodType(caller, self))
#
# del caller
#
# def __repr__(self):
# return 'RTorrent(%s)' % self.target['name']
#
# def get_download_list(self, _type=''):
# """
# Returns a list of torrents.
# _type defines what is returned. Valid:
#
# * '' (Empty string), 'default'
# * 'complete'
# * 'incomplete'
# * 'started'
# * 'stopped'
# * 'active'
# * 'hashing'
# * 'seeding'
#
# Plus all customly defined views.
# """
# # FIXME: List is not complete(?) + exception should be raised.
# if _type not in ('complete', 'incomplete', 'started', 'stopped',
# 'active', 'hashing', 'seeding', '', 'default'):
# return None
#
# res = self.s.download_list(_type)
#
# # FIXME: We now only have the hashes. Do we also want to fetch all the
# # data per torrent? Or perhaps only the basic info?
#
# return res
#
# def query(self):
# """
# Query returns a new RTorrentQuery object with the target
# from the current RTorrent object.
#
# Use this to execute several (different) calls on the RTorrent class in
# one request. This can increase performance and reduce latency and load.
#
# See :ref:`rtorrentquery-class` on how to use it.
# """
# from lib.rtorrentquery import RTorrentQuery
# return RTorrentQuery(self.target)
#
# Path: config.py
# BASE_URL = '/torrent'
# STATIC_URL = BASE_URL + '/static'
# USE_OWN_HTTPD = False
# FILE_BLOCK_SIZE = 4096
# BACKGROUND_IMAGE = 'cat.jpg'
# USE_AUTH = True
# ENABLE_API = False
# CACHE_TIMEOUT=10
#
# Path: lib/config_parser.py
# def parse_config_part(config_dict, name):
# """
# Parse target configuration.
# """
#
# info = _parse_config_part_connection(config_dict, name)
# info = _parse_config_part_storage(config_dict, info)
#
# return info
#
# class RTorrentConfigException(Exception):
# pass
. Output only the next line. | try: |
Next line prediction: <|code_start|>
targets = []
for x in rtorrent_config:
try:
info = parse_config_part(rtorrent_config[x], x)
except RTorrentConfigException as e:
print('Invalid config: ', e)
sys.exit(1)
<|code_end|>
. Use current file imports:
(from model.rtorrent import RTorrent
from config import rtorrent_config
from lib.config_parser import parse_config_part, RTorrentConfigException
import socket
import sys)
and context including class names, function names, or small code snippets from other files:
# Path: model/rtorrent.py
# class RTorrent(object):
# """
# RTorrent class. This wraps most of the RTorrent *main* functionality
# (read: global functionality) in a class. Think of, current upload and
# download, libTorrent version.
#
# Methods specific to a Torrent can be found in the :ref:`torrent-class`
# class.
# """
#
# # FIXME: If we leave URL at '' xmlrpclib will default to /RPC2 as well.
# def __init__(self, target):
# """
# Initialise the RTorrent object.
# ``target`` is target dict as parsed by parse_config (pyrotorrent.py).
# """
# self.target = target
# self.s = RTorrentXMLRPC(target)
#
# self.hacks()
#
# def hacks(self):
# # Hack in all the methods in _rpc_methods!
# for x, y in _rpc_methods.items():
#
# # caller = create_caller(y[0], create_argcheck(y[2])) # belongs to the
# # argument checking
#
# caller = (lambda name: lambda self, *args: getattr(self.s, name)(*args))(y[0])
# caller.__doc__ = y[1] + '\nOriginal libTorrent method: ``%s``' % y[0]
# setattr(RTorrent, x, types.MethodType(caller, self))
#
# del caller
#
# def __repr__(self):
# return 'RTorrent(%s)' % self.target['name']
#
# def get_download_list(self, _type=''):
# """
# Returns a list of torrents.
# _type defines what is returned. Valid:
#
# * '' (Empty string), 'default'
# * 'complete'
# * 'incomplete'
# * 'started'
# * 'stopped'
# * 'active'
# * 'hashing'
# * 'seeding'
#
# Plus all customly defined views.
# """
# # FIXME: List is not complete(?) + exception should be raised.
# if _type not in ('complete', 'incomplete', 'started', 'stopped',
# 'active', 'hashing', 'seeding', '', 'default'):
# return None
#
# res = self.s.download_list(_type)
#
# # FIXME: We now only have the hashes. Do we also want to fetch all the
# # data per torrent? Or perhaps only the basic info?
#
# return res
#
# def query(self):
# """
# Query returns a new RTorrentQuery object with the target
# from the current RTorrent object.
#
# Use this to execute several (different) calls on the RTorrent class in
# one request. This can increase performance and reduce latency and load.
#
# See :ref:`rtorrentquery-class` on how to use it.
# """
# from lib.rtorrentquery import RTorrentQuery
# return RTorrentQuery(self.target)
#
# Path: config.py
# BASE_URL = '/torrent'
# STATIC_URL = BASE_URL + '/static'
# USE_OWN_HTTPD = False
# FILE_BLOCK_SIZE = 4096
# BACKGROUND_IMAGE = 'cat.jpg'
# USE_AUTH = True
# ENABLE_API = False
# CACHE_TIMEOUT=10
#
# Path: lib/config_parser.py
# def parse_config_part(config_dict, name):
# """
# Parse target configuration.
# """
#
# info = _parse_config_part_connection(config_dict, name)
# info = _parse_config_part_storage(config_dict, info)
#
# return info
#
# class RTorrentConfigException(Exception):
# pass
. Output only the next line. | targets.append(info) |
Here is a snippet: <|code_start|>
targets = []
for x in rtorrent_config:
try:
info = parse_config_part(rtorrent_config[x], x)
except RTorrentConfigException as e:
print('Invalid config: ', e)
sys.exit(1)
targets.append(info)
for x in targets:
r = RTorrent(x)
try:
print('[', x['name'], '] libTorrent version:', r.get_libtorrent_version())
except socket.error as e:
<|code_end|>
. Write the next line using the current file imports:
from model.rtorrent import RTorrent
from config import rtorrent_config
from lib.config_parser import parse_config_part, RTorrentConfigException
import socket
import sys
and context from other files:
# Path: model/rtorrent.py
# class RTorrent(object):
# """
# RTorrent class. This wraps most of the RTorrent *main* functionality
# (read: global functionality) in a class. Think of, current upload and
# download, libTorrent version.
#
# Methods specific to a Torrent can be found in the :ref:`torrent-class`
# class.
# """
#
# # FIXME: If we leave URL at '' xmlrpclib will default to /RPC2 as well.
# def __init__(self, target):
# """
# Initialise the RTorrent object.
# ``target`` is target dict as parsed by parse_config (pyrotorrent.py).
# """
# self.target = target
# self.s = RTorrentXMLRPC(target)
#
# self.hacks()
#
# def hacks(self):
# # Hack in all the methods in _rpc_methods!
# for x, y in _rpc_methods.items():
#
# # caller = create_caller(y[0], create_argcheck(y[2])) # belongs to the
# # argument checking
#
# caller = (lambda name: lambda self, *args: getattr(self.s, name)(*args))(y[0])
# caller.__doc__ = y[1] + '\nOriginal libTorrent method: ``%s``' % y[0]
# setattr(RTorrent, x, types.MethodType(caller, self))
#
# del caller
#
# def __repr__(self):
# return 'RTorrent(%s)' % self.target['name']
#
# def get_download_list(self, _type=''):
# """
# Returns a list of torrents.
# _type defines what is returned. Valid:
#
# * '' (Empty string), 'default'
# * 'complete'
# * 'incomplete'
# * 'started'
# * 'stopped'
# * 'active'
# * 'hashing'
# * 'seeding'
#
# Plus all customly defined views.
# """
# # FIXME: List is not complete(?) + exception should be raised.
# if _type not in ('complete', 'incomplete', 'started', 'stopped',
# 'active', 'hashing', 'seeding', '', 'default'):
# return None
#
# res = self.s.download_list(_type)
#
# # FIXME: We now only have the hashes. Do we also want to fetch all the
# # data per torrent? Or perhaps only the basic info?
#
# return res
#
# def query(self):
# """
# Query returns a new RTorrentQuery object with the target
# from the current RTorrent object.
#
# Use this to execute several (different) calls on the RTorrent class in
# one request. This can increase performance and reduce latency and load.
#
# See :ref:`rtorrentquery-class` on how to use it.
# """
# from lib.rtorrentquery import RTorrentQuery
# return RTorrentQuery(self.target)
#
# Path: config.py
# BASE_URL = '/torrent'
# STATIC_URL = BASE_URL + '/static'
# USE_OWN_HTTPD = False
# FILE_BLOCK_SIZE = 4096
# BACKGROUND_IMAGE = 'cat.jpg'
# USE_AUTH = True
# ENABLE_API = False
# CACHE_TIMEOUT=10
#
# Path: lib/config_parser.py
# def parse_config_part(config_dict, name):
# """
# Parse target configuration.
# """
#
# info = _parse_config_part_connection(config_dict, name)
# info = _parse_config_part_storage(config_dict, info)
#
# return info
#
# class RTorrentConfigException(Exception):
# pass
, which may include functions, classes, or code. Output only the next line. | print('Failed to connect to libTorrent:', str(e)) |
Here is a snippet: <|code_start|>
targets = []
for x in rtorrent_config:
try:
info = parse_config_part(rtorrent_config[x], x)
except RTorrentConfigException as e:
print('Invalid config: ', e)
sys.exit(1)
targets.append(info)
for x in targets:
r = RTorrent(x)
<|code_end|>
. Write the next line using the current file imports:
from model.rtorrent import RTorrent
from config import rtorrent_config
from lib.config_parser import parse_config_part, RTorrentConfigException
import socket
import sys
and context from other files:
# Path: model/rtorrent.py
# class RTorrent(object):
# """
# RTorrent class. This wraps most of the RTorrent *main* functionality
# (read: global functionality) in a class. Think of, current upload and
# download, libTorrent version.
#
# Methods specific to a Torrent can be found in the :ref:`torrent-class`
# class.
# """
#
# # FIXME: If we leave URL at '' xmlrpclib will default to /RPC2 as well.
# def __init__(self, target):
# """
# Initialise the RTorrent object.
# ``target`` is target dict as parsed by parse_config (pyrotorrent.py).
# """
# self.target = target
# self.s = RTorrentXMLRPC(target)
#
# self.hacks()
#
# def hacks(self):
# # Hack in all the methods in _rpc_methods!
# for x, y in _rpc_methods.items():
#
# # caller = create_caller(y[0], create_argcheck(y[2])) # belongs to the
# # argument checking
#
# caller = (lambda name: lambda self, *args: getattr(self.s, name)(*args))(y[0])
# caller.__doc__ = y[1] + '\nOriginal libTorrent method: ``%s``' % y[0]
# setattr(RTorrent, x, types.MethodType(caller, self))
#
# del caller
#
# def __repr__(self):
# return 'RTorrent(%s)' % self.target['name']
#
# def get_download_list(self, _type=''):
# """
# Returns a list of torrents.
# _type defines what is returned. Valid:
#
# * '' (Empty string), 'default'
# * 'complete'
# * 'incomplete'
# * 'started'
# * 'stopped'
# * 'active'
# * 'hashing'
# * 'seeding'
#
# Plus all customly defined views.
# """
# # FIXME: List is not complete(?) + exception should be raised.
# if _type not in ('complete', 'incomplete', 'started', 'stopped',
# 'active', 'hashing', 'seeding', '', 'default'):
# return None
#
# res = self.s.download_list(_type)
#
# # FIXME: We now only have the hashes. Do we also want to fetch all the
# # data per torrent? Or perhaps only the basic info?
#
# return res
#
# def query(self):
# """
# Query returns a new RTorrentQuery object with the target
# from the current RTorrent object.
#
# Use this to execute several (different) calls on the RTorrent class in
# one request. This can increase performance and reduce latency and load.
#
# See :ref:`rtorrentquery-class` on how to use it.
# """
# from lib.rtorrentquery import RTorrentQuery
# return RTorrentQuery(self.target)
#
# Path: config.py
# BASE_URL = '/torrent'
# STATIC_URL = BASE_URL + '/static'
# USE_OWN_HTTPD = False
# FILE_BLOCK_SIZE = 4096
# BACKGROUND_IMAGE = 'cat.jpg'
# USE_AUTH = True
# ENABLE_API = False
# CACHE_TIMEOUT=10
#
# Path: lib/config_parser.py
# def parse_config_part(config_dict, name):
# """
# Parse target configuration.
# """
#
# info = _parse_config_part_connection(config_dict, name)
# info = _parse_config_part_storage(config_dict, info)
#
# return info
#
# class RTorrentConfigException(Exception):
# pass
, which may include functions, classes, or code. Output only the next line. | try: |
Continue the code snippet: <|code_start|>#!/usr/bin/env python3
_default_logger_name = os.path.basename(sys.argv[0]).replace('.py', '')
def add_arguments(argument_parser):
argument_parser.add_argument(
'--quiet', '-q',
action = 'count',
default = 0,
help = 'Be less verbose.',
<|code_end|>
. Use current file imports:
import sys, os, logging
import boto3 # Only here to ensure boto3 loggers are created
import coloredlogs
from socket import gethostname as ghn
from .util import clamp
and context (classes, functions, or code) from other files:
# Path: scripts/assume_role_lib/util.py
# def clamp(low, x, high):
# return low if x < low else high if x > high else x
. Output only the next line. | ) |
Given snippet: <|code_start|>#!/usr/bin/env python3
_default_session_name = os.path.basename(sys.argv[0]).replace('.py', '')
def add_arguments(argument_parser):
argument_parser.add_argument(
'--profile', '-p',
metavar = 'NAME',
default = None,
help = unwrap("""
Use an AWS configuration profile for IAM credentials. If not
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys, os, boto3
from .util import unwrap
and context:
# Path: scripts/assume_role_lib/util.py
# def unwrap(txt):
# return ' '.join(( s.strip() for s in txt.strip().splitlines() ))
which might include code, classes, or functions. Output only the next line. | specified, the default credential search order is used. If a role |
Predict the next line for this snippet: <|code_start|>
class Type (Item):
template = 'out/type.sql'
directory = 'types'
<|code_end|>
with the help of current file imports:
from pg_export.pg_items.item import Item
from pg_export.acl import acl_to_grants
and context from other files:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
, which may contain function names, class names, or code. Output only the next line. | def __init__(self, src, version): |
Given snippet: <|code_start|>
class Function (Item):
template = 'out/function.sql'
template_signature = 'out/_signature.sql'
directory = 'functions'
def __init__(self, src, version):
super(Function, self).__init__(src, version)
self.with_out_args = any(True
for a in self.arguments
if a['mode'] == 'o')
self.arguments_as_table = (len(self.arguments) > 1 and
any(True
for a in self.arguments
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from pg_export.pg_items.item import Item
from pg_export.acl import acl_to_grants
and context:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
which might include code, classes, or functions. Output only the next line. | if a['name'])) |
Given the following code snippet before the placeholder: <|code_start|>
class Function (Item):
template = 'out/function.sql'
template_signature = 'out/_signature.sql'
directory = 'functions'
def __init__(self, src, version):
super(Function, self).__init__(src, version)
self.with_out_args = any(True
<|code_end|>
, predict the next line using imports from the current file:
from pg_export.pg_items.item import Item
from pg_export.acl import acl_to_grants
and context including class names, function names, and sometimes code from other files:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
. Output only the next line. | for a in self.arguments |
Next line prediction: <|code_start|>
class View (Item):
template = 'out/view.sql'
directory = 'views'
def __init__(self, src, version):
super(View, self).__init__(src, version)
self.grants = acl_to_grants(self.acl, 'table', self.full_name)
self.query = self.query[:-1] # drop ";"
if self.kind == 'm':
self.directory = 'materializedviews'
for c in self.columns:
c['grants'] = acl_to_grants(c['acl'],
<|code_end|>
. Use current file imports:
(from pg_export.pg_items.item import Item
from pg_export.acl import acl_to_grants)
and context including class names, function names, or small code snippets from other files:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
. Output only the next line. | 'column', |
Next line prediction: <|code_start|># -*- coding:utf-8 -*-
class Renderer:
def __init__(self, fork, version):
base_path = os.path.join(os.path.dirname(__file__), 'templates')
path = [fork + '.'.join(version)]
for i in reversed(range(1, len(version))):
<|code_end|>
. Use current file imports:
(import sys
import os
from jinja2 import Environment, FileSystemLoader
from pg_export.filters import (untype_default, ljust,
rjust, join_attr, concat_items))
and context including class names, function names, or small code snippets from other files:
# Path: pg_export/filters.py
# def untype_default(default, column_type):
# return default.replace("'::"+column_type, "'") \
# .replace("'::public."+column_type[-1], "'") \
# .replace("'::"+column_type.split('.')[-1], "'")
#
# def ljust(string, width, fillchar):
# return string.ljust(width, fillchar)
#
# def rjust(string, width, fillchar):
# return string.rjust(width, fillchar)
#
# def join_attr(ittr, attribute, delimiter):
# return delimiter.join(i.get(attribute) for i in ittr)
#
# def concat_items(l1, s, l2):
# return ['%s%s%s' % (i1, s, i2) for i1, i2 in zip(l1, l2)]
. Output only the next line. | path.append(fork + '.'.join(version[:i] + ('x',))) |
Continue the code snippet: <|code_start|> def __init__(self, fork, version):
base_path = os.path.join(os.path.dirname(__file__), 'templates')
path = [fork + '.'.join(version)]
for i in reversed(range(1, len(version))):
path.append(fork + '.'.join(version[:i] + ('x',)))
path = [os.path.join(base_path, p) for p in path]
if not any(os.path.isdir(p) for p in path):
raise Exception('Version not suported: template not found:\n' +
'\n'.join(path))
path.append(os.path.join(base_path, 'base'))
self.env = Environment(
loader=FileSystemLoader([
self.join_path(os.path.dirname(__file__), 'templates', p)
for p in path]))
self.env.filters['untype_default'] = untype_default
self.env.filters['ljust'] = ljust
self.env.filters['rjust'] = rjust
self.env.filters['join_attr'] = join_attr
self.env.filters['concat_items'] = concat_items
def join_path(self, *items):
return self.fix_bug_in_windows(os.path.join(*items))
def fix_bug_in_windows(self, path):
<|code_end|>
. Use current file imports:
import sys
import os
from jinja2 import Environment, FileSystemLoader
from pg_export.filters import (untype_default, ljust,
rjust, join_attr, concat_items)
and context (classes, functions, or code) from other files:
# Path: pg_export/filters.py
# def untype_default(default, column_type):
# return default.replace("'::"+column_type, "'") \
# .replace("'::public."+column_type[-1], "'") \
# .replace("'::"+column_type.split('.')[-1], "'")
#
# def ljust(string, width, fillchar):
# return string.ljust(width, fillchar)
#
# def rjust(string, width, fillchar):
# return string.rjust(width, fillchar)
#
# def join_attr(ittr, attribute, delimiter):
# return delimiter.join(i.get(attribute) for i in ittr)
#
# def concat_items(l1, s, l2):
# return ['%s%s%s' % (i1, s, i2) for i1, i2 in zip(l1, l2)]
. Output only the next line. | return path.replace('\\', '/') |
Given the code snippet: <|code_start|> if not any(os.path.isdir(p) for p in path):
raise Exception('Version not suported: template not found:\n' +
'\n'.join(path))
path.append(os.path.join(base_path, 'base'))
self.env = Environment(
loader=FileSystemLoader([
self.join_path(os.path.dirname(__file__), 'templates', p)
for p in path]))
self.env.filters['untype_default'] = untype_default
self.env.filters['ljust'] = ljust
self.env.filters['rjust'] = rjust
self.env.filters['join_attr'] = join_attr
self.env.filters['concat_items'] = concat_items
def join_path(self, *items):
return self.fix_bug_in_windows(os.path.join(*items))
def fix_bug_in_windows(self, path):
return path.replace('\\', '/')
def render(self, template_name, context):
try:
template_name = self.fix_bug_in_windows(template_name)
res = self.env.get_template(template_name).render(context)
except Exception:
print("Error on render template:", template_name, file=sys.stderr)
raise
<|code_end|>
, generate the next line using the imports in this file:
import sys
import os
from jinja2 import Environment, FileSystemLoader
from pg_export.filters import (untype_default, ljust,
rjust, join_attr, concat_items)
and context (functions, classes, or occasionally code) from other files:
# Path: pg_export/filters.py
# def untype_default(default, column_type):
# return default.replace("'::"+column_type, "'") \
# .replace("'::public."+column_type[-1], "'") \
# .replace("'::"+column_type.split('.')[-1], "'")
#
# def ljust(string, width, fillchar):
# return string.ljust(width, fillchar)
#
# def rjust(string, width, fillchar):
# return string.rjust(width, fillchar)
#
# def join_attr(ittr, attribute, delimiter):
# return delimiter.join(i.get(attribute) for i in ittr)
#
# def concat_items(l1, s, l2):
# return ['%s%s%s' % (i1, s, i2) for i1, i2 in zip(l1, l2)]
. Output only the next line. | return res |
Here is a snippet: <|code_start|> self.join_path(os.path.dirname(__file__), 'templates', p)
for p in path]))
self.env.filters['untype_default'] = untype_default
self.env.filters['ljust'] = ljust
self.env.filters['rjust'] = rjust
self.env.filters['join_attr'] = join_attr
self.env.filters['concat_items'] = concat_items
def join_path(self, *items):
return self.fix_bug_in_windows(os.path.join(*items))
def fix_bug_in_windows(self, path):
return path.replace('\\', '/')
def render(self, template_name, context):
try:
template_name = self.fix_bug_in_windows(template_name)
res = self.env.get_template(template_name).render(context)
except Exception:
print("Error on render template:", template_name, file=sys.stderr)
raise
return res
def render_to_file(self, template_name, context, file_name):
if isinstance(file_name, tuple):
file_name = self.join_path(*file_name)
if os.path.isfile(file_name):
open(file_name, 'a', newline='\n').write('\n')
open(file_name, 'ab').write(
<|code_end|>
. Write the next line using the current file imports:
import sys
import os
from jinja2 import Environment, FileSystemLoader
from pg_export.filters import (untype_default, ljust,
rjust, join_attr, concat_items)
and context from other files:
# Path: pg_export/filters.py
# def untype_default(default, column_type):
# return default.replace("'::"+column_type, "'") \
# .replace("'::public."+column_type[-1], "'") \
# .replace("'::"+column_type.split('.')[-1], "'")
#
# def ljust(string, width, fillchar):
# return string.ljust(width, fillchar)
#
# def rjust(string, width, fillchar):
# return string.rjust(width, fillchar)
#
# def join_attr(ittr, attribute, delimiter):
# return delimiter.join(i.get(attribute) for i in ittr)
#
# def concat_items(l1, s, l2):
# return ['%s%s%s' % (i1, s, i2) for i1, i2 in zip(l1, l2)]
, which may include functions, classes, or code. Output only the next line. | self.render(template_name, context).encode('utf8')) |
Predict the next line after this snippet: <|code_start|>
path = [fork + '.'.join(version)]
for i in reversed(range(1, len(version))):
path.append(fork + '.'.join(version[:i] + ('x',)))
path = [os.path.join(base_path, p) for p in path]
if not any(os.path.isdir(p) for p in path):
raise Exception('Version not suported: template not found:\n' +
'\n'.join(path))
path.append(os.path.join(base_path, 'base'))
self.env = Environment(
loader=FileSystemLoader([
self.join_path(os.path.dirname(__file__), 'templates', p)
for p in path]))
self.env.filters['untype_default'] = untype_default
self.env.filters['ljust'] = ljust
self.env.filters['rjust'] = rjust
self.env.filters['join_attr'] = join_attr
self.env.filters['concat_items'] = concat_items
def join_path(self, *items):
return self.fix_bug_in_windows(os.path.join(*items))
def fix_bug_in_windows(self, path):
return path.replace('\\', '/')
<|code_end|>
using the current file's imports:
import sys
import os
from jinja2 import Environment, FileSystemLoader
from pg_export.filters import (untype_default, ljust,
rjust, join_attr, concat_items)
and any relevant context from other files:
# Path: pg_export/filters.py
# def untype_default(default, column_type):
# return default.replace("'::"+column_type, "'") \
# .replace("'::public."+column_type[-1], "'") \
# .replace("'::"+column_type.split('.')[-1], "'")
#
# def ljust(string, width, fillchar):
# return string.ljust(width, fillchar)
#
# def rjust(string, width, fillchar):
# return string.rjust(width, fillchar)
#
# def join_attr(ittr, attribute, delimiter):
# return delimiter.join(i.get(attribute) for i in ittr)
#
# def concat_items(l1, s, l2):
# return ['%s%s%s' % (i1, s, i2) for i1, i2 in zip(l1, l2)]
. Output only the next line. | def render(self, template_name, context): |
Given the code snippet: <|code_start|>
class Server (Item):
template = 'out/server.sql'
directory = 'servers'
schema = '.'
<|code_end|>
, generate the next line using the imports in this file:
from pg_export.pg_items.item import Item
from pg_export.acl import acl_to_grants
and context (functions, classes, or occasionally code) from other files:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
. Output only the next line. | def __init__(self, src, version): |
Using the snippet: <|code_start|>
class Server (Item):
template = 'out/server.sql'
directory = 'servers'
schema = '.'
def __init__(self, src, version):
<|code_end|>
, determine the next line of code. You have imports:
from pg_export.pg_items.item import Item
from pg_export.acl import acl_to_grants
and context (class names, function names, or code) available:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
. Output only the next line. | super(Server, self).__init__(src, version) |
Here is a snippet: <|code_start|>
class Table (Item):
template = 'out/table.sql'
directory = 'tables'
def __init__(self, src, version):
super(Table, self).__init__(src, version)
self.primary_key = self.get_constraints('p')
self.primary_key = self.primary_key and self.primary_key[0] or None
self.foreign_keys = self.get_constraints('f')
self.uniques = self.get_constraints('u')
self.checks = self.get_constraints('c')
self.exclusions = self.get_constraints('x')
self.triggers = self.triggers or []
self.grants = acl_to_grants(self.acl, 'table', self.full_name)
for i in self.inherits:
i['table'] = get_full_name(i['table_schema'], i['table_name'])
if self.attach:
self.attach.update(self.inherits[0])
<|code_end|>
. Write the next line using the current file imports:
from pg_export.pg_items.item import Item
from pg_export.filters import get_full_name
from pg_export.acl import acl_to_grants
and context from other files:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/filters.py
# def get_full_name(schema, name):
# if schema in ('public', 'pg_catalog'):
# return name
# return '%s.%s' % (schema, name)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
, which may include functions, classes, or code. Output only the next line. | self.inherits = [] |
Continue the code snippet: <|code_start|>
class Table (Item):
template = 'out/table.sql'
directory = 'tables'
def __init__(self, src, version):
super(Table, self).__init__(src, version)
self.primary_key = self.get_constraints('p')
<|code_end|>
. Use current file imports:
from pg_export.pg_items.item import Item
from pg_export.filters import get_full_name
from pg_export.acl import acl_to_grants
and context (classes, functions, or code) from other files:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/filters.py
# def get_full_name(schema, name):
# if schema in ('public', 'pg_catalog'):
# return name
# return '%s.%s' % (schema, name)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
. Output only the next line. | self.primary_key = self.primary_key and self.primary_key[0] or None |
Based on the snippet: <|code_start|>
class Table (Item):
template = 'out/table.sql'
directory = 'tables'
def __init__(self, src, version):
super(Table, self).__init__(src, version)
self.primary_key = self.get_constraints('p')
self.primary_key = self.primary_key and self.primary_key[0] or None
self.foreign_keys = self.get_constraints('f')
self.uniques = self.get_constraints('u')
self.checks = self.get_constraints('c')
<|code_end|>
, predict the immediate next line with the help of imports:
from pg_export.pg_items.item import Item
from pg_export.filters import get_full_name
from pg_export.acl import acl_to_grants
and context (classes, functions, sometimes code) from other files:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/filters.py
# def get_full_name(schema, name):
# if schema in ('public', 'pg_catalog'):
# return name
# return '%s.%s' % (schema, name)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
. Output only the next line. | self.exclusions = self.get_constraints('x') |
Using the snippet: <|code_start|>
class Operator (Item):
template = 'out/operator.sql'
directory = 'operators'
<|code_end|>
, determine the next line of code. You have imports:
from pg_export.pg_items.item import Item
and context (class names, function names, or code) available:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
. Output only the next line. | def dump(self, root): |
Given snippet: <|code_start|>
def main():
arg_parser = argparse.ArgumentParser(
description='Export structure of databese to object '
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import shutil
import argparse
import psycopg2
from pg_export.extractor import Extractor
and context:
# Path: pg_export/extractor.py
# class Extractor:
# def __init__(self, connect):
# self.connect = connect
# self.INDOPTION_DESC = 0x0001 # src/backend/catalog/pg_index_d.h
# self.INDOPTION_NULLS_FIRST = 0x0002 # src/backend/catalog/pg_index_d.h
# self.get_last_builtin_oid()
# self.create_renderer()
#
# def sql_execute(self, query, **query_params):
# c = self.connect.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
# c.execute(query, query_params)
# res = c.fetchall()
# return res
#
# def get_version(self):
# version = self.sql_execute('select version()')[0]['version']
# match = re.match('.*Greenplum Database (\\d+).(\\d+).(\\d+)', version)
# if match:
# version = 'gp_', match.groups()
# else:
# match = re.match('PostgreSQL (\\d+).(\\d+)', version)
# if match:
# version = 'pg_', match.groups()
# else:
# raise Exception('Could not determine the version number: ' +
# version)
# return version
#
# def get_last_builtin_oid(self):
# """
# postgresql-11.5/src/include/access/transam.h:
# #define FirstNormalObjectId 16384
#
# postgresql-11.5/src/bin/pg_dump/pg_dump.c:
# g_last_builtin_oid = FirstNormalObjectId - 1;
# """
# self.last_builtin_oid = 16384 - 1 # src/include/access/transam.h
#
# def create_renderer(self):
# fork, version = self.get_version()
# self.renderer = Renderer(fork, version)
#
# def extract_structure(self):
# self.src = self.sql_execute(
# self.renderer.render(
# os.path.join('in', 'database.sql'),
# self.__dict__))[0]['src']
#
# self.casts = [Cast(i, self.renderer)
# for i in self.src['casts'] or []]
# self.extensions = [Extension(i, self.renderer)
# for i in self.src['extensions'] or []]
# self.languages = [Language(i, self.renderer)
# for i in self.src['languages'] or []]
# self.servers = [Server(i, self.renderer)
# for i in self.src['servers'] or []]
# self.schemas = [Schema(i, self.renderer)
# for i in self.src['schemas'] or []]
# self.types = [Type(i, self.renderer)
# for i in self.src['types'] or []]
# self.tables = [Table(i, self.renderer)
# for i in self.src['tables'] or []]
# self.views = [View(i, self.renderer)
# for i in self.src['views'] or []]
# self.sequences = [Sequence(i, self.renderer)
# for i in self.src['sequences'] or []]
# self.functions = [Function(i, self.renderer)
# for i in self.src['functions'] or []]
# self.aggregates = [Aggregate(i, self.renderer)
# for i in self.src['aggregates'] or []]
# self.operators = [Operator(i, self.renderer)
# for i in self.src['operators'] or []]
#
# def dump_structure(self, root):
# self.extract_structure()
#
# for c in self.casts:
# c.dump(root)
# for e in self.extensions:
# e.dump(root)
# for i in self.languages:
# i.dump(root)
# for s in self.servers:
# s.dump(root)
#
# root = os.path.join(root, 'schemas')
# os.mkdir(root)
#
# for s in self.schemas:
# s.dump(root)
# for t in self.types:
# t.dump(root)
# for t in self.tables:
# t.dump(root)
# for v in self.views:
# v.dump(root)
# for s in self.sequences:
# s.dump(root)
# for f in self.functions:
# f.dump(root)
# for a in self.aggregates:
# a.dump(root)
# for o in self.operators:
# o.dump(root)
#
# def dump_directory(self, root):
# tables = self.sql_execute(directory_sql)
# if not tables:
# return
#
# root = os.path.join(root, 'data')
# os.mkdir(root)
#
# for s in set(t['schema'] for t in tables):
# os.mkdir(os.path.join(root, s))
#
# for t in tables:
# table_name = '.'.join([t['schema'],
# t['name']]).replace('public.', '')
#
# if t['cond'] and t['cond'].startswith('select'):
# query = t['cond']
# else:
# query = 'select * from %s %s order by 1' % (
# table_name,
# 'where ' + t['cond'] if t['cond'] else ''
# )
#
# with open(os.path.join(root,
# t['schema'],
# t['name'] + '.sql'),
# 'w',
# encoding="utf-8") as f:
# f.write('copy %s from stdin;\n' % table_name)
# self.connect.cursor().copy_to(f, '(%s)' % query)
# f.write('\\.\n')
which might include code, classes, or functions. Output only the next line. | 'files for control version system', |
Given the code snippet: <|code_start|>
class Schema (Item):
template = 'out/schema.sql'
directory = ''
<|code_end|>
, generate the next line using the imports in this file:
from pg_export.pg_items.item import Item
from pg_export.acl import acl_to_grants
and context (functions, classes, or occasionally code) from other files:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
. Output only the next line. | def __init__(self, src, version): |
Continue the code snippet: <|code_start|>
class Schema (Item):
template = 'out/schema.sql'
directory = ''
def __init__(self, src, version):
super(Schema, self).__init__(src, version)
<|code_end|>
. Use current file imports:
from pg_export.pg_items.item import Item
from pg_export.acl import acl_to_grants
and context (classes, functions, or code) from other files:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
#
# Path: pg_export/acl.py
# def acl_to_grants(acl, obj_type, obj_name, subobj_name=''):
# if not acl:
# return ''
#
# res = []
# if obj_type in ['function', 'procedure']:
# for fpa in function_public_acl:
# if fpa in acl:
# acl.remove(fpa)
# break
# else:
# res.append(
# 'revoke all on %(obj_type)s %(obj_name)s from public;'
# % locals())
# for a in sorted('public' + i if i.startswith('=') else i
# for i in acl):
# role, perm = a.split('/')[0].split('=') # format: role=perm/grantor
# if role in ['postgres', 'gpadmin']:
# continue
#
# if subobj_name: # column
# subobj_name = '(%s) ' % subobj_name
#
# perm, gr_opt = resolve_perm(obj_type, perm)
# if obj_type == 'column':
# obj_type = 'table'
# res.append(
# 'grant %(perm)s %(subobj_name)son %(obj_type)s '
# '%(obj_name)s to %(role)s;'
# % locals())
# return '\n'.join(res)
. Output only the next line. | self.schema = self.name |
Given the code snippet: <|code_start|>
class Cast (Item):
template = 'out/cast.sql'
directory = 'casts'
schema = '.'
<|code_end|>
, generate the next line using the imports in this file:
from pg_export.pg_items.item import Item
and context (functions, classes, or occasionally code) from other files:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
. Output only the next line. | def __init__(self, src, version): |
Using the snippet: <|code_start|>
class Extension (Item):
template = 'out/extension.sql'
directory = 'extensions'
<|code_end|>
, determine the next line of code. You have imports:
from pg_export.pg_items.item import Item
and context (class names, function names, or code) available:
# Path: pg_export/pg_items/item.py
# class Item (object):
# template = None
# directory = None
# ext = '.sql'
#
# def __init__(self, src, renderer):
# self.__dict__.update(src)
# self.renderer = renderer
# if 'schema' in self.__dict__ and 'name' in self.__dict__:
# self.full_name = filters.get_full_name(self.schema, self.name)
#
# def dump(self, root):
# if not os.path.isdir(os.path.join(root, self.schema, self.directory)):
# os.mkdir(os.path.join(root, self.schema, self.directory))
# self.renderer.render_to_file(
# self.template,
# self.__dict__,
# (root, self.schema, self.directory,
# self.name.replace('"', '') + self.ext))
#
# def render(self, template):
# return self.renderer.render(template, self.__dict__)
. Output only the next line. | schema = '.' |
Based on the snippet: <|code_start|>
class Aggregate (Function):
template = 'out/aggregate.sql'
directory = 'aggregates'
columns = []
returns_type = None
returns_type_name = ''
language = 'sql'
<|code_end|>
, predict the immediate next line with the help of imports:
from pg_export.pg_items.function import Function
and context (classes, functions, sometimes code) from other files:
# Path: pg_export/pg_items/function.py
# class Function (Item):
# template = 'out/function.sql'
# template_signature = 'out/_signature.sql'
# directory = 'functions'
#
# def __init__(self, src, version):
# super(Function, self).__init__(src, version)
#
# self.with_out_args = any(True
# for a in self.arguments
# if a['mode'] == 'o')
# self.arguments_as_table = (len(self.arguments) > 1 and
# any(True
# for a in self.arguments
# if a['name']))
# self.argument_max_length = max(
# [len('OUT' if a['mode'] == 'o' else
# 'INOUT' if a['mode'] == 'b' else
# 'VARIADIC' if a['mode'] == 'VARIADIC' else
# a['name'])
# for a in self.arguments
# if a['name'] and self.arguments_as_table] or [0]
# )
# self.column_max_length = max(
# [len(c['name'])
# for c in self.columns
# if c['name']] or [0]
# )
# if self.columns:
# self.returns_type = 'table'
# else:
# self.returns_type = self.returns_type_name.replace('public.', '')
# self.signature = self.render(self.template_signature)
# self.ext = '.' + self.language
# if self.returns_type == 'trigger':
# self.directory = 'triggers'
# if self.kind == 'p':
# self.directory = 'procedures'
# self.grants = acl_to_grants(
# self.acl,
# 'procedure' if self.kind == 'p' else 'function',
# self.signature)
. Output only the next line. | kind = 'a' |
Given the following code snippet before the placeholder: <|code_start|> target_cmd = 'ls'
timeout_secs = 3
self.assertEqual((['testdata/sync/fuzz000/fuzzer_stats'], []),
afl_vcrash.verify_samples(num_threads, samples, target_cmd, timeout_secs))
# test for timeout detection
num_threads = 1
samples = ['testdata/sync/fuzz000/fuzzer_stats'] # invalid (non-crashing) sample
target_cmd = 'python testdata/dummy_process/dummyproc.py'
timeout_secs = 1
self.assertEqual(([], ['testdata/sync/fuzz000/fuzzer_stats']),
afl_vcrash.verify_samples(num_threads, samples, target_cmd, timeout_secs))
def test_remove_samples(self):
# fail
samples = ['testdata/invalid']
with self.assertRaises(FileNotFoundError):
afl_vcrash.remove_samples(samples, False)
# success
open('testdata/invalid', 'a').close()
self.assertEqual(1, afl_vcrash.remove_samples(samples, False))
def test_build_target_cmd(self):
# fail
target_cmdline = ['/some/path/to/invalid/target/binary', '--some-opt', '--some-other-opt']
with self.assertRaises(SystemExit) as se:
afl_vcrash.build_target_cmd(target_cmdline)
<|code_end|>
, predict the next line using imports from the current file:
from afl_utils import afl_vcrash
import os
import unittest
and context including class names, function names, and sometimes code from other files:
# Path: afl_utils/afl_vcrash.py
# def show_info():
# def verify_samples(num_threads, samples, target_cmd, timeout_secs=60):
# def remove_samples(crash_samples, quiet=True):
# def build_target_cmd(target_cmdline):
# def main(argv):
. Output only the next line. | self.assertEqual(2, se.exception.code) |
Continue the code snippet: <|code_start|> 'fuzzers': len(sum_stats),
'fuzzer_pid': 0,
'execs_done': 0,
'execs_per_sec': 0,
'paths_total': 0,
'paths_favored': 0,
'pending_favs': 0,
'pending_total': 0,
'unique_crashes': 0,
'unique_hangs': 0,
'afl_banner': 0,
'host': socket.gethostname()[:10]
}
for k in sum_stats.keys():
if k not in ['afl_banner', 'host']:
diff_stat[k] = sum_stats[k] - old_stats[k]
else:
diff_stat[k] = sum_stats[k]
return diff_stat
def prettify_stat(stat, dstat, console=True):
_stat = stat.copy()
_dstat = dstat.copy()
_stat['execs_done'] /= 1e6
_dstat['execs_done'] /= 1e6
if _dstat['fuzzer_pid'] == _dstat['fuzzers'] == 0:
<|code_end|>
. Use current file imports:
import argparse
import simplejson as json
import json
import os
import sys
import socket
import twitter
import afl_utils
from urllib.error import URLError
from afl_utils.AflPrettyPrint import clr, print_ok, print_warn, print_err
from db_connectors import con_sqlite
and context (classes, functions, or code) from other files:
# Path: afl_utils/AflPrettyPrint.py
# class clr:
# # taken from AFL's debug.h
# BLK = "\x1b[0;30m"
# RED = "\x1b[0;31m"
# GRN = "\x1b[0;32m"
# BRN = "\x1b[0;33m"
# BLU = "\x1b[0;34m"
# MGN = "\x1b[0;35m"
# CYA = "\x1b[0;36m"
# NOR = "\x1b[0;37m"
# GRA = "\x1b[1;30m"
# LRD = "\x1b[1;31m"
# LGN = "\x1b[1;32m"
# YEL = "\x1b[1;33m"
# LBL = "\x1b[1;34m"
# PIN = "\x1b[1;35m"
# LCY = "\x1b[1;36m"
# BRI = "\x1b[1;37m"
# RST = "\x1b[0m"
#
# def print_ok(msg_str):
# print("{0}[*] {1}{2}".format(clr.LGN, clr.RST, msg_str))
#
# def print_warn(msg_str):
# print("{0}[!] {1}{2}".format(clr.YEL, clr.RST, msg_str))
#
# def print_err(msg_str):
# print("{0}[!] {1}{2}".format(clr.LRD, clr.RST, msg_str))
#
# Path: db_connectors/con_sqlite.py
# class sqliteConnector:
# def __init__(self, database_path, verbose=True):
# def init_database(self, table, table_spec):
# def dataset_exists(self, table, dataset, compare_fields):
# def insert_dataset(self, table, dataset):
# def commit_close(self):
. Output only the next line. | ds_alive = "" |
Given the following code snippet before the placeholder: <|code_start|> if not os.path.isdir(fuzzer_dir):
print_warn("Invalid fuzzing directory specified: " + clr.GRA + "%s" % fuzzer_dir + clr.RST)
return None
fuzzer_stats = []
if os.path.isfile(os.path.join(fuzzer_dir, "fuzzer_stats")):
# single afl-fuzz job
stats = parse_stat_file(os.path.join(fuzzer_dir, "fuzzer_stats"), summary)
if stats:
fuzzer_stats.append(stats)
else:
fuzzer_inst = []
for fdir in os.listdir(fuzzer_dir):
if os.path.isdir(os.path.join(fuzzer_dir, fdir)):
fuzzer_inst.append(os.path.join(fuzzer_dir, fdir, "fuzzer_stats"))
for stat_file in fuzzer_inst:
stats = parse_stat_file(stat_file, summary)
if stats:
fuzzer_stats.append(stats)
return fuzzer_stats
def summarize_stats(stats):
sum_stat = {
'fuzzers': len(stats),
'fuzzer_pid': 0,
'execs_done': 0,
<|code_end|>
, predict the next line using imports from the current file:
import argparse
import simplejson as json
import json
import os
import sys
import socket
import twitter
import afl_utils
from urllib.error import URLError
from afl_utils.AflPrettyPrint import clr, print_ok, print_warn, print_err
from db_connectors import con_sqlite
and context including class names, function names, and sometimes code from other files:
# Path: afl_utils/AflPrettyPrint.py
# class clr:
# # taken from AFL's debug.h
# BLK = "\x1b[0;30m"
# RED = "\x1b[0;31m"
# GRN = "\x1b[0;32m"
# BRN = "\x1b[0;33m"
# BLU = "\x1b[0;34m"
# MGN = "\x1b[0;35m"
# CYA = "\x1b[0;36m"
# NOR = "\x1b[0;37m"
# GRA = "\x1b[1;30m"
# LRD = "\x1b[1;31m"
# LGN = "\x1b[1;32m"
# YEL = "\x1b[1;33m"
# LBL = "\x1b[1;34m"
# PIN = "\x1b[1;35m"
# LCY = "\x1b[1;36m"
# BRI = "\x1b[1;37m"
# RST = "\x1b[0m"
#
# def print_ok(msg_str):
# print("{0}[*] {1}{2}".format(clr.LGN, clr.RST, msg_str))
#
# def print_warn(msg_str):
# print("{0}[!] {1}{2}".format(clr.YEL, clr.RST, msg_str))
#
# def print_err(msg_str):
# print("{0}[!] {1}{2}".format(clr.LRD, clr.RST, msg_str))
#
# Path: db_connectors/con_sqlite.py
# class sqliteConnector:
# def __init__(self, database_path, verbose=True):
# def init_database(self, table, table_spec):
# def dataset_exists(self, table, dataset, compare_fields):
# def insert_dataset(self, table, dataset):
# def commit_close(self):
. Output only the next line. | 'execs_per_sec': 0, |
Using the snippet: <|code_start|> stats[k] = l[19:].strip(": %\r\n")
return stats
except FileNotFoundError as e:
print_warn("Stat file " + clr.GRA + "%s" % e.filename + clr.RST + " not found!")
return None
def load_stats(fuzzer_dir, summary=True):
fuzzer_dir = os.path.abspath(os.path.expanduser(fuzzer_dir))
if not os.path.isdir(fuzzer_dir):
print_warn("Invalid fuzzing directory specified: " + clr.GRA + "%s" % fuzzer_dir + clr.RST)
return None
fuzzer_stats = []
if os.path.isfile(os.path.join(fuzzer_dir, "fuzzer_stats")):
# single afl-fuzz job
stats = parse_stat_file(os.path.join(fuzzer_dir, "fuzzer_stats"), summary)
if stats:
fuzzer_stats.append(stats)
else:
fuzzer_inst = []
for fdir in os.listdir(fuzzer_dir):
if os.path.isdir(os.path.join(fuzzer_dir, fdir)):
fuzzer_inst.append(os.path.join(fuzzer_dir, fdir, "fuzzer_stats"))
for stat_file in fuzzer_inst:
<|code_end|>
, determine the next line of code. You have imports:
import argparse
import simplejson as json
import json
import os
import sys
import socket
import twitter
import afl_utils
from urllib.error import URLError
from afl_utils.AflPrettyPrint import clr, print_ok, print_warn, print_err
from db_connectors import con_sqlite
and context (class names, function names, or code) available:
# Path: afl_utils/AflPrettyPrint.py
# class clr:
# # taken from AFL's debug.h
# BLK = "\x1b[0;30m"
# RED = "\x1b[0;31m"
# GRN = "\x1b[0;32m"
# BRN = "\x1b[0;33m"
# BLU = "\x1b[0;34m"
# MGN = "\x1b[0;35m"
# CYA = "\x1b[0;36m"
# NOR = "\x1b[0;37m"
# GRA = "\x1b[1;30m"
# LRD = "\x1b[1;31m"
# LGN = "\x1b[1;32m"
# YEL = "\x1b[1;33m"
# LBL = "\x1b[1;34m"
# PIN = "\x1b[1;35m"
# LCY = "\x1b[1;36m"
# BRI = "\x1b[1;37m"
# RST = "\x1b[0m"
#
# def print_ok(msg_str):
# print("{0}[*] {1}{2}".format(clr.LGN, clr.RST, msg_str))
#
# def print_warn(msg_str):
# print("{0}[!] {1}{2}".format(clr.YEL, clr.RST, msg_str))
#
# def print_err(msg_str):
# print("{0}[!] {1}{2}".format(clr.LRD, clr.RST, msg_str))
#
# Path: db_connectors/con_sqlite.py
# class sqliteConnector:
# def __init__(self, database_path, verbose=True):
# def init_database(self, table, table_spec):
# def dataset_exists(self, table, dataset, compare_fields):
# def insert_dataset(self, table, dataset):
# def commit_close(self):
. Output only the next line. | stats = parse_stat_file(stat_file, summary) |
Using the snippet: <|code_start|> if not database.dataset_exists(table, fuzzer, ['last_update', 'afl_banner']):
database.insert_dataset(table, fuzzer)
def fetch_stats(config_settings, twitter_inst):
stat_dict = dict()
for fuzzer in config_settings['fuzz_dirs']:
stats = load_stats(fuzzer)
if not stats:
continue
sum_stats = summarize_stats(stats)
try:
with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'r') as f:
old_stats = json.load(f)
except FileNotFoundError:
old_stats = sum_stats.copy()
# initialize/update stat_dict
stat_dict[fuzzer] = (sum_stats, old_stats)
stat_change = diff_stats(sum_stats, old_stats)
with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'w') as f:
json.dump(sum_stats, f)
print(prettify_stat(sum_stats, stat_change, True))
<|code_end|>
, determine the next line of code. You have imports:
import argparse
import simplejson as json
import json
import os
import sys
import socket
import twitter
import afl_utils
from urllib.error import URLError
from afl_utils.AflPrettyPrint import clr, print_ok, print_warn, print_err
from db_connectors import con_sqlite
and context (class names, function names, or code) available:
# Path: afl_utils/AflPrettyPrint.py
# class clr:
# # taken from AFL's debug.h
# BLK = "\x1b[0;30m"
# RED = "\x1b[0;31m"
# GRN = "\x1b[0;32m"
# BRN = "\x1b[0;33m"
# BLU = "\x1b[0;34m"
# MGN = "\x1b[0;35m"
# CYA = "\x1b[0;36m"
# NOR = "\x1b[0;37m"
# GRA = "\x1b[1;30m"
# LRD = "\x1b[1;31m"
# LGN = "\x1b[1;32m"
# YEL = "\x1b[1;33m"
# LBL = "\x1b[1;34m"
# PIN = "\x1b[1;35m"
# LCY = "\x1b[1;36m"
# BRI = "\x1b[1;37m"
# RST = "\x1b[0m"
#
# def print_ok(msg_str):
# print("{0}[*] {1}{2}".format(clr.LGN, clr.RST, msg_str))
#
# def print_warn(msg_str):
# print("{0}[!] {1}{2}".format(clr.YEL, clr.RST, msg_str))
#
# def print_err(msg_str):
# print("{0}[!] {1}{2}".format(clr.LRD, clr.RST, msg_str))
#
# Path: db_connectors/con_sqlite.py
# class sqliteConnector:
# def __init__(self, database_path, verbose=True):
# def init_database(self, table, table_spec):
# def dataset_exists(self, table, dataset, compare_fields):
# def insert_dataset(self, table, dataset):
# def commit_close(self):
. Output only the next line. | tweet = prettify_stat(sum_stats, stat_change, False) |
Predict the next line for this snippet: <|code_start|> table = 'aflutils_fuzzerstats'
database.init_database(table, db_table_spec)
if not database.dataset_exists(table, fuzzer, ['last_update', 'afl_banner']):
database.insert_dataset(table, fuzzer)
def fetch_stats(config_settings, twitter_inst):
stat_dict = dict()
for fuzzer in config_settings['fuzz_dirs']:
stats = load_stats(fuzzer)
if not stats:
continue
sum_stats = summarize_stats(stats)
try:
with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'r') as f:
old_stats = json.load(f)
except FileNotFoundError:
old_stats = sum_stats.copy()
# initialize/update stat_dict
stat_dict[fuzzer] = (sum_stats, old_stats)
stat_change = diff_stats(sum_stats, old_stats)
with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'w') as f:
json.dump(sum_stats, f)
<|code_end|>
with the help of current file imports:
import argparse
import simplejson as json
import json
import os
import sys
import socket
import twitter
import afl_utils
from urllib.error import URLError
from afl_utils.AflPrettyPrint import clr, print_ok, print_warn, print_err
from db_connectors import con_sqlite
and context from other files:
# Path: afl_utils/AflPrettyPrint.py
# class clr:
# # taken from AFL's debug.h
# BLK = "\x1b[0;30m"
# RED = "\x1b[0;31m"
# GRN = "\x1b[0;32m"
# BRN = "\x1b[0;33m"
# BLU = "\x1b[0;34m"
# MGN = "\x1b[0;35m"
# CYA = "\x1b[0;36m"
# NOR = "\x1b[0;37m"
# GRA = "\x1b[1;30m"
# LRD = "\x1b[1;31m"
# LGN = "\x1b[1;32m"
# YEL = "\x1b[1;33m"
# LBL = "\x1b[1;34m"
# PIN = "\x1b[1;35m"
# LCY = "\x1b[1;36m"
# BRI = "\x1b[1;37m"
# RST = "\x1b[0m"
#
# def print_ok(msg_str):
# print("{0}[*] {1}{2}".format(clr.LGN, clr.RST, msg_str))
#
# def print_warn(msg_str):
# print("{0}[!] {1}{2}".format(clr.YEL, clr.RST, msg_str))
#
# def print_err(msg_str):
# print("{0}[!] {1}{2}".format(clr.LRD, clr.RST, msg_str))
#
# Path: db_connectors/con_sqlite.py
# class sqliteConnector:
# def __init__(self, database_path, verbose=True):
# def init_database(self, table, table_spec):
# def dataset_exists(self, table, dataset, compare_fields):
# def insert_dataset(self, table, dataset):
# def commit_close(self):
, which may contain function names, class names, or code. Output only the next line. | print(prettify_stat(sum_stats, stat_change, True)) |
Using the snippet: <|code_start|> self.assertIsNone(afl_minimize.main(argv))
argv = ['afl-minimize', '-c', collection_dir, 'testdata/sync', '--', '/bin/echo']
self.assertIsNone(afl_minimize.main(argv))
argv = ['afl-minimize', '-c', collection_new, 'testdata/sync', '--', '/bin/echo']
self.assertIsNone(afl_minimize.main(argv))
argv = ['afl-minimize', '-c', collection_new, '--cmin', '--tmin', 'testdata/sync', '--', '/bin/echo']
self.assertIsNone(afl_minimize.main(argv))
argv = ['afl-minimize', '-c', collection_new, '--tmin', 'testdata/sync', '--', '/bin/echo']
self.assertIsNone(afl_minimize.main(argv))
argv = ['afl-minimize', '-c', collection_new, '--cmin', '--tmin', '--dry-run', 'testdata/sync', '--', '/bin/echo']
self.assertIsNone(afl_minimize.main(argv))
argv = ['afl-minimize', '-c', collection_new, '--cmin', '--dry-run', 'testdata/sync', '--', '/bin/echo']
self.assertIsNone(afl_minimize.main(argv))
argv = ['afl-minimize', '-c', collection_new, '--tmin', '--dry-run', 'testdata/sync', '--', '/bin/echo']
self.assertIsNone(afl_minimize.main(argv))
argv = ['afl-minimize', '-c', collection_new, '--dry-run', 'testdata/sync', '--', '/bin/echo']
self.assertIsNone(afl_minimize.main(argv))
argv = ['afl-minimize', '-c', collection_new, '--cmin', '--tmin', '--reseed', 'testdata/sync', '--',
'/bin/echo']
self.assertIsNone(afl_minimize.main(argv))
<|code_end|>
, determine the next line of code. You have imports:
from afl_utils import afl_minimize
import os
import shutil
import subprocess
import unittest
and context (class names, function names, or code) available:
# Path: afl_utils/afl_minimize.py
# def show_info():
# def invoke_cmin(input_dir, output_dir, target_cmd, mem_limit=None, timeout=None, qemu=False):
# def invoke_tmin(input_files, output_dir, target_cmd, num_threads=1, mem_limit=None, timeout=None, qemu=False):
# def invoke_dryrun(input_files, crash_dir, timeout_dir, target_cmd, timeout=60, num_threads=1):
# def afl_reseed(sync_dir, coll_dir):
# def main(argv):
. Output only the next line. | argv = ['afl-minimize', '--dry-run', 'testdata/sync', '--', '/bin/echo'] |
Using the snippet: <|code_start|>
class SampleIndexTestCase(unittest.TestCase):
def setUp(self):
# Use to set up test environment prior to test case
# invocation
pass
def tearDown(self):
# Use for clean up after tests have run
pass
def prepare_SampleIndex(self):
test_dir = 'test_data'
test_index = [
{'input': 1, 'fuzzer': 'fuzz01', 'output': 'fuzz01:file01'},
{'input': 4, 'fuzzer': 'fuzz02', 'output': 'fuzz02:file02'},
{'input': 7, 'fuzzer': 'fuzz03', 'output': 'fuzz03:file03'},
]
test_inputs = [1, 4, 7]
<|code_end|>
, determine the next line of code. You have imports:
from afl_utils import SampleIndex
import os
import unittest
and context (class names, function names, or code) available:
# Path: afl_utils/SampleIndex.py
# class SampleIndex:
# def __init__(self, output_dir, index=None, min_filename=False, omit_fuzzer_name=False):
# self.output_dir = os.path.abspath(output_dir)
# if index is not None:
# self.index = index
# else:
# self.index = []
# self.min_filename = min_filename
# self.omit_fuzzer_name = omit_fuzzer_name
#
# def __generate_output__(self, fuzzer, input_file):
# input_filename = os.path.basename(input_file)
# fuzzer_name = os.path.basename(fuzzer)
# if self.min_filename:
# try:
# input_filename = input_filename.split(",")[0].split(":")[1]
# except Exception:
# pass
# if self.omit_fuzzer_name:
# return input_filename
# else:
# return "%s:%s" % (fuzzer_name, input_filename)
#
# def __remove__(self, key, values):
# self.index = [x for x in self.index if x[key] not in values]
# return self.index
#
# def __return_values__(self, key):
# return [v[key] for v in self.index]
#
# def divide(self, count):
# """
# Divide sample index into approx. equally sized parts.
#
# :param count: Number of parts
# :return: List containing divided sample indexes
# """
# indexes = [self.index[i::count] for i in range(count)]
# sample_indexes = []
# for i in indexes:
# sample_indexes.append(SampleIndex(self.output_dir, i))
#
# return sample_indexes
#
# def add(self, fuzzer, input_file):
# sample_output = self.__generate_output__(fuzzer, input_file)
# # avoid to add duplicates (by filename) to sample index
# # #TODO: Speed this up, if possible
# if sample_output not in self.outputs():
# self.index.append({
# 'input': os.path.abspath(os.path.expanduser(input_file)),
# 'fuzzer': fuzzer,
# 'output': sample_output})
# return self.index
#
# def add_output(self, output_file):
# output_file = os.path.abspath(output_file)
# # avoid to add duplicates to index
# if output_file not in self.outputs():
# # we can't generate input filenames, fuzzer from output filenames,
# # so leave them blank
# self.index.append({
# 'input': None,
# 'fuzzer': None,
# 'output': output_file})
# return self.index
#
# def remove_inputs(self, input_files):
# self.index = self.__remove__("input", input_files)
# return self.index
#
# def remove_fuzzers(self, fuzzers):
# self.index = self.__remove__("fuzzer", fuzzers)
# return self.index
#
# def remove_outputs(self, output_files):
# self.index = self.__remove__("output", output_files)
# return self.index
#
# def inputs(self):
# return self.__return_values__("input")
#
# def outputs(self, fuzzer=None, input_file=None):
# if fuzzer is not None and input_file is not None:
# for i in self.index:
# if i['fuzzer'] == fuzzer and i['input'] == input_file:
# return [i['output']]
# elif fuzzer is not None:
# return [i['output'] for i in self.index if i['fuzzer'] == fuzzer]
# elif input_file is not None:
# return [i['output'] for i in self.index if i['input'] == input_file]
# else:
# return self.__return_values__("output")
#
# def fuzzers(self):
# return self.__return_values__("fuzzer")
#
# def size(self):
# return len(self.index)
. Output only the next line. | test_fuzzers = ['fuzz01', 'fuzz02', 'fuzz03'] |
Here is a snippet: <|code_start|> self.assertTrue(afl_multicore.has_master(conf_settings, 11))
# negative test
self.assertFalse(afl_multicore.has_master(conf_settings, 12))
conf_settings = {
'session': 'fuzz',
'output': 'testdata/sync',
'master_instances': 0,
}
self.assertFalse(afl_multicore.has_master(conf_settings, 0))
self.assertFalse(afl_multicore.has_master(conf_settings, 2))
conf_settings = {
'session': 'fuzz',
'output': 'testdata/sync',
'master_instances': -23,
}
self.assertFalse(afl_multicore.has_master(conf_settings, 0))
def test_main(self):
# we're only going to test some error cases
# invalid invocation (Argparser failure)
with self.assertRaises(SystemExit) as se:
afl_multicore.main(['afl-multicore', '-c', 'invalid.conf', '--invalid-opt'])
self.assertEqual(2, se.exception.code)
# test run
with self.assertRaises(SystemExit) as se:
afl_multicore.main(['afl-multicore', '-c', 'testdata/afl-multicore.conf.test', '-t', 'start', '4'])
<|code_end|>
. Write the next line using the current file imports:
from afl_utils import afl_multicore
import simplejson as json
import json
import shutil
import os
import unittest
and context from other files:
# Path: afl_utils/afl_multicore.py
# def find_fuzzer_binary(fuzzer_bin):
# def show_info():
# def read_config(config_file):
# def afl_cmdline_from_config(config_settings, instance_number):
# def check_screen():
# def setup_screen_env(env_list):
# def setup_screen(windows, env_list):
# def sigint_handler(signal, frame):
# def build_target_cmd(conf_settings):
# def build_master_cmd(conf_settings, master_index, target_cmd):
# def build_slave_cmd(conf_settings, slave_index, target_cmd):
# def write_pgid_file(conf_settings):
# def get_master_count(conf_settings):
# def get_started_instance_count(command, conf_settings):
# def get_job_counts(jobs_arg):
# def has_master(conf_settings, jobs_offset):
# def startup_delay(conf_settings, instance_num, command, startup_delay):
# def auto_startup_delay(config_settings, instance_num, resume=True):
# def main(argv):
# N = len(sample_list)
# T = float(config_settings["timeout"].strip(" +")) if "timeout" in config_settings else 1000.0
# O = N**(-1/2)
, which may include functions, classes, or code. Output only the next line. | self.assertEqual(1, se.exception.code) |
Based on the snippet: <|code_start|>
try:
except ImportError:
test_conf_settings = {
'twitter_creds_file': '.afl-stats.creds',
'twitter_consumer_key': 'your_consumer_key_here',
'twitter_consumer_secret': 'your_consumer_secret_here',
'fuzz_dirs': [
'/path/to/fuzz/dir/0',
'/path/to/fuzz/dir/1',
'testdata/sync'
]
}
test_stats = {
'pending_total': '0',
'paths_favored': '25',
'pending_favs': '0',
'execs_per_sec': '1546.82',
<|code_end|>
, predict the immediate next line with the help of imports:
from afl_utils import afl_stats
from db_connectors import con_sqlite
import simplejson as json
import json
import os
import socket
import unittest
and context (classes, functions, sometimes code) from other files:
# Path: afl_utils/afl_stats.py
# def show_info():
# def read_config(config_file):
# def twitter_init(config):
# def shorten_tweet(tweet):
# def fuzzer_alive(pid):
# def parse_stat_file(stat_file, summary=True):
# def load_stats(fuzzer_dir, summary=True):
# def summarize_stats(stats):
# def diff_stats(sum_stats, old_stats):
# def prettify_stat(stat, dstat, console=True):
# def dump_stats(config_settings, database):
# def fetch_stats(config_settings, twitter_inst):
# def main(argv):
#
# Path: db_connectors/con_sqlite.py
# class sqliteConnector:
# def __init__(self, database_path, verbose=True):
# def init_database(self, table, table_spec):
# def dataset_exists(self, table, dataset, compare_fields):
# def insert_dataset(self, table, dataset):
# def commit_close(self):
. Output only the next line. | 'fuzzer_pid': 0, |
Predict the next line for this snippet: <|code_start|>
try:
except ImportError:
test_conf_settings = {
'twitter_creds_file': '.afl-stats.creds',
'twitter_consumer_key': 'your_consumer_key_here',
'twitter_consumer_secret': 'your_consumer_secret_here',
'fuzz_dirs': [
'/path/to/fuzz/dir/0',
'/path/to/fuzz/dir/1',
'testdata/sync'
]
}
test_stats = {
'pending_total': '0',
'paths_favored': '25',
'pending_favs': '0',
'execs_per_sec': '1546.82',
<|code_end|>
with the help of current file imports:
from afl_utils import afl_stats
from db_connectors import con_sqlite
import simplejson as json
import json
import os
import socket
import unittest
and context from other files:
# Path: afl_utils/afl_stats.py
# def show_info():
# def read_config(config_file):
# def twitter_init(config):
# def shorten_tweet(tweet):
# def fuzzer_alive(pid):
# def parse_stat_file(stat_file, summary=True):
# def load_stats(fuzzer_dir, summary=True):
# def summarize_stats(stats):
# def diff_stats(sum_stats, old_stats):
# def prettify_stat(stat, dstat, console=True):
# def dump_stats(config_settings, database):
# def fetch_stats(config_settings, twitter_inst):
# def main(argv):
#
# Path: db_connectors/con_sqlite.py
# class sqliteConnector:
# def __init__(self, database_path, verbose=True):
# def init_database(self, table, table_spec):
# def dataset_exists(self, table, dataset, compare_fields):
# def insert_dataset(self, table, dataset):
# def commit_close(self):
, which may contain function names, class names, or code. Output only the next line. | 'fuzzer_pid': 0, |
Using the snippet: <|code_start|> pass
def tearDown(self):
# Use for clean up after tests have run
pass
def setup_testprocess(self, session):
# spawn dummy process in a new process group
new_proc = subprocess.Popen(['setsid', 'setsid', 'python', 'testdata/dummy_process/dummyproc.py'])
# write/append PGID to file /tmp/afl-multicore.PGID.<SESSION>
f = open("/tmp/afl_multicore.PGID.%s" % session, "a")
if f.writable():
f.write("%d\n" % new_proc.pid) # PGID ok
f.write("%d\n" % 0x7fffffff) # PGID invalid
f.close()
time.sleep(0.1)
def test_kill_session(self):
test_session = 'dummy_proc_01'
# test missing PGID file
with self.assertRaises(SystemExit) as se:
afl_multikill.kill_session('Invalid-multikill-session')
self.assertEqual(se.exception.code, 1)
# test with dummy process
self.setup_testprocess(test_session)
self.assertIsNone(afl_multikill.kill_session(test_session))
<|code_end|>
, determine the next line of code. You have imports:
from afl_utils import afl_multikill
import os
import subprocess
import unittest
import time
and context (class names, function names, or code) available:
# Path: afl_utils/afl_multikill.py
# def show_info():
# def kill_session(session):
# def main(argv):
. Output only the next line. | def test_main(self): |
Here is a snippet: <|code_start|> ('fuzz001', ['queue'])
]
sync_dir = os.path.abspath('testdata/sync')
self.assertListEqual(fuzzer_inst, sorted(afl_collect.get_queue_directories(sync_dir, fuzzer_inst)))
def test_get_samples_from_dir(self):
sample_dir = 'testdata/queue'
expected_result = (5, [
'sample0',
'sample1',
'sample2',
'sample3',
'sample4'
])
result = afl_collect.get_samples_from_dir(sample_dir)
self.assertEqual(expected_result[0], result[0])
self.assertListEqual(expected_result[1], sorted(result[1]))
expected_result = (5, [
os.path.join(sample_dir, 'sample0'),
os.path.join(sample_dir, 'sample1'),
os.path.join(sample_dir, 'sample2'),
os.path.join(sample_dir, 'sample3'),
os.path.join(sample_dir, 'sample4'),
])
result = afl_collect.get_samples_from_dir(sample_dir, abs_path=True)
self.assertEqual(expected_result[0], result[0])
self.assertListEqual(expected_result[1], sorted(result[1]))
def test_collect_samples(self):
<|code_end|>
. Write the next line using the current file imports:
from afl_utils import afl_collect
from afl_utils.SampleIndex import SampleIndex
import os
import shutil
import subprocess
import unittest
and context from other files:
# Path: afl_utils/afl_collect.py
# def show_info():
# def get_fuzzer_instances(sync_dir, crash_dirs=True):
# def get_crash_directories(sync_dir, fuzzer_instances):
# def get_queue_directories(sync_dir, fuzzer_instances):
# def get_samples_from_dir(sample_subdir, abs_path=False):
# def collect_samples(sync_dir, fuzzer_instances):
# def build_sample_index(sync_dir, out_dir, fuzzer_instances, db=None, min_filename=False, omit_fuzzer_name=False):
# def copy_samples(sample_index):
# def generate_sample_list(list_filename, files_collected):
# def stdin_mode(target_cmd):
# def generate_gdb_exploitable_script(script_filename, sample_index, target_cmd, script_id=0, intermediate=False):
# def execute_gdb_script(out_dir, script_filename, num_samples, num_threads):
# def main(argv):
#
# Path: afl_utils/SampleIndex.py
# class SampleIndex:
# def __init__(self, output_dir, index=None, min_filename=False, omit_fuzzer_name=False):
# self.output_dir = os.path.abspath(output_dir)
# if index is not None:
# self.index = index
# else:
# self.index = []
# self.min_filename = min_filename
# self.omit_fuzzer_name = omit_fuzzer_name
#
# def __generate_output__(self, fuzzer, input_file):
# input_filename = os.path.basename(input_file)
# fuzzer_name = os.path.basename(fuzzer)
# if self.min_filename:
# try:
# input_filename = input_filename.split(",")[0].split(":")[1]
# except Exception:
# pass
# if self.omit_fuzzer_name:
# return input_filename
# else:
# return "%s:%s" % (fuzzer_name, input_filename)
#
# def __remove__(self, key, values):
# self.index = [x for x in self.index if x[key] not in values]
# return self.index
#
# def __return_values__(self, key):
# return [v[key] for v in self.index]
#
# def divide(self, count):
# """
# Divide sample index into approx. equally sized parts.
#
# :param count: Number of parts
# :return: List containing divided sample indexes
# """
# indexes = [self.index[i::count] for i in range(count)]
# sample_indexes = []
# for i in indexes:
# sample_indexes.append(SampleIndex(self.output_dir, i))
#
# return sample_indexes
#
# def add(self, fuzzer, input_file):
# sample_output = self.__generate_output__(fuzzer, input_file)
# # avoid to add duplicates (by filename) to sample index
# # #TODO: Speed this up, if possible
# if sample_output not in self.outputs():
# self.index.append({
# 'input': os.path.abspath(os.path.expanduser(input_file)),
# 'fuzzer': fuzzer,
# 'output': sample_output})
# return self.index
#
# def add_output(self, output_file):
# output_file = os.path.abspath(output_file)
# # avoid to add duplicates to index
# if output_file not in self.outputs():
# # we can't generate input filenames, fuzzer from output filenames,
# # so leave them blank
# self.index.append({
# 'input': None,
# 'fuzzer': None,
# 'output': output_file})
# return self.index
#
# def remove_inputs(self, input_files):
# self.index = self.__remove__("input", input_files)
# return self.index
#
# def remove_fuzzers(self, fuzzers):
# self.index = self.__remove__("fuzzer", fuzzers)
# return self.index
#
# def remove_outputs(self, output_files):
# self.index = self.__remove__("output", output_files)
# return self.index
#
# def inputs(self):
# return self.__return_values__("input")
#
# def outputs(self, fuzzer=None, input_file=None):
# if fuzzer is not None and input_file is not None:
# for i in self.index:
# if i['fuzzer'] == fuzzer and i['input'] == input_file:
# return [i['output']]
# elif fuzzer is not None:
# return [i['output'] for i in self.index if i['fuzzer'] == fuzzer]
# elif input_file is not None:
# return [i['output'] for i in self.index if i['input'] == input_file]
# else:
# return self.__return_values__("output")
#
# def fuzzers(self):
# return self.__return_values__("fuzzer")
#
# def size(self):
# return len(self.index)
, which may include functions, classes, or code. Output only the next line. | sync_dir = 'testdata/sync' |
Predict the next line for this snippet: <|code_start|> self.assertDictEqual(config, cron.config)
def test_get_module(self):
module_path = 'afl_utils.afl_sync'
module = afl_sync
cron = AflCronDaemon(g_config_file)
self.assertEqual(module, cron.get_module(module_path))
# error case
with self.assertRaises(ValueError):
cron.get_module('invalid_module.path')
def test_get_member(self):
module = afl_sync
cls = afl_sync.AflBaseSync
cls_name = 'AflBaseSync'
fcn = afl_sync.main
fcn_name = 'main'
# get class
cron = AflCronDaemon(g_config_file)
self.assertEqual(cls, cron.get_member(module, cls_name))
# get func
cron = AflCronDaemon(g_config_file)
self.assertEqual(fcn, cron.get_member(module, fcn_name))
# error case
with self.assertRaises(ValueError):
<|code_end|>
with the help of current file imports:
from afl_utils import afl_cron, afl_sync
from afl_utils.afl_cron import AflCronDaemon
import os
import shutil
import unittest
and context from other files:
# Path: afl_utils/afl_cron.py
# class AflCronDaemon(object):
# def __init__(self, config_file, quiet=False):
# def load_config(self, config_file):
# def get_module(self, module_path):
# def get_member(self, module, member_name):
# def run_job(self, job):
# def run(self):
# def show_info():
# def main(argv):
#
# Path: afl_utils/afl_sync.py
# class AflBaseSync(object):
# class AflRsync(AflBaseSync):
# def __init__(self, server_config, fuzzer_config):
# def __init__(self, server_config, fuzzer_config):
# def __prepare_rsync_commandline(self, local_path, remote_path, rsync_options=list(_rsync_default_options),
# rsync_excludes=list([]), rsync_get=False):
# def __invoke_rsync(self, rsync_cmdline):
# def __get_fuzzers(self):
# def rsync_put(self, local_path, remote_path, rsync_options=list(_rsync_default_options), rsync_excludes=list([])):
# def rsync_get(self, remote_path, local_path, rsync_options=list(_rsync_default_options), rsync_excludes=list([])):
# def push(self):
# def pull(self):
# def sync(self):
# def show_info():
# def main(argv):
#
# Path: afl_utils/afl_cron.py
# class AflCronDaemon(object):
# def __init__(self, config_file, quiet=False):
# self.config = self.load_config(config_file)
# self.quiet = quiet
#
# def load_config(self, config_file):
# with open(config_file, 'r') as raw_config:
# config = json.load(raw_config)
# return config
#
# def get_module(self, module_path):
# module_name = module_path.rsplit('.', 1)[1]
# try:
# module = __import__(module_path, fromlist=[module_name])
# except ImportError:
# raise ValueError('Module \'{}\' could not be imported' .format(module_path,))
# return module
#
# def get_member(self, module, member_name):
# try:
# cls = getattr(module, member_name)
# except AttributeError:
# raise ValueError('Module \'{}\' has no member \'{}\''.format(module, member_name, ))
# return cls
#
# def run_job(self, job):
# job_module = self.get_module(job['module'])
# job_func = self.get_member(job_module, job['function'])
# job_args = [job['module'].rsplit('.', 1)[1]] + job['params'].split()
# if not self.quiet:
# print_ok('Executing \'{}\' ({}.{})'.format(job['name'], job['module'], job['function']))
# job_func(job_args)
#
# def run(self):
# doExit = False
# while not doExit:
# try:
# time_start = datetime.datetime.now()
# for job in self.config['jobs']:
# self.run_job(job)
#
# print_ok('All jobs done [{}]'.format(datetime.datetime.now()-time_start))
#
# if float(self.config['interval']) < 0:
# doExit = True
# else:
# time.sleep(float(self.config['interval']) * 60)
# except KeyboardInterrupt:
# print('\b\b')
# print_ok('Aborted by user. Good bye!')
# doExit = True
, which may contain function names, class names, or code. Output only the next line. | cron.get_member(module, 'invalid_class') |
Predict the next line after this snippet: <|code_start|>
g_config_file = 'config/afl-cron.conf.sample'
class AflSyncTestCase(unittest.TestCase):
def setUp(self):
# Use to set up test environment prior to test case
# invocation
pass
def tearDown(self):
# Use for clean up after tests have run
pass
def clean_remove(self, file):
if os.path.exists(file):
<|code_end|>
using the current file's imports:
from afl_utils import afl_cron, afl_sync
from afl_utils.afl_cron import AflCronDaemon
import os
import shutil
import unittest
and any relevant context from other files:
# Path: afl_utils/afl_cron.py
# class AflCronDaemon(object):
# def __init__(self, config_file, quiet=False):
# def load_config(self, config_file):
# def get_module(self, module_path):
# def get_member(self, module, member_name):
# def run_job(self, job):
# def run(self):
# def show_info():
# def main(argv):
#
# Path: afl_utils/afl_sync.py
# class AflBaseSync(object):
# class AflRsync(AflBaseSync):
# def __init__(self, server_config, fuzzer_config):
# def __init__(self, server_config, fuzzer_config):
# def __prepare_rsync_commandline(self, local_path, remote_path, rsync_options=list(_rsync_default_options),
# rsync_excludes=list([]), rsync_get=False):
# def __invoke_rsync(self, rsync_cmdline):
# def __get_fuzzers(self):
# def rsync_put(self, local_path, remote_path, rsync_options=list(_rsync_default_options), rsync_excludes=list([])):
# def rsync_get(self, remote_path, local_path, rsync_options=list(_rsync_default_options), rsync_excludes=list([])):
# def push(self):
# def pull(self):
# def sync(self):
# def show_info():
# def main(argv):
#
# Path: afl_utils/afl_cron.py
# class AflCronDaemon(object):
# def __init__(self, config_file, quiet=False):
# self.config = self.load_config(config_file)
# self.quiet = quiet
#
# def load_config(self, config_file):
# with open(config_file, 'r') as raw_config:
# config = json.load(raw_config)
# return config
#
# def get_module(self, module_path):
# module_name = module_path.rsplit('.', 1)[1]
# try:
# module = __import__(module_path, fromlist=[module_name])
# except ImportError:
# raise ValueError('Module \'{}\' could not be imported' .format(module_path,))
# return module
#
# def get_member(self, module, member_name):
# try:
# cls = getattr(module, member_name)
# except AttributeError:
# raise ValueError('Module \'{}\' has no member \'{}\''.format(module, member_name, ))
# return cls
#
# def run_job(self, job):
# job_module = self.get_module(job['module'])
# job_func = self.get_member(job_module, job['function'])
# job_args = [job['module'].rsplit('.', 1)[1]] + job['params'].split()
# if not self.quiet:
# print_ok('Executing \'{}\' ({}.{})'.format(job['name'], job['module'], job['function']))
# job_func(job_args)
#
# def run(self):
# doExit = False
# while not doExit:
# try:
# time_start = datetime.datetime.now()
# for job in self.config['jobs']:
# self.run_job(job)
#
# print_ok('All jobs done [{}]'.format(datetime.datetime.now()-time_start))
#
# if float(self.config['interval']) < 0:
# doExit = True
# else:
# time.sleep(float(self.config['interval']) * 60)
# except KeyboardInterrupt:
# print('\b\b')
# print_ok('Aborted by user. Good bye!')
# doExit = True
. Output only the next line. | os.remove(file) |
Here is a snippet: <|code_start|>
def tearDown(self):
# Use for clean up after tests have run
pass
def clean_remove(self, file):
if os.path.exists(file):
os.remove(file)
def clean_remove_dir(self, dir):
if os.path.exists(dir):
shutil.rmtree(dir)
def test_load_config(self):
config = {
"interval": 60,
"jobs": [
{
"name": "afl-stats",
"description": "Job description here",
"module": "afl_utils.afl_stats",
"function": "main",
"params": "--quiet -c config/afl-stats.conf.sample"
}
]
}
cron = AflCronDaemon(g_config_file)
self.assertDictEqual(config, cron.config)
<|code_end|>
. Write the next line using the current file imports:
from afl_utils import afl_cron, afl_sync
from afl_utils.afl_cron import AflCronDaemon
import os
import shutil
import unittest
and context from other files:
# Path: afl_utils/afl_cron.py
# class AflCronDaemon(object):
# def __init__(self, config_file, quiet=False):
# def load_config(self, config_file):
# def get_module(self, module_path):
# def get_member(self, module, member_name):
# def run_job(self, job):
# def run(self):
# def show_info():
# def main(argv):
#
# Path: afl_utils/afl_sync.py
# class AflBaseSync(object):
# class AflRsync(AflBaseSync):
# def __init__(self, server_config, fuzzer_config):
# def __init__(self, server_config, fuzzer_config):
# def __prepare_rsync_commandline(self, local_path, remote_path, rsync_options=list(_rsync_default_options),
# rsync_excludes=list([]), rsync_get=False):
# def __invoke_rsync(self, rsync_cmdline):
# def __get_fuzzers(self):
# def rsync_put(self, local_path, remote_path, rsync_options=list(_rsync_default_options), rsync_excludes=list([])):
# def rsync_get(self, remote_path, local_path, rsync_options=list(_rsync_default_options), rsync_excludes=list([])):
# def push(self):
# def pull(self):
# def sync(self):
# def show_info():
# def main(argv):
#
# Path: afl_utils/afl_cron.py
# class AflCronDaemon(object):
# def __init__(self, config_file, quiet=False):
# self.config = self.load_config(config_file)
# self.quiet = quiet
#
# def load_config(self, config_file):
# with open(config_file, 'r') as raw_config:
# config = json.load(raw_config)
# return config
#
# def get_module(self, module_path):
# module_name = module_path.rsplit('.', 1)[1]
# try:
# module = __import__(module_path, fromlist=[module_name])
# except ImportError:
# raise ValueError('Module \'{}\' could not be imported' .format(module_path,))
# return module
#
# def get_member(self, module, member_name):
# try:
# cls = getattr(module, member_name)
# except AttributeError:
# raise ValueError('Module \'{}\' has no member \'{}\''.format(module, member_name, ))
# return cls
#
# def run_job(self, job):
# job_module = self.get_module(job['module'])
# job_func = self.get_member(job_module, job['function'])
# job_args = [job['module'].rsplit('.', 1)[1]] + job['params'].split()
# if not self.quiet:
# print_ok('Executing \'{}\' ({}.{})'.format(job['name'], job['module'], job['function']))
# job_func(job_args)
#
# def run(self):
# doExit = False
# while not doExit:
# try:
# time_start = datetime.datetime.now()
# for job in self.config['jobs']:
# self.run_job(job)
#
# print_ok('All jobs done [{}]'.format(datetime.datetime.now()-time_start))
#
# if float(self.config['interval']) < 0:
# doExit = True
# else:
# time.sleep(float(self.config['interval']) * 60)
# except KeyboardInterrupt:
# print('\b\b')
# print_ok('Aborted by user. Good bye!')
# doExit = True
, which may include functions, classes, or code. Output only the next line. | def test_get_module(self): |
Given snippet: <|code_start|>
transforms = game_object_utils.get_game_object_positions_from_map(
ascii_map, 'W')
self.assertSameElements(
[
# Top walls
get_transform(0, 0, game_object_utils.Orientation.NORTH),
get_transform(1, 0, game_object_utils.Orientation.NORTH),
get_transform(2, 0, game_object_utils.Orientation.NORTH),
get_transform(3, 0, game_object_utils.Orientation.NORTH),
get_transform(4, 0, game_object_utils.Orientation.NORTH),
get_transform(5, 0, game_object_utils.Orientation.NORTH),
# Side walls
get_transform(0, 1, game_object_utils.Orientation.NORTH),
get_transform(5, 1, game_object_utils.Orientation.NORTH),
# Bottom walls
get_transform(0, 2, game_object_utils.Orientation.NORTH),
get_transform(1, 2, game_object_utils.Orientation.NORTH),
get_transform(2, 2, game_object_utils.Orientation.NORTH),
get_transform(3, 2, game_object_utils.Orientation.NORTH),
get_transform(4, 2, game_object_utils.Orientation.NORTH),
get_transform(5, 2, game_object_utils.Orientation.NORTH),
],
transforms)
def test_get_game_objects(self):
ascii_map = '''
WWWWWW
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from absl.testing import absltest
from absl.testing import parameterized
from meltingpot.python.utils.substrates import game_object_utils
and context:
# Path: meltingpot/python/utils/substrates/game_object_utils.py
# class Position(NamedTuple):
# class Orientation(enum.Enum):
# class Transform(NamedTuple):
# NORTH = "N"
# EAST = "E"
# SOUTH = "S"
# WEST = "W"
# TYPE_ALL = "all"
# TYPE_CHOICE = "choice"
# def get_named_components(
# game_object_config: PrefabConfig,
# name: str):
# def get_first_named_component(
# game_object_config: PrefabConfig,
# name: str):
# def build_game_objects(
# num_players: int,
# ascii_map: str,
# prefabs: Optional[Mapping[str, PrefabConfig]] = None,
# char_prefab_map: Optional[PrefabConfig] = None,
# player_palettes: Optional[Sequence[shapes.Color]] = None,
# use_badges: bool = False,
# badge_palettes: Optional[Sequence[shapes.Color]] = None,
# ) -> Tuple[List[PrefabConfig], List[PrefabConfig]]:
# def build_avatar_objects(
# num_players: int,
# prefabs: Optional[Mapping[str, PrefabConfig]] = None,
# player_palettes: Optional[Sequence[shapes.Color]] = None,
# ) -> List[PrefabConfig]:
# def build_avatar_badges(
# num_players: int,
# prefabs: Optional[Mapping[str, PrefabConfig]] = None,
# badge_palettes: Optional[Sequence[shapes.Color]] = None,
# ) -> List[PrefabConfig]:
# def get_game_object_positions_from_map(
# ascii_map: str, char: str, orientation_mode: str = "always_north"
# ) -> Sequence[Transform]:
# def _create_game_object(
# prefab: PrefabConfig, transform: Transform) -> PrefabConfig:
# def get_game_objects_from_map(
# ascii_map: str,
# char_prefab_map: Mapping[str, str],
# prefabs: Mapping[str, PrefabConfig],
# random: np.random.RandomState = np.random.RandomState()
# ) -> List[PrefabConfig]:
which might include code, classes, or functions. Output only the next line. | W A W |
Here is a snippet: <|code_start|>
#: Pattern that matches both SubStation and SubRip timestamps.
TIMESTAMP = re.compile(r"(\d{1,2}):(\d{2}):(\d{2})[.,](\d{2,3})")
Times = namedtuple("Times", ["h", "m", "s", "ms"])
def make_time(h: IntOrFloat=0, m: IntOrFloat=0, s: IntOrFloat=0, ms: IntOrFloat=0,
<|code_end|>
. Write the next line using the current file imports:
from collections import namedtuple
from typing import Optional, List, Tuple, Sequence
from pysubs2.common import IntOrFloat
import re
and context from other files:
# Path: pysubs2/common.py
# class Color:
# def __init__(self, r: int, g: int, b: int, a: int = 0):
# VERSION = "1.4.1"
, which may include functions, classes, or code. Output only the next line. | frames: Optional[int]=None, fps: Optional[float]=None): |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
try:
except ImportError:
setup(
name = "pysubs2",
packages = ["pysubs2"],
version = VERSION,
author = "Tomas Karabela",
<|code_end|>
with the help of current file imports:
from setuptools import setup
from distutils.core import setup
from textwrap import dedent
from pysubs2 import VERSION
and context from other files:
# Path: pysubs2/common.py
# VERSION = "1.4.1"
, which may contain function names, class names, or code. Output only the next line. | author_email = "tkarabela@seznam.cz", |
Given the following code snippet before the placeholder: <|code_start|>
def test_repr_plain():
ev = SSAStyle(fontname="Calibri", fontsize=36)
ref = "<SSAStyle 36px 'Calibri'>"
assert repr(ev) == ref
def test_repr_italic():
ev = SSAStyle(fontname="Calibri", fontsize=36, italic=True)
ref = "<SSAStyle 36px italic 'Calibri'>"
assert repr(ev) == ref
def test_repr_bold_italic():
ev = SSAStyle(fontname="Calibri", fontsize=36, italic=True, bold=True)
ref = "<SSAStyle 36px bold italic 'Calibri'>"
assert repr(ev) == ref
def test_repr_floatsize():
ev = SSAStyle(fontname="Calibri", fontsize=36.499)
ref = "<SSAStyle 36.499px 'Calibri'>"
assert repr(ev) == ref
def test_fields():
sty = SSAStyle()
<|code_end|>
, predict the next line using imports from the current file:
import pytest
from pysubs2 import SSAStyle
and context including class names, function names, and sometimes code from other files:
# Path: pysubs2/ssastyle.py
# class SSAStyle:
# """
# A SubStation Style.
#
# In SubStation, each subtitle (:class:`SSAEvent`) is associated with a style which defines its font, color, etc.
# Like a subtitle event, a style also consists of "fields"; see :attr:`SSAStyle.FIELDS` for a list
# (note the spelling, which is different from SubStation proper).
#
# Subtitles and styles are connected via an :class:`SSAFile` they belong to. :attr:`SSAEvent.style` is a string
# which is (or should be) a key in the :attr:`SSAFile.styles` dict. Note that style name is stored separately;
# a given :class:`SSAStyle` instance has no particular name itself.
#
# This class defines equality (equality of all fields).
#
# """
# DEFAULT_STYLE: ClassVar["SSAStyle"] = None # type: ignore[assignment]
#
# @property
# def FIELDS(self):
# """All fields in SSAStyle."""
# warnings.warn("Deprecated in 1.2.0 - it's a dataclass now", DeprecationWarning)
# return frozenset(field.name for field in dataclasses.fields(self))
#
# fontname: str = "Arial" #: Font name
# fontsize: float = 20.0 #: Font size (in pixels)
# primarycolor: Color = Color(255, 255, 255, 0) #: Primary color (:class:`pysubs2.Color` instance)
# secondarycolor: Color = Color(255, 0, 0, 0) #: Secondary color (:class:`pysubs2.Color` instance)
# tertiarycolor: Color = Color(0, 0, 0, 0) #: Tertiary color (:class:`pysubs2.Color` instance)
# outlinecolor: Color = Color(0, 0, 0, 0) #: Outline color (:class:`pysubs2.Color` instance)
# backcolor: Color = Color(0, 0, 0, 0) #: Back, ie. shadow color (:class:`pysubs2.Color` instance)
# bold: bool = False #: Bold
# italic: bool = False #: Italic
# underline: bool = False #: Underline (ASS only)
# strikeout: bool = False #: Strikeout (ASS only)
# scalex: float = 100.0 #: Horizontal scaling (ASS only)
# scaley: float = 100.0 #: Vertical scaling (ASS only)
# spacing: float = 0.0 #: Letter spacing (ASS only)
# angle: float = 0.0 #: Rotation (ASS only)
# borderstyle: int = 1 #: Border style
# outline: float = 2.0 #: Outline width (in pixels)
# shadow: float = 2.0 #: Shadow depth (in pixels)
# alignment: int = 2 #: Numpad-style alignment, eg. 7 is "top left" (that is, ASS alignment semantics)
# marginl: int = 10 #: Left margin (in pixels)
# marginr: int = 10 #: Right margin (in pixels)
# marginv: int = 10 #: Vertical margin (in pixels)
# alphalevel: int = 0 #: Old, unused SSA-only field
# encoding: int = 1 #: Charset
#
# # The following attributes cannot be defined for SSA styles themselves,
# # but can be used in override tags and thus are useful to keep here
# # for the `pysubs2.substation.parse_tags()` interface which returns
# # SSAStyles for text fragments.
# drawing: bool = False #: Indicates that text span is a SSA vector drawing, see `pysubs2.substation.parse_tags()`
#
# def copy(self) -> "SSAStyle":
# return SSAStyle(**self.as_dict())
#
# def as_dict(self) -> Dict[str, Any]:
# # dataclasses.asdict() would recursively dictify Color objects, which we don't want
# return {field.name: getattr(self, field.name) for field in dataclasses.fields(self)}
#
# def __repr__(self):
# return f"<SSAStyle {self.fontsize!r}px" \
# f"{' bold' if self.bold else ''}" \
# f"{' italic' if self.italic else ''}" \
# f" {self.fontname!r}>"
. Output only the next line. | with pytest.warns(DeprecationWarning): |
Next line prediction: <|code_start|>
Classes = CollectorGroup()
@Classes.fragments.fragment
class WarpAnchorFragment(BaseConstructFragment):
base_container = Section.base(Magic[2], 0x6e)
container_versions = 18
<|code_end|>
. Use current file imports:
(from construct import Enum
from distance.bytes import Section, Magic
from distance.construct import (
BaseConstructFragment,
Byte, UInt, Int, Float, DstString,
Struct, Default,
)
from distance.classes import CollectorGroup)
and context including class names, function names, or small code snippets from other files:
# Path: distance/bytes.py
# UTF_16_DECODE = codecs.getdecoder('utf-16-le')
# UTF_16_ENCODE = codecs.getencoder('utf-16-le')
# S_COLOR_RGBA = Struct("<4f")
# S_FLOAT = Struct("<f")
# S_DOUBLE = Struct("<d")
# S_FLOAT3 = Struct("<fff")
# S_FLOAT4 = Struct("<ffff")
# S_BYTE = Struct('b')
# S_INT = Struct('<i')
# S_LONG = Struct('<q')
# S_UINT = Struct('<I')
# S_ULONG = Struct('<Q')
# S_UINT2 = Struct("<II")
# S_UINT3 = Struct("<III")
# SKIP_BYTES = b'\xFD\xFF\xFF\x7F'
# MAGIC_1 = 11111111
# MAGIC_2 = 22222222
# MAGIC_3 = 33333333
# MAGIC_5 = 55555555
# MAGIC_6 = 66666666
# MAGIC_7 = 77777777
# MAGIC_8 = 88888888
# MAGIC_9 = 99999999
# MAGIC_12 = 12121212
# MAGIC_32 = 32323232
# CATCH_EXCEPTIONS = (ValueError, EOFError)
# S_SEC_BASE = Struct("<IQ")
# MIN_SIZE = 12 # 4b (magic) + 8b (data_size)
# class ErrorPosition(namedtuple('_ErrorPosition', ['start', 'error'])):
# class BytesModel(object):
# class Section(BytesModel):
# class DstBytes(object):
# def __repr__(self):
# def get(cls, ex):
# def first(cls, ex):
# def maybe(cls, dbytes, **kw):
# def iter_n_maybe(cls, dbytes, n, **kw):
# def lazy_n_maybe(cls, dbytes, n, *, start_pos=None, **kw):
# def __init__(self, dbytes=None, **kw):
# def read(self, dbytes, *, seek_end=True, **kw):
# def _read(self, dbytes):
# def _init_defaults(self):
# def _after_init(self):
# def write(self, dbytes, **kw):
# def visit_write(self, dbytes):
# def __repr__(self):
# def _repr_detail(self):
# def print(self, file=None, flags=(), p=None):
# def visit_print(self, p):
# def _print_type(self, p):
# def _print_offset(self, p):
# def _visit_print_data(self, p):
# def _visit_print_children(self, p):
# def base(cls, *args, **kw):
# def __init__(self, *args, **kw):
# def _init_from_args(self, *args, any_version=False, base=None, **kw):
# def __repr__(self):
# def __getitem__(self, key):
# def to_key(self, noversion=False):
# def from_key(cls, key):
# def has_version(self):
# def _read(self, dbytes):
# def _write_header(self, dbytes):
# def _print_type(self, p):
# def _print_offset(self, p):
# def __init__(self, file):
# def __repr__(self):
# def in_memory(cls):
# def from_data(cls, data):
# def from_arg(cls, arg):
# def _write_arg(cls, obj, arg, write_mode='wb'):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, exc_traceback):
# def read_bytes(self, n):
# def read_byte(self):
# def read_var_int(self):
# def read_uint(self):
# def read_int(self):
# def read_ulong(self):
# def read_long(self):
# def read_struct(self, st):
# def read_str(self):
# def read_id(self):
# def write_bytes(self, data):
# def write_uint(self, value):
# def write_int(self, value):
# def write_ulong(self, value):
# def write_long(self, value):
# def write_var_int(self, value):
# def require_equal_uint4(self, expect):
# def write_str(self, s):
# def write_id(self, id_):
# def stable_iter(self, source, *, start_pos=None):
# def gen():
# def write_size(self):
# def write_num_subsections(self):
# def write_section(self, *args, **kw):
#
# Path: distance/construct.py
# def DstOptional(subcon, otherwise=None):
# def _get_subcons(con):
# def __init__(cls, name, bases, dct):
# def _init_defaults(self):
# def _clone_data(self, new):
# def _read_section_data(self, dbytes, sec):
# def _write_section_data(self, dbytes, sec):
# def _visit_print_data(self, p):
# def construct_property(cls, name, doc=None):
# def fget(self):
# def fset(self, value):
# def fdel(self):
# def ExposeConstructFields(target=None, only=None):
# def decorate(target):
# def pop_name(con):
# class ConstructMeta(type):
# class BaseConstructFragment(Fragment, metaclass=ConstructMeta):
#
# Path: distance/classes.py
# class CollectorGroup(object):
#
# """A group of ClassCollector.
#
# Used to register classes of modules for later collection by a
# ClassesRegistry.
#
# Categories are accessed via attribute access. New categories are created on
# first access.
#
# """
#
# def __init__(self):
# self._colls = {}
#
# def __getattr__(self, name):
# try:
# return self._colls[name]
# except KeyError:
# coll = ClassCollector()
# self._colls[name] = coll
# setattr(self, name, coll)
# return coll
. Output only the next line. | is_interesting = True |
Continue the code snippet: <|code_start|>
Classes = CollectorGroup()
@Classes.fragments.fragment
class WarpAnchorFragment(BaseConstructFragment):
base_container = Section.base(Magic[2], 0x6e)
container_versions = 18
<|code_end|>
. Use current file imports:
from construct import Enum
from distance.bytes import Section, Magic
from distance.construct import (
BaseConstructFragment,
Byte, UInt, Int, Float, DstString,
Struct, Default,
)
from distance.classes import CollectorGroup
and context (classes, functions, or code) from other files:
# Path: distance/bytes.py
# UTF_16_DECODE = codecs.getdecoder('utf-16-le')
# UTF_16_ENCODE = codecs.getencoder('utf-16-le')
# S_COLOR_RGBA = Struct("<4f")
# S_FLOAT = Struct("<f")
# S_DOUBLE = Struct("<d")
# S_FLOAT3 = Struct("<fff")
# S_FLOAT4 = Struct("<ffff")
# S_BYTE = Struct('b')
# S_INT = Struct('<i')
# S_LONG = Struct('<q')
# S_UINT = Struct('<I')
# S_ULONG = Struct('<Q')
# S_UINT2 = Struct("<II")
# S_UINT3 = Struct("<III")
# SKIP_BYTES = b'\xFD\xFF\xFF\x7F'
# MAGIC_1 = 11111111
# MAGIC_2 = 22222222
# MAGIC_3 = 33333333
# MAGIC_5 = 55555555
# MAGIC_6 = 66666666
# MAGIC_7 = 77777777
# MAGIC_8 = 88888888
# MAGIC_9 = 99999999
# MAGIC_12 = 12121212
# MAGIC_32 = 32323232
# CATCH_EXCEPTIONS = (ValueError, EOFError)
# S_SEC_BASE = Struct("<IQ")
# MIN_SIZE = 12 # 4b (magic) + 8b (data_size)
# class ErrorPosition(namedtuple('_ErrorPosition', ['start', 'error'])):
# class BytesModel(object):
# class Section(BytesModel):
# class DstBytes(object):
# def __repr__(self):
# def get(cls, ex):
# def first(cls, ex):
# def maybe(cls, dbytes, **kw):
# def iter_n_maybe(cls, dbytes, n, **kw):
# def lazy_n_maybe(cls, dbytes, n, *, start_pos=None, **kw):
# def __init__(self, dbytes=None, **kw):
# def read(self, dbytes, *, seek_end=True, **kw):
# def _read(self, dbytes):
# def _init_defaults(self):
# def _after_init(self):
# def write(self, dbytes, **kw):
# def visit_write(self, dbytes):
# def __repr__(self):
# def _repr_detail(self):
# def print(self, file=None, flags=(), p=None):
# def visit_print(self, p):
# def _print_type(self, p):
# def _print_offset(self, p):
# def _visit_print_data(self, p):
# def _visit_print_children(self, p):
# def base(cls, *args, **kw):
# def __init__(self, *args, **kw):
# def _init_from_args(self, *args, any_version=False, base=None, **kw):
# def __repr__(self):
# def __getitem__(self, key):
# def to_key(self, noversion=False):
# def from_key(cls, key):
# def has_version(self):
# def _read(self, dbytes):
# def _write_header(self, dbytes):
# def _print_type(self, p):
# def _print_offset(self, p):
# def __init__(self, file):
# def __repr__(self):
# def in_memory(cls):
# def from_data(cls, data):
# def from_arg(cls, arg):
# def _write_arg(cls, obj, arg, write_mode='wb'):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, exc_traceback):
# def read_bytes(self, n):
# def read_byte(self):
# def read_var_int(self):
# def read_uint(self):
# def read_int(self):
# def read_ulong(self):
# def read_long(self):
# def read_struct(self, st):
# def read_str(self):
# def read_id(self):
# def write_bytes(self, data):
# def write_uint(self, value):
# def write_int(self, value):
# def write_ulong(self, value):
# def write_long(self, value):
# def write_var_int(self, value):
# def require_equal_uint4(self, expect):
# def write_str(self, s):
# def write_id(self, id_):
# def stable_iter(self, source, *, start_pos=None):
# def gen():
# def write_size(self):
# def write_num_subsections(self):
# def write_section(self, *args, **kw):
#
# Path: distance/construct.py
# def DstOptional(subcon, otherwise=None):
# def _get_subcons(con):
# def __init__(cls, name, bases, dct):
# def _init_defaults(self):
# def _clone_data(self, new):
# def _read_section_data(self, dbytes, sec):
# def _write_section_data(self, dbytes, sec):
# def _visit_print_data(self, p):
# def construct_property(cls, name, doc=None):
# def fget(self):
# def fset(self, value):
# def fdel(self):
# def ExposeConstructFields(target=None, only=None):
# def decorate(target):
# def pop_name(con):
# class ConstructMeta(type):
# class BaseConstructFragment(Fragment, metaclass=ConstructMeta):
#
# Path: distance/classes.py
# class CollectorGroup(object):
#
# """A group of ClassCollector.
#
# Used to register classes of modules for later collection by a
# ClassesRegistry.
#
# Categories are accessed via attribute access. New categories are created on
# first access.
#
# """
#
# def __init__(self):
# self._colls = {}
#
# def __getattr__(self, name):
# try:
# return self._colls[name]
# except KeyError:
# coll = ClassCollector()
# self._colls[name] = coll
# setattr(self, name, coll)
# return coll
. Output only the next line. | is_interesting = True |
Predict the next line for this snippet: <|code_start|>
def mkargs(maxrecurse=-1, type=[], numbers=[], section=[],
print_=False, invert=False):
return Namespace(**locals())
class RemoveTest(unittest.TestCase):
<|code_end|>
with the help of current file imports:
from argparse import Namespace
from distance import Level
from distance.filter import RemoveFilter
import unittest
and context from other files:
# Path: distance/_level.py
# class Level(Fragment):
#
# class_tag = 'Level'
# default_container = Section(Magic[9])
#
# layers = ()
# name = None
# version = 3
#
# def _read_section_data(self, dbytes, sec):
# if sec.magic != Magic[9]:
# raise ValueError(f"Unexpected section: {sec.magic}")
# self.name = sec.name
# self.version = sec.version
#
# num_layers = sec.count
#
# self.content = self.classes.level_content.lazy_n_maybe(
# dbytes, num_layers + 1)
# if num_layers:
# self.layers = LazySequence(
# (obj for obj in self.content if obj.class_tag == 'Layer'),
# num_layers)
#
# def _get_write_section(self, sec):
# return Section(Magic[9], self.name, len(self.layers), self.version)
#
# def _visit_write_section_data(self, dbytes, sec):
# if sec.magic != Magic[9]:
# raise ValueError(f"Unexpected section: {sec.magic}")
# for obj in self.content:
# yield obj.visit_write(dbytes)
#
# @property
# def settings(self):
# try:
# return self._settings
# except AttributeError:
# for obj in self.content:
# if obj.class_tag != 'Layer':
# s = obj
# break
# else:
# s = None
# self._settings = s
# return s
#
# @settings.setter
# def settings(self, s):
# self._settings = s
#
# def _repr_detail(self):
# supstr = super()._repr_detail()
# if self.name:
# return f" {self.name!r}{supstr}"
# return supstr
#
# def visit_print(self, p):
# with need_counters(p) as counters:
# yield super().visit_print(p)
# if counters:
# counters.print(p)
#
# def _print_type(self, p):
# p(f"Level: {self.name!r} version {self.version}")
#
# def _visit_print_data(self, p):
# yield super()._visit_print_data(p)
# p(f"Level name: {self.name!r}")
#
# def _visit_print_children(self, p):
# if self.settings is not None:
# with p.tree_children(1):
# yield self.settings.visit_print(p)
# for layer in self.layers:
# yield layer.visit_print(p)
#
# Path: distance/filter/remove.py
# class RemoveFilter(ObjectFilter):
#
# @classmethod
# def add_args(cls, parser):
# super().add_args(parser)
# parser.add_argument(":type", action='append', default=[],
# help="Match object type (regex).")
# parser.add_argument(":section", action='append', default=[],
# help="Match sections.")
# parser.add_argument(":print", action='store_true', dest='print_',
# help="Print matching candidates and abort filter.")
# parser.add_argument(":all", action='store_true',
# help="This is now the default and has been removed.")
# parser.add_argument(":number", dest='numbers', action='append',
# type=int, default=[],
# help="Select by candidate number.")
# parser.add_argument(":invert", action='store_true',
# help="Remove unmatched objects.")
#
# def __init__(self, args):
# super().__init__(args)
# self.print_ = args.print_
# self.numbers = args.numbers
# self.type_patterns = [re.compile(r) for r in args.type]
# self.sections = {parse_section(arg).to_key() for arg in args.section}
# self.invert = args.invert
# self.num_matches = 0
# self.matches = []
# self.removed = []
#
# def _match_sections(self, obj):
# for sec in obj.sections:
# if sec.to_key() in self.sections:
# return True
# for child in obj.children:
# if self._match_sections(child):
# return True
# return False
#
# def match_props(self, obj):
# if not self.type_patterns and not self.sections:
# return True
# if self.type_patterns:
# typename = obj.type
# if any(r.search(typename) for r in self.type_patterns):
# return True
# if self.sections:
# if not obj.is_object_group and self._match_sections(obj):
# return True
# return False
#
# def match(self, obj):
# if self.match_props(obj):
# num = self.num_matches
# self.num_matches = num + 1
# self.matches.append(obj)
# if not self.numbers or num in self.numbers:
# return True
# return False
#
# def filter_any_object(self, obj, levels):
# remove = self.match(obj)
# if self.invert:
# remove = not remove
# res = super().filter_any_object(obj, levels)
# if remove:
# self.removed.append(obj)
# return ()
# return res
#
# def post_filter(self, content):
# if self.print_:
# print_candidates(self.matches)
# return False
# return True
#
# def print_summary(self, p):
# p(f"Removed matches: {len(self.removed)}")
# num_objs, num_groups = count_objects(self.removed)
# if num_objs != len(self.removed):
# p(f"Removed objects: {num_objs}")
# p(f"Removed groups: {num_groups}")
, which may contain function names, class names, or code. Output only the next line. | def test_by_type(self): |
Predict the next line after this snippet: <|code_start|>
class SplitArgsTest(unittest.TestCase):
def test_empty(self):
self.assertEqual([], make_arglist(""))
def test_single(self):
self.assertEqual([":all"], make_arglist("all"))
def test_value(self):
self.assertEqual([":num=2"], make_arglist("num=2"))
def test_multiple(self):
self.assertEqual([":all", ":num=2"], make_arglist("all:num=2"))
<|code_end|>
using the current file's imports:
import unittest
from distance_scripts.filterlevel import make_arglist
and any relevant context from other files:
# Path: distance_scripts/filterlevel.py
# def make_arglist(s):
#
# def iter_tokens(source):
# if not source:
# return
# token = []
# escape = False
# for char in source:
# if escape:
# escape = False
# token.append(char)
# elif char == '\\':
# escape = True
# elif char == ':':
# yield token
# token = []
# else:
# token.append(char)
# yield token
#
# return [":" + ''.join(token) for token in iter_tokens(s)]
. Output only the next line. | def test_escape(self): |
Given snippet: <|code_start|>
class DstBytesTest(unittest.TestCase):
def test_from_arg_dstbytes(self):
with open("tests/in/customobject/2cubes.bytes", 'rb') as f:
dbytes = DstBytes(f)
res = DstBytes.from_arg(dbytes)
self.assertIs(dbytes, res)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
from distance.bytes import DstBytes, Magic, Section
and context:
# Path: distance/bytes.py
# UTF_16_DECODE = codecs.getdecoder('utf-16-le')
# UTF_16_ENCODE = codecs.getencoder('utf-16-le')
# S_COLOR_RGBA = Struct("<4f")
# S_FLOAT = Struct("<f")
# S_DOUBLE = Struct("<d")
# S_FLOAT3 = Struct("<fff")
# S_FLOAT4 = Struct("<ffff")
# S_BYTE = Struct('b')
# S_INT = Struct('<i')
# S_LONG = Struct('<q')
# S_UINT = Struct('<I')
# S_ULONG = Struct('<Q')
# S_UINT2 = Struct("<II")
# S_UINT3 = Struct("<III")
# SKIP_BYTES = b'\xFD\xFF\xFF\x7F'
# MAGIC_1 = 11111111
# MAGIC_2 = 22222222
# MAGIC_3 = 33333333
# MAGIC_5 = 55555555
# MAGIC_6 = 66666666
# MAGIC_7 = 77777777
# MAGIC_8 = 88888888
# MAGIC_9 = 99999999
# MAGIC_12 = 12121212
# MAGIC_32 = 32323232
# CATCH_EXCEPTIONS = (ValueError, EOFError)
# S_SEC_BASE = Struct("<IQ")
# MIN_SIZE = 12 # 4b (magic) + 8b (data_size)
# class ErrorPosition(namedtuple('_ErrorPosition', ['start', 'error'])):
# class BytesModel(object):
# class Section(BytesModel):
# class DstBytes(object):
# def __repr__(self):
# def get(cls, ex):
# def first(cls, ex):
# def maybe(cls, dbytes, **kw):
# def iter_n_maybe(cls, dbytes, n, **kw):
# def lazy_n_maybe(cls, dbytes, n, *, start_pos=None, **kw):
# def __init__(self, dbytes=None, **kw):
# def read(self, dbytes, *, seek_end=True, **kw):
# def _read(self, dbytes):
# def _init_defaults(self):
# def _after_init(self):
# def write(self, dbytes, **kw):
# def visit_write(self, dbytes):
# def __repr__(self):
# def _repr_detail(self):
# def print(self, file=None, flags=(), p=None):
# def visit_print(self, p):
# def _print_type(self, p):
# def _print_offset(self, p):
# def _visit_print_data(self, p):
# def _visit_print_children(self, p):
# def base(cls, *args, **kw):
# def __init__(self, *args, **kw):
# def _init_from_args(self, *args, any_version=False, base=None, **kw):
# def __repr__(self):
# def __getitem__(self, key):
# def to_key(self, noversion=False):
# def from_key(cls, key):
# def has_version(self):
# def _read(self, dbytes):
# def _write_header(self, dbytes):
# def _print_type(self, p):
# def _print_offset(self, p):
# def __init__(self, file):
# def __repr__(self):
# def in_memory(cls):
# def from_data(cls, data):
# def from_arg(cls, arg):
# def _write_arg(cls, obj, arg, write_mode='wb'):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, exc_traceback):
# def read_bytes(self, n):
# def read_byte(self):
# def read_var_int(self):
# def read_uint(self):
# def read_int(self):
# def read_ulong(self):
# def read_long(self):
# def read_struct(self, st):
# def read_str(self):
# def read_id(self):
# def write_bytes(self, data):
# def write_uint(self, value):
# def write_int(self, value):
# def write_ulong(self, value):
# def write_long(self, value):
# def write_var_int(self, value):
# def require_equal_uint4(self, expect):
# def write_str(self, s):
# def write_id(self, id_):
# def stable_iter(self, source, *, start_pos=None):
# def gen():
# def write_size(self):
# def write_num_subsections(self):
# def write_section(self, *args, **kw):
which might include code, classes, or functions. Output only the next line. | def test_from_arg_filename(self): |
Given the code snippet: <|code_start|> self.assertIs(dbytes, res)
def test_from_arg_filename(self):
res = DstBytes.from_arg("tests/in/customobject/2cubes.bytes")
self.assertEqual(Magic[6], res.read_uint())
def test_from_arg_file(self):
with open("tests/in/customobject/2cubes.bytes", 'rb') as f:
res = DstBytes.from_arg(f)
self.assertEqual(Magic[6], res.read_uint())
def test_from_arg_checks_file_mode(self):
with open("tests/in/customobject/2cubes.bytes") as f:
with self.assertRaises(IOError) as cm:
DstBytes.from_arg(f)
msg = str(cm.exception)
self.assertTrue("'b' mode" in msg, msg=f"actual message: {msg!r}")
class SectionTest(unittest.TestCase):
def test_from_key_magic9(self):
key = Section(Magic[9], 'A Level').to_key()
sec = Section.from_key(key)
self.assertEqual(sec.magic, Magic[9])
<|code_end|>
, generate the next line using the imports in this file:
import unittest
from distance.bytes import DstBytes, Magic, Section
and context (functions, classes, or occasionally code) from other files:
# Path: distance/bytes.py
# UTF_16_DECODE = codecs.getdecoder('utf-16-le')
# UTF_16_ENCODE = codecs.getencoder('utf-16-le')
# S_COLOR_RGBA = Struct("<4f")
# S_FLOAT = Struct("<f")
# S_DOUBLE = Struct("<d")
# S_FLOAT3 = Struct("<fff")
# S_FLOAT4 = Struct("<ffff")
# S_BYTE = Struct('b')
# S_INT = Struct('<i')
# S_LONG = Struct('<q')
# S_UINT = Struct('<I')
# S_ULONG = Struct('<Q')
# S_UINT2 = Struct("<II")
# S_UINT3 = Struct("<III")
# SKIP_BYTES = b'\xFD\xFF\xFF\x7F'
# MAGIC_1 = 11111111
# MAGIC_2 = 22222222
# MAGIC_3 = 33333333
# MAGIC_5 = 55555555
# MAGIC_6 = 66666666
# MAGIC_7 = 77777777
# MAGIC_8 = 88888888
# MAGIC_9 = 99999999
# MAGIC_12 = 12121212
# MAGIC_32 = 32323232
# CATCH_EXCEPTIONS = (ValueError, EOFError)
# S_SEC_BASE = Struct("<IQ")
# MIN_SIZE = 12 # 4b (magic) + 8b (data_size)
# class ErrorPosition(namedtuple('_ErrorPosition', ['start', 'error'])):
# class BytesModel(object):
# class Section(BytesModel):
# class DstBytes(object):
# def __repr__(self):
# def get(cls, ex):
# def first(cls, ex):
# def maybe(cls, dbytes, **kw):
# def iter_n_maybe(cls, dbytes, n, **kw):
# def lazy_n_maybe(cls, dbytes, n, *, start_pos=None, **kw):
# def __init__(self, dbytes=None, **kw):
# def read(self, dbytes, *, seek_end=True, **kw):
# def _read(self, dbytes):
# def _init_defaults(self):
# def _after_init(self):
# def write(self, dbytes, **kw):
# def visit_write(self, dbytes):
# def __repr__(self):
# def _repr_detail(self):
# def print(self, file=None, flags=(), p=None):
# def visit_print(self, p):
# def _print_type(self, p):
# def _print_offset(self, p):
# def _visit_print_data(self, p):
# def _visit_print_children(self, p):
# def base(cls, *args, **kw):
# def __init__(self, *args, **kw):
# def _init_from_args(self, *args, any_version=False, base=None, **kw):
# def __repr__(self):
# def __getitem__(self, key):
# def to_key(self, noversion=False):
# def from_key(cls, key):
# def has_version(self):
# def _read(self, dbytes):
# def _write_header(self, dbytes):
# def _print_type(self, p):
# def _print_offset(self, p):
# def __init__(self, file):
# def __repr__(self):
# def in_memory(cls):
# def from_data(cls, data):
# def from_arg(cls, arg):
# def _write_arg(cls, obj, arg, write_mode='wb'):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, exc_traceback):
# def read_bytes(self, n):
# def read_byte(self):
# def read_var_int(self):
# def read_uint(self):
# def read_int(self):
# def read_ulong(self):
# def read_long(self):
# def read_struct(self, st):
# def read_str(self):
# def read_id(self):
# def write_bytes(self, data):
# def write_uint(self, value):
# def write_int(self, value):
# def write_ulong(self, value):
# def write_long(self, value):
# def write_var_int(self, value):
# def require_equal_uint4(self, expect):
# def write_str(self, s):
# def write_id(self, id_):
# def stable_iter(self, source, *, start_pos=None):
# def gen():
# def write_size(self):
# def write_num_subsections(self):
# def write_section(self, *args, **kw):
. Output only the next line. | self.assertEqual(sec.name, None) |
Here is a snippet: <|code_start|> with p.tree_children():
p.tree_next_child()
p(f"First")
p.tree_next_child()
with p.tree_children():
p.tree_next_child()
p(f"Second")
p(f"Third")
self.assertResult("""
Root
├─ First
│ └─ Second
└─ Third
""")
def test_nested_end_empty(self):
p = self.p
p("Root")
with p.tree_children():
p.tree_next_child()
p(f"First")
p.tree_next_child()
with p.tree_children():
p.tree_next_child()
p(f"Second")
with p.tree_children():
p.tree_next_child()
self.assertResult("""
Root
└─ First
<|code_end|>
. Write the next line using the current file imports:
import unittest
from io import StringIO
from textwrap import dedent
from trampoline import trampoline
from distance.printing import PrintContext
from .common import small_stack
and context from other files:
# Path: distance/printing.py
# class PrintContext(object):
#
# """Context class for printing objects."""
#
# def __init__(self, file=sys.stdout, flags=()):
# self.file = file
# self.flags = flags
# # Data for each level of tree_children():
# # 0. Buffered lines for that level.
# # 1. Whether the child on that level has been ended
# # by a call to tree_next_child().
# # 2. Counter of remaining children if 'count' was passed to
# # tree_children() on that level.
# self._tree_data = [], [], []
#
# @classmethod
# def for_test(cls, file=None, flags=None):
# if flags is None:
# class ContainsEverything:
# def __contains__(self, obj):
# return True
# flags = ContainsEverything()
# p = cls(file=file, flags=flags)
# def print_exc(e):
# raise e
# p.print_exception = print_exc
# return p
#
# def __call__(self, text):
# buf, ended, remain = self._tree_data
# if buf:
# count = remain[-1]
# if count is not None:
# self._tree_push_up(len(buf) - 1, [text], count <= 1)
# else:
# lines = buf[-1]
# if ended[-1]:
# self._tree_push_up(len(buf) - 1, lines, False)
# lines.clear()
# lines.extend(text.split('\n'))
# else:
# f = self.file
# if f is not None:
# print(text, file=f)
#
# def _tree_push_up(self, level, lines, last):
# while True:
# if not lines:
# return
# buf, ended, remain = self._tree_data
# if level < 0:
# raise IndexError
# was_ended = ended[level]
# ended[level] = False
# if level > 0:
# upbuffer = buf[level - 1]
# push_line = upbuffer.append
# else:
# f = self.file
# def push_line(line):
# if f is not None:
# print(line, file=f)
# it = iter(lines)
# if was_ended:
# if last:
# prefix = "└─ "
# else:
# prefix = "├─ "
# push_line(prefix + next(it))
# if last:
# prefix = " "
# else:
# prefix = "│ "
# for line in it:
# push_line(prefix + line)
#
# if remain is not None:
# lines.clear()
# if level > 0 and remain[level - 1] is not None:
# # In unbuffered mode (with 'count' passed to tree_children)
# # we iterate up to the root and print everything immediately.
# level -= 1
# lines = upbuffer
# last = remain[level] <= 1
# else:
# return
#
# @contextmanager
# def tree_children(self, count=None):
# buf, ended, remain = self._tree_data
# level = len(buf)
# if remain and remain[level - 1] is None:
# # We are nested inside a tree_children() without count. Cannot
# # use unbuffered printing.
# count = None
# lines = []
# buf.append(lines)
# # When unbuffered, we start with ended state, so we get our tree
# # printed on the first nested line.
# ended.append(count is not None)
# remain.append(count)
# broken = False
# try:
# yield
# except BrokenPipeError:
# broken = True
# raise
# finally:
# ended[level] = True
# if not broken and count is None:
# self._tree_push_up(level, lines, True)
# buf.pop()
# ended.pop()
# remain.pop()
#
# def tree_next_child(self):
# buf, ended, remain = self._tree_data
# if buf:
# count = remain[-1]
# if count is not None:
# # We don't count down if nothing was printed.
# # For our tree to look correct, classes need to make sure
# # they print at least once for each child anyways.
# if not ended[-1]:
# remain[-1] = count - 1
# ended[-1] = True
# elif buf[-1]:
# ended[-1] = True
#
# def print_object(self, obj):
# obj.print(p=self)
#
# def print_exception(self, exc):
# exc_str = traceback.format_exception(type(exc), exc, exc.__traceback__)
# for part in exc_str:
# if part.endswith('\n'):
# part = part[:-1]
# for line in part.split('\n'):
# self(line)
# try:
# self(f"Exception start: 0x{exc.start_pos:08x}")
# self(f"Exception pos: 0x{exc.exc_pos:08x}")
# except AttributeError:
# pass
#
# Path: tests/common.py
# @contextmanager
# def small_stack(size):
# saved_limit = sys.getrecursionlimit()
# sys.setrecursionlimit(size + len(inspect.stack()))
# try:
# yield
# finally:
# sys.setrecursionlimit(saved_limit)
, which may include functions, classes, or code. Output only the next line. | └─ Second |
Given snippet: <|code_start|> p(f"First")
p.tree_next_child()
with p.tree_children():
p.tree_next_child()
p(f"Second")
with p.tree_children():
p.tree_next_child()
self.assertResult("""
Root
└─ First
└─ Second
""")
def test_count_unbuffered(self):
p = self.p
p("Root")
with p.tree_children(2):
p(f"First")
self.assertResult("""
Root
├─ First
""")
p.tree_next_child()
p(f"Second")
self.assertResult("""
Root
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
from io import StringIO
from textwrap import dedent
from trampoline import trampoline
from distance.printing import PrintContext
from .common import small_stack
and context:
# Path: distance/printing.py
# class PrintContext(object):
#
# """Context class for printing objects."""
#
# def __init__(self, file=sys.stdout, flags=()):
# self.file = file
# self.flags = flags
# # Data for each level of tree_children():
# # 0. Buffered lines for that level.
# # 1. Whether the child on that level has been ended
# # by a call to tree_next_child().
# # 2. Counter of remaining children if 'count' was passed to
# # tree_children() on that level.
# self._tree_data = [], [], []
#
# @classmethod
# def for_test(cls, file=None, flags=None):
# if flags is None:
# class ContainsEverything:
# def __contains__(self, obj):
# return True
# flags = ContainsEverything()
# p = cls(file=file, flags=flags)
# def print_exc(e):
# raise e
# p.print_exception = print_exc
# return p
#
# def __call__(self, text):
# buf, ended, remain = self._tree_data
# if buf:
# count = remain[-1]
# if count is not None:
# self._tree_push_up(len(buf) - 1, [text], count <= 1)
# else:
# lines = buf[-1]
# if ended[-1]:
# self._tree_push_up(len(buf) - 1, lines, False)
# lines.clear()
# lines.extend(text.split('\n'))
# else:
# f = self.file
# if f is not None:
# print(text, file=f)
#
# def _tree_push_up(self, level, lines, last):
# while True:
# if not lines:
# return
# buf, ended, remain = self._tree_data
# if level < 0:
# raise IndexError
# was_ended = ended[level]
# ended[level] = False
# if level > 0:
# upbuffer = buf[level - 1]
# push_line = upbuffer.append
# else:
# f = self.file
# def push_line(line):
# if f is not None:
# print(line, file=f)
# it = iter(lines)
# if was_ended:
# if last:
# prefix = "└─ "
# else:
# prefix = "├─ "
# push_line(prefix + next(it))
# if last:
# prefix = " "
# else:
# prefix = "│ "
# for line in it:
# push_line(prefix + line)
#
# if remain is not None:
# lines.clear()
# if level > 0 and remain[level - 1] is not None:
# # In unbuffered mode (with 'count' passed to tree_children)
# # we iterate up to the root and print everything immediately.
# level -= 1
# lines = upbuffer
# last = remain[level] <= 1
# else:
# return
#
# @contextmanager
# def tree_children(self, count=None):
# buf, ended, remain = self._tree_data
# level = len(buf)
# if remain and remain[level - 1] is None:
# # We are nested inside a tree_children() without count. Cannot
# # use unbuffered printing.
# count = None
# lines = []
# buf.append(lines)
# # When unbuffered, we start with ended state, so we get our tree
# # printed on the first nested line.
# ended.append(count is not None)
# remain.append(count)
# broken = False
# try:
# yield
# except BrokenPipeError:
# broken = True
# raise
# finally:
# ended[level] = True
# if not broken and count is None:
# self._tree_push_up(level, lines, True)
# buf.pop()
# ended.pop()
# remain.pop()
#
# def tree_next_child(self):
# buf, ended, remain = self._tree_data
# if buf:
# count = remain[-1]
# if count is not None:
# # We don't count down if nothing was printed.
# # For our tree to look correct, classes need to make sure
# # they print at least once for each child anyways.
# if not ended[-1]:
# remain[-1] = count - 1
# ended[-1] = True
# elif buf[-1]:
# ended[-1] = True
#
# def print_object(self, obj):
# obj.print(p=self)
#
# def print_exception(self, exc):
# exc_str = traceback.format_exception(type(exc), exc, exc.__traceback__)
# for part in exc_str:
# if part.endswith('\n'):
# part = part[:-1]
# for line in part.split('\n'):
# self(line)
# try:
# self(f"Exception start: 0x{exc.start_pos:08x}")
# self(f"Exception pos: 0x{exc.exc_pos:08x}")
# except AttributeError:
# pass
#
# Path: tests/common.py
# @contextmanager
# def small_stack(size):
# saved_limit = sys.getrecursionlimit()
# sys.setrecursionlimit(size + len(inspect.stack()))
# try:
# yield
# finally:
# sys.setrecursionlimit(saved_limit)
which might include code, classes, or functions. Output only the next line. | ├─ First |
Predict the next line for this snippet: <|code_start|>
class LazySequenceTest(unittest.TestCase):
def test_empty(self):
self.assertRaises(IndexError, LazySequence([], 0).__getitem__, 0)
class LazySequenceIndexTest(unittest.TestCase):
def setUp(self):
self.orig = [10, 11, 12, 13, 14]
self.iter = iter(self.orig)
self.lazy = LazySequence(self.iter, 5)
def _test_compare(self, *indices):
for index in indices:
<|code_end|>
with the help of current file imports:
import unittest
from distance.lazy import LazySequence, LazyMappedSequence
and context from other files:
# Path: distance/lazy.py
# class LazySequence(BaseLazySequence):
#
# """Lazy sequence using an iterator as source.
#
# If the iterator stops, the reported length of this sequence is adjusted
# to the number of values yielded up to that point.
#
# This affects indexing operations, and can result in IndexErrors for ranges
# that are within the length reported before the iterator stopped.
#
# Conversely, if the iterator yields more values, these values may be
# accessed by iterating this sequence or by indexing beyond the reported
# length.
#
# """
#
# __slots__ = ('_iterator', '_len', '_list')
#
# def __init__(self, source, length):
# self._iterator = iter(source)
# self._len = length
# self._list = []
#
# def __len__(self):
# return self._len
#
# def __repr__(self):
# l = self._list
# mylen = self._len
# curlen = len(l)
# if curlen != mylen:
# l = self._list
# remaining = mylen - curlen
# return f"<lazy seq {l!r}{remaining:+}>"
# else:
# return f"<lazy seq {l!r}>"
#
# def __iter__(self):
#
# """Iterate this sequence.
#
# May yield more or less values than the reported length of this
# sequence. Iteration is only stopped when the wrapped iterator exits.
#
# """
#
# iterator = self._iterator
# l = self._list
# if iterator is None:
# yield from l
# return
# i = 0
# try:
# while True:
# try:
# yield l[i]
# except IndexError:
# v = next(iterator)
# l.append(v)
# yield v
# i += 1
# except StopIteration:
# # reached the real end of the iterator
# self._iterator = None
# self._len = i
#
# def _inflate_slice(self, len_, start, stop, stride):
# l = self._list
# current = len(l)
# needed = stop - current
# if needed <= 0:
# return len_
# iterator = self._iterator
# if iterator is None:
# return len_
# if needed == 1:
# # optimize single element inflation
# try:
# l.append(next(iterator))
# return len_
# except StopIteration:
# pass # iterator ended early; fall through
# else:
# l.extend(islice(iterator, needed))
# current = len(l)
# if stop - 1 < current:
# return len_
# # iterator ended earlier than the reported length.
# # Try to patch our length and hope no one notices.
# self._iterator = None
# self._len = current
# return current
#
# class LazyMappedSequence(BaseLazySequence):
#
# """Lazy sequence yielding content of a sequence mapped by a function.
#
# The function is only called the first time an element is accessed.
#
# """
#
# __slots__ = ('_source', '_func', '_list')
#
# @staticmethod
# def peek(obj, index):
# """Access given sequence without evaluating any additional values.
#
# If `obj` is a LazyMappedSequence, no new values are evaluated, and
# `lazy.UNSET` is returned in their place.
#
# For any other object, this is a regular indexing operation.
# """
# if isinstance(obj, LazyMappedSequence):
# return obj._list[index]
# return obj[index]
#
# def __init__(self, source, func):
# self._source = source
# self._func = func
# self._list = [UNSET] * len(source)
#
# def __len__(self):
# return len(self._list)
#
# def __repr__(self):
# s = ', '.join('…' if i is UNSET else repr(i) for i in self._list)
# return f"<lazy map [{s}]>"
#
# def __iter__(self):
# l = self._list
# source = self._source
# if source is None:
# yield from l
# return
# func = self._func
# i = 0
# try:
# for v in l:
# if v is UNSET:
# v = func(source[i])
# l[i] = v
# yield v
# i += 1
# except IndexError:
# del l[i:]
# # All entries are now inflated.
# self._source = None
#
# def _inflate_slice(self, len_, start, stop, stride):
# try:
# l = self._list
# if start == stop - 1:
# # optimize single element access
# elem = l[start]
# if elem is UNSET:
# l[start] = self._func(self._source[start])
# else:
# source = self._source
# func = self._func
# for i in range(start, stop, stride):
# elem = l[i]
# if elem is UNSET:
# l[i] = func(source[i])
# return len_
# except IndexError:
# # source decided it's actually shorter.
# newlen = len(self._source)
# del l[newlen:]
# return newlen
, which may contain function names, class names, or code. Output only the next line. | with self.subTest(index=index): |
Given the code snippet: <|code_start|>
class LazySequenceTest(unittest.TestCase):
def test_empty(self):
self.assertRaises(IndexError, LazySequence([], 0).__getitem__, 0)
class LazySequenceIndexTest(unittest.TestCase):
def setUp(self):
self.orig = [10, 11, 12, 13, 14]
self.iter = iter(self.orig)
self.lazy = LazySequence(self.iter, 5)
def _test_compare(self, *indices):
for index in indices:
with self.subTest(index=index):
<|code_end|>
, generate the next line using the imports in this file:
import unittest
from distance.lazy import LazySequence, LazyMappedSequence
and context (functions, classes, or occasionally code) from other files:
# Path: distance/lazy.py
# class LazySequence(BaseLazySequence):
#
# """Lazy sequence using an iterator as source.
#
# If the iterator stops, the reported length of this sequence is adjusted
# to the number of values yielded up to that point.
#
# This affects indexing operations, and can result in IndexErrors for ranges
# that are within the length reported before the iterator stopped.
#
# Conversely, if the iterator yields more values, these values may be
# accessed by iterating this sequence or by indexing beyond the reported
# length.
#
# """
#
# __slots__ = ('_iterator', '_len', '_list')
#
# def __init__(self, source, length):
# self._iterator = iter(source)
# self._len = length
# self._list = []
#
# def __len__(self):
# return self._len
#
# def __repr__(self):
# l = self._list
# mylen = self._len
# curlen = len(l)
# if curlen != mylen:
# l = self._list
# remaining = mylen - curlen
# return f"<lazy seq {l!r}{remaining:+}>"
# else:
# return f"<lazy seq {l!r}>"
#
# def __iter__(self):
#
# """Iterate this sequence.
#
# May yield more or less values than the reported length of this
# sequence. Iteration is only stopped when the wrapped iterator exits.
#
# """
#
# iterator = self._iterator
# l = self._list
# if iterator is None:
# yield from l
# return
# i = 0
# try:
# while True:
# try:
# yield l[i]
# except IndexError:
# v = next(iterator)
# l.append(v)
# yield v
# i += 1
# except StopIteration:
# # reached the real end of the iterator
# self._iterator = None
# self._len = i
#
# def _inflate_slice(self, len_, start, stop, stride):
# l = self._list
# current = len(l)
# needed = stop - current
# if needed <= 0:
# return len_
# iterator = self._iterator
# if iterator is None:
# return len_
# if needed == 1:
# # optimize single element inflation
# try:
# l.append(next(iterator))
# return len_
# except StopIteration:
# pass # iterator ended early; fall through
# else:
# l.extend(islice(iterator, needed))
# current = len(l)
# if stop - 1 < current:
# return len_
# # iterator ended earlier than the reported length.
# # Try to patch our length and hope no one notices.
# self._iterator = None
# self._len = current
# return current
#
# class LazyMappedSequence(BaseLazySequence):
#
# """Lazy sequence yielding content of a sequence mapped by a function.
#
# The function is only called the first time an element is accessed.
#
# """
#
# __slots__ = ('_source', '_func', '_list')
#
# @staticmethod
# def peek(obj, index):
# """Access given sequence without evaluating any additional values.
#
# If `obj` is a LazyMappedSequence, no new values are evaluated, and
# `lazy.UNSET` is returned in their place.
#
# For any other object, this is a regular indexing operation.
# """
# if isinstance(obj, LazyMappedSequence):
# return obj._list[index]
# return obj[index]
#
# def __init__(self, source, func):
# self._source = source
# self._func = func
# self._list = [UNSET] * len(source)
#
# def __len__(self):
# return len(self._list)
#
# def __repr__(self):
# s = ', '.join('…' if i is UNSET else repr(i) for i in self._list)
# return f"<lazy map [{s}]>"
#
# def __iter__(self):
# l = self._list
# source = self._source
# if source is None:
# yield from l
# return
# func = self._func
# i = 0
# try:
# for v in l:
# if v is UNSET:
# v = func(source[i])
# l[i] = v
# yield v
# i += 1
# except IndexError:
# del l[i:]
# # All entries are now inflated.
# self._source = None
#
# def _inflate_slice(self, len_, start, stop, stride):
# try:
# l = self._list
# if start == stop - 1:
# # optimize single element access
# elem = l[start]
# if elem is UNSET:
# l[start] = self._func(self._source[start])
# else:
# source = self._source
# func = self._func
# for i in range(start, stop, stride):
# elem = l[i]
# if elem is UNSET:
# l[i] = func(source[i])
# return len_
# except IndexError:
# # source decided it's actually shorter.
# newlen = len(self._source)
# del l[newlen:]
# return newlen
. Output only the next line. | self.assertEqual(self.orig.__getitem__(index), |
Predict the next line after this snippet: <|code_start|>class Base(object):
class WriteReadTest(common.WriteReadTest):
read_obj = Level
class StraightroadTest(Base.WriteReadTest):
filename = "tests/in/level/test-straightroad.bytes"
def verify_obj(self, level):
self.assertEqual("Test-straightroad", level.name)
self.assertEqual(6, len([o for l in level.layers for o in l.objects]))
class StraightroadV25Test(Base.WriteReadTest):
filename = "tests/in/level/test straightroad v25.bytes"
def verify_obj(self, level):
self.assertEqual("Test-straightroad v25", level.name)
self.assertEqual(2, len(level.layers))
self.assertEqual(5, len(level.layers[0].objects))
self.assertEqual(1, len(level.layers[1].objects))
class StraightroadV26AuthorTest(Base.WriteReadTest):
<|code_end|>
using the current file's imports:
from distance import Level
from tests import common
and any relevant context from other files:
# Path: distance/_level.py
# class Level(Fragment):
#
# class_tag = 'Level'
# default_container = Section(Magic[9])
#
# layers = ()
# name = None
# version = 3
#
# def _read_section_data(self, dbytes, sec):
# if sec.magic != Magic[9]:
# raise ValueError(f"Unexpected section: {sec.magic}")
# self.name = sec.name
# self.version = sec.version
#
# num_layers = sec.count
#
# self.content = self.classes.level_content.lazy_n_maybe(
# dbytes, num_layers + 1)
# if num_layers:
# self.layers = LazySequence(
# (obj for obj in self.content if obj.class_tag == 'Layer'),
# num_layers)
#
# def _get_write_section(self, sec):
# return Section(Magic[9], self.name, len(self.layers), self.version)
#
# def _visit_write_section_data(self, dbytes, sec):
# if sec.magic != Magic[9]:
# raise ValueError(f"Unexpected section: {sec.magic}")
# for obj in self.content:
# yield obj.visit_write(dbytes)
#
# @property
# def settings(self):
# try:
# return self._settings
# except AttributeError:
# for obj in self.content:
# if obj.class_tag != 'Layer':
# s = obj
# break
# else:
# s = None
# self._settings = s
# return s
#
# @settings.setter
# def settings(self, s):
# self._settings = s
#
# def _repr_detail(self):
# supstr = super()._repr_detail()
# if self.name:
# return f" {self.name!r}{supstr}"
# return supstr
#
# def visit_print(self, p):
# with need_counters(p) as counters:
# yield super().visit_print(p)
# if counters:
# counters.print(p)
#
# def _print_type(self, p):
# p(f"Level: {self.name!r} version {self.version}")
#
# def _visit_print_data(self, p):
# yield super()._visit_print_data(p)
# p(f"Level name: {self.name!r}")
#
# def _visit_print_children(self, p):
# if self.settings is not None:
# with p.tree_children(1):
# yield self.settings.visit_print(p)
# for layer in self.layers:
# yield layer.visit_print(p)
. Output only the next line. | filename = "tests/in/level/test straightroad v26 author.bytes" |
Given snippet: <|code_start|>
"""Converts the given vetices representing a right triangle to a
transform for a WedgeGS GoldenSimple."""
pr, pa, pb = verts
rot = rangle_to_vers(pa - pr, pb - pr)
pos = (pa + pb) / 2
scale = [1e-5, length(pr - pb) / SIMPLE_SIZE, length(pr - pa) / SIMPLE_SIZE]
return pos, convquat(rot), scale
def create_two_wedges(pmax, pnext, plast, objs, simple_args={}):
"""Creates two wedges for an arbitrary triangle.
pmax - the vertex with the greatest angle
pnext, plast - the remaining vertices
objs - the list to put the objects into
simple_args - args to pass to GoldenSimple"""
vnm = pmax - pnext
vnl = plast - pnext
vr = dot(vnm, vnl) / length(vnl) * normalized(vnl)
pr = vr + pnext
transform = rtri_to_transform(np.array([pr, pmax, pnext]))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np, quaternion
import numpy as np
from distance.classes import DefaultClasses
from numpy import sin, cos, arctan2
from numpy import dot
from numpy import pi
and context:
# Path: distance/classes.py
# class ProbeError(Exception):
# class RegisterError(ValueError):
# class AutoloadError(Exception):
# class TagError(LookupError):
# class ClassLookupError(LookupError):
# class ClassCollector(object):
# class _BaseProber(object):
# class ClassCollection(_BaseProber, ClassCollector):
# class CompositeProber(_BaseProber):
# class CollectorGroup(object):
# class ClassesRegistry(object):
# class _InstanceFactory(object):
# def check(cls, tag):
# def __init__(self, tag):
# def __init__(self, *args, **kw):
# def fragment_property(tag, name, default=None, doc=None):
# def fget(self):
# def fset(self, value):
# def __init__(self, **kw):
# def add_fragment(self, cls, *args,
# any_version=False, versions=None, **kw):
# def add_object(self, type, cls):
# def add_info(self, *args, tag=None):
# def decorate(cls):
# def add_tag(self, tag, *args, **kw):
# def _add_fragment_for_section(self, cls, sec, any_version):
# def _add_info(self, tag, cls=None, container=None, versions=None):
# def object(self, *args):
# def decorate(cls):
# def fragment(self, *args, **kw):
# def decorate(cls):
# def __init__(self, *, baseclass=None, probe_baseclass=None, **kw):
# def _probe_section_key(self, key):
# def _probe_fallback(self, sec):
# def probe_section(self, sec):
# def probe(self, dbytes, *, probe_section=None):
# def read(self, dbytes, *, probe_section=None, **kw):
# def maybe(self, dbytes, *, probe_section=None, **kw):
# def iter_n_maybe(self, dbytes, n, **kw):
# def lazy_n_maybe(self, dbytes, n, *, start_pos=None, **kw):
# def __init__(self, *, key=None, get_fallback_container=None, **kw):
# def add_func(self, func, tag):
# def func(self, tag):
# def decorate(func):
# def _probe_section_key(self, key):
# def _probe_fallback(self, sec):
# def get_base_key(self, tag):
# def get_tag(self, section):
# def _get_tag_impl_info(self, tag):
# def fragment_attrs(self, *tags):
# def decorate(cls):
# def __klass(self, tag, version, fallback):
# def klass(self, tag, *, version=None):
# def factory(self, tag, *, version=None, fallback=None):
# def create(self, tag, *, fallback=None, **kw):
# def is_section_interesting(self, sec):
# def print_listing(self, file=None, *, tag=None, p=None):
# def print_info(tag, info):
# def _load_autoload_content(self, content):
# def _generate_autoload_content(self):
# def _get_current_autoload_content(self):
# def _autoload_impl_module(self, sec_key, impl_module):
# def _load_impl(self, coll, update_classes):
# def __init__(self, *, probers=None, **kw):
# def _probe_section_key(self, key):
# def _probe_fallback(self, sec):
# def print_listing(self, file=None, *, p=None):
# def __init__(self):
# def __getattr__(self, name):
# def __init__(self):
# def init_category(self, key, **kw):
# def init_composite(self, key, keys, **kw):
# def get_category(self, key):
# def print_listing(self, file=None, *, p=None, print_classes=True):
# def autoload_modules(self, module_name, impl_modules):
# def write_autoload_module(self, module_name):
# def _verify_autoload(self):
# def copy(self, **overrides):
# def __init__(self, cls, container):
# def __call__(self, **kw):
# def _get_klass_def(info, version):
# def _update_class_info(target, other):
# def _merge_class_info(classes, tag, info):
# def _merged_info(tag, prev, new):
# def _load_autoload_module(colls, module_name):
# def _load_impls_to_colls(colls, impl_modules):
which might include code, classes, or functions. Output only the next line. | objs.append(_mkwedge(transform=transform, **simple_args)) |
Given snippet: <|code_start|>
def mkargs(maxrecurse=-1, collision=True, color=True, debug=False):
return Namespace(**locals())
class UnkillTest(unittest.TestCase):
def test_replace(self):
l = Level("tests/in/level/finite grids.bytes")
f = UnkillFilter(mkargs())
f.apply(l)
types = [o.type for ly in l.layers for o in ly.objects]
self.assertEqual(5, len(types))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from argparse import Namespace
from distance import Level
from distance.filter import UnkillFilter
import unittest
and context:
# Path: distance/_level.py
# class Level(Fragment):
#
# class_tag = 'Level'
# default_container = Section(Magic[9])
#
# layers = ()
# name = None
# version = 3
#
# def _read_section_data(self, dbytes, sec):
# if sec.magic != Magic[9]:
# raise ValueError(f"Unexpected section: {sec.magic}")
# self.name = sec.name
# self.version = sec.version
#
# num_layers = sec.count
#
# self.content = self.classes.level_content.lazy_n_maybe(
# dbytes, num_layers + 1)
# if num_layers:
# self.layers = LazySequence(
# (obj for obj in self.content if obj.class_tag == 'Layer'),
# num_layers)
#
# def _get_write_section(self, sec):
# return Section(Magic[9], self.name, len(self.layers), self.version)
#
# def _visit_write_section_data(self, dbytes, sec):
# if sec.magic != Magic[9]:
# raise ValueError(f"Unexpected section: {sec.magic}")
# for obj in self.content:
# yield obj.visit_write(dbytes)
#
# @property
# def settings(self):
# try:
# return self._settings
# except AttributeError:
# for obj in self.content:
# if obj.class_tag != 'Layer':
# s = obj
# break
# else:
# s = None
# self._settings = s
# return s
#
# @settings.setter
# def settings(self, s):
# self._settings = s
#
# def _repr_detail(self):
# supstr = super()._repr_detail()
# if self.name:
# return f" {self.name!r}{supstr}"
# return supstr
#
# def visit_print(self, p):
# with need_counters(p) as counters:
# yield super().visit_print(p)
# if counters:
# counters.print(p)
#
# def _print_type(self, p):
# p(f"Level: {self.name!r} version {self.version}")
#
# def _visit_print_data(self, p):
# yield super()._visit_print_data(p)
# p(f"Level name: {self.name!r}")
#
# def _visit_print_children(self, p):
# if self.settings is not None:
# with p.tree_children(1):
# yield self.settings.visit_print(p)
# for layer in self.layers:
# yield layer.visit_print(p)
#
# Path: distance/filter/unkill.py
# class UnkillFilter(ObjectFilter):
#
# @classmethod
# def add_args(cls, parser):
# super().add_args(parser)
# parser.add_argument(":debug", action='store_true')
# parser.add_argument(":nocolor", dest='color',
# action='store_false',
# help="Use default kill grid color.")
# parser.add_argument(":color", dest='color',
# action='store_true',
# help="Copy kill grid color (default).")
# parser.add_argument(":collision", dest='collision',
# action='store_true', default=True,
# help="Enable simples collision (default).")
# parser.add_argument(":nocollision", dest='collision',
# action='store_false',
# help="Disable simples collision.")
#
# def __init__(self, args):
# super().__init__(args)
# self.collision = args.collision
# self.debug = args.debug
# self.color = args.color
# self.num_replaced = 0
#
# def filter_object(self, obj):
# try:
# mapper = KILLGRID_MAPPERS[obj.type]
# except KeyError:
# return obj,
# try:
# result = mapper.apply(obj, collision=self.collision,
# copy_color=self.color)
# except DoNotApply:
# return obj,
# self.num_replaced += 1
# if self.debug:
# return tuple(result) + (obj,)
# return result
#
# def print_summary(self, p):
# p(f"Replaced kill grids: {self.num_replaced}")
which might include code, classes, or functions. Output only the next line. | self.assertFalse(any('Kill' in t for t in types)) |
Predict the next line for this snippet: <|code_start|>
Classes = CollectorGroup()
@Classes.fragments.fragment
class SetAbilitiesTriggerFragment(BaseConstructFragment):
base_container = Section.base(Magic[2], 0xad)
<|code_end|>
with the help of current file imports:
from distance.bytes import Section, Magic
from distance.construct import (
BaseConstructFragment,
Byte, Float, DstString,
Struct, Default,
)
from distance.classes import CollectorGroup
and context from other files:
# Path: distance/bytes.py
# UTF_16_DECODE = codecs.getdecoder('utf-16-le')
# UTF_16_ENCODE = codecs.getencoder('utf-16-le')
# S_COLOR_RGBA = Struct("<4f")
# S_FLOAT = Struct("<f")
# S_DOUBLE = Struct("<d")
# S_FLOAT3 = Struct("<fff")
# S_FLOAT4 = Struct("<ffff")
# S_BYTE = Struct('b')
# S_INT = Struct('<i')
# S_LONG = Struct('<q')
# S_UINT = Struct('<I')
# S_ULONG = Struct('<Q')
# S_UINT2 = Struct("<II")
# S_UINT3 = Struct("<III")
# SKIP_BYTES = b'\xFD\xFF\xFF\x7F'
# MAGIC_1 = 11111111
# MAGIC_2 = 22222222
# MAGIC_3 = 33333333
# MAGIC_5 = 55555555
# MAGIC_6 = 66666666
# MAGIC_7 = 77777777
# MAGIC_8 = 88888888
# MAGIC_9 = 99999999
# MAGIC_12 = 12121212
# MAGIC_32 = 32323232
# CATCH_EXCEPTIONS = (ValueError, EOFError)
# S_SEC_BASE = Struct("<IQ")
# MIN_SIZE = 12 # 4b (magic) + 8b (data_size)
# class ErrorPosition(namedtuple('_ErrorPosition', ['start', 'error'])):
# class BytesModel(object):
# class Section(BytesModel):
# class DstBytes(object):
# def __repr__(self):
# def get(cls, ex):
# def first(cls, ex):
# def maybe(cls, dbytes, **kw):
# def iter_n_maybe(cls, dbytes, n, **kw):
# def lazy_n_maybe(cls, dbytes, n, *, start_pos=None, **kw):
# def __init__(self, dbytes=None, **kw):
# def read(self, dbytes, *, seek_end=True, **kw):
# def _read(self, dbytes):
# def _init_defaults(self):
# def _after_init(self):
# def write(self, dbytes, **kw):
# def visit_write(self, dbytes):
# def __repr__(self):
# def _repr_detail(self):
# def print(self, file=None, flags=(), p=None):
# def visit_print(self, p):
# def _print_type(self, p):
# def _print_offset(self, p):
# def _visit_print_data(self, p):
# def _visit_print_children(self, p):
# def base(cls, *args, **kw):
# def __init__(self, *args, **kw):
# def _init_from_args(self, *args, any_version=False, base=None, **kw):
# def __repr__(self):
# def __getitem__(self, key):
# def to_key(self, noversion=False):
# def from_key(cls, key):
# def has_version(self):
# def _read(self, dbytes):
# def _write_header(self, dbytes):
# def _print_type(self, p):
# def _print_offset(self, p):
# def __init__(self, file):
# def __repr__(self):
# def in_memory(cls):
# def from_data(cls, data):
# def from_arg(cls, arg):
# def _write_arg(cls, obj, arg, write_mode='wb'):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, exc_traceback):
# def read_bytes(self, n):
# def read_byte(self):
# def read_var_int(self):
# def read_uint(self):
# def read_int(self):
# def read_ulong(self):
# def read_long(self):
# def read_struct(self, st):
# def read_str(self):
# def read_id(self):
# def write_bytes(self, data):
# def write_uint(self, value):
# def write_int(self, value):
# def write_ulong(self, value):
# def write_long(self, value):
# def write_var_int(self, value):
# def require_equal_uint4(self, expect):
# def write_str(self, s):
# def write_id(self, id_):
# def stable_iter(self, source, *, start_pos=None):
# def gen():
# def write_size(self):
# def write_num_subsections(self):
# def write_section(self, *args, **kw):
#
# Path: distance/construct.py
# def DstOptional(subcon, otherwise=None):
# def _get_subcons(con):
# def __init__(cls, name, bases, dct):
# def _init_defaults(self):
# def _clone_data(self, new):
# def _read_section_data(self, dbytes, sec):
# def _write_section_data(self, dbytes, sec):
# def _visit_print_data(self, p):
# def construct_property(cls, name, doc=None):
# def fget(self):
# def fset(self, value):
# def fdel(self):
# def ExposeConstructFields(target=None, only=None):
# def decorate(target):
# def pop_name(con):
# class ConstructMeta(type):
# class BaseConstructFragment(Fragment, metaclass=ConstructMeta):
#
# Path: distance/classes.py
# class CollectorGroup(object):
#
# """A group of ClassCollector.
#
# Used to register classes of modules for later collection by a
# ClassesRegistry.
#
# Categories are accessed via attribute access. New categories are created on
# first access.
#
# """
#
# def __init__(self):
# self._colls = {}
#
# def __getattr__(self, name):
# try:
# return self._colls[name]
# except KeyError:
# coll = ClassCollector()
# self._colls[name] = coll
# setattr(self, name, coll)
# return coll
, which may contain function names, class names, or code. Output only the next line. | container_versions = 7 |
Based on the snippet: <|code_start|> for key, value in page.items():
setattr(self, key, value)
self.md_content = self.content
self.content = markdown(self.content)
return None
def attached_files(self):
files = query_db('SELECT id FROM uploads WHERE wiki_id = ?', [self.id])
if files == None:
return False
else:
files_list = []
for file in files:
files_list.append( File(file['id']) )
return files_list
class File:
def __init__(self, id):
file = query_db('SELECT * FROM uploads WHERE id = ?', [id], one=True)
for key, value in file.items():
setattr(self, key, value)
return None
class Message:
def __init__(self, id):
message = query_db('SELECT * FROM messages WHERE id = ?', [id], one=True)
self.message = message['message']
self.author = User(message['author'])
self.datetime = datetime.strptime(message['date'], '%Y-%m-%d %H:%M:%S.%f')
return None
<|code_end|>
, predict the immediate next line with the help of imports:
import sqlite3
import arrow
from roommates.helpers import query_db
from datetime import datetime
from markdown import markdown
and context (classes, functions, sometimes code) from other files:
# Path: roommates/helpers.py
# def query_db(query, args=(), one=False):
# cur = g.db.execute(query, args)
# rv = [dict((cur.description[idx][0], value) for idx, value in enumerate(row)) for row in cur.fetchall()]
# return (rv[0] if rv else None) if one else rv
. Output only the next line. | def __str__(self): |
Next line prediction: <|code_start|>#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#taken from Pegasos algorithm by avaitla
def SGD(a, lab, Q, lr):
for i in range(20):
iterations = 1
for tau in range(len(a)):
if a[tau] > 0:
wx = a @ Q[:,tau]
a[tau] *= (1 - 1/iterations)
if(lab[tau]*wx < 1):
a[tau] += lab[tau]/(lr * iterations)
iterations += 1
return a
def attention_sgd(x,y,a=None):
if a == None:
a = np.zeros(x.shape[1])
else:
a = np.resize(a,x.shape[1])
<|code_end|>
. Use current file imports:
(from .subproblem import sigmoid, attention,s
import numpy as np)
and context including class names, function names, or small code snippets from other files:
# Path: gradients/subproblem.py
# def attention(Q, K, V):
. Output only the next line. | for i in range(5): |
Next line prediction: <|code_start|>#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#taken from Pegasos algorithm by avaitla
def SGD(a, lab, Q, lr):
for i in range(20):
iterations = 1
for tau in range(len(a)):
if a[tau] > 0:
wx = a @ Q[:,tau]
a[tau] *= (1 - 1/iterations)
if(lab[tau]*wx < 1):
a[tau] += lab[tau]/(lr * iterations)
iterations += 1
return a
def attention_sgd(x,y,a=None):
if a == None:
a = np.zeros(x.shape[1])
else:
a = np.resize(a,x.shape[1])
for i in range(5):
for tau in range(x.shape[1]):
gh = abs(1-np.min(attention(np.mat(x),np.mat(y),a[tau]))) * np.gradient(x.T*a[tau],axis=0)[tau]
et = sigmoid(np.array(gh))
s_max = 1.5
s1 = s(1.5,tau,x.shape[1])
ql =(s_max * np.abs(np.cosh(s1 * et)+1)) / (s1*np.abs(np.cosh(s1 * et)+1))
<|code_end|>
. Use current file imports:
(from .subproblem import sigmoid, attention,s
import numpy as np)
and context including class names, function names, or small code snippets from other files:
# Path: gradients/subproblem.py
# def attention(Q, K, V):
. Output only the next line. | a[tau] -= ql |
Given snippet: <|code_start|>#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#taken from Pegasos algorithm by avaitla
def SGD(a, lab, Q, lr):
for i in range(20):
iterations = 1
for tau in range(len(a)):
if a[tau] > 0:
wx = a @ Q[:,tau]
a[tau] *= (1 - 1/iterations)
if(lab[tau]*wx < 1):
a[tau] += lab[tau]/(lr * iterations)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .subproblem import sigmoid, attention,s
import numpy as np
and context:
# Path: gradients/subproblem.py
# def attention(Q, K, V):
which might include code, classes, or functions. Output only the next line. | iterations += 1 |
Given the code snippet: <|code_start|>#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#taken from Pegasos algorithm by avaitla
def SGD(a, lab, Q, lr):
for i in range(20):
iterations = 1
for tau in range(len(a)):
if a[tau] > 0:
wx = a @ Q[:,tau]
a[tau] *= (1 - 1/iterations)
if(lab[tau]*wx < 1):
a[tau] += lab[tau]/(lr * iterations)
iterations += 1
return a
def attention_sgd(x,y,a=None):
if a == None:
a = np.zeros(x.shape[1])
else:
a = np.resize(a,x.shape[1])
for i in range(5):
for tau in range(x.shape[1]):
gh = abs(1-np.min(attention(np.mat(x),np.mat(y),a[tau]))) * np.gradient(x.T*a[tau],axis=0)[tau]
s = lambda smax, b, B: 1/smax+(smax-(1/smax))*((b-1)/(B-1))
et = sigmoid(np.array(gh))
<|code_end|>
, generate the next line using the imports in this file:
from .subproblem import sigmoid, attention
import numpy as np
and context (functions, classes, or occasionally code) from other files:
# Path: apicultor/gradients/subproblem.py
# def attention(Q, K, V):
. Output only the next line. | s_max = 1.5 |
Next line prediction: <|code_start|>#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#taken from Pegasos algorithm by avaitla
def SGD(a, lab, Q, lr):
for i in range(20):
iterations = 1
for tau in range(len(a)):
if a[tau] > 0:
wx = a @ Q[:,tau]
a[tau] *= (1 - 1/iterations)
if(lab[tau]*wx < 1):
a[tau] += lab[tau]/(lr * iterations)
iterations += 1
<|code_end|>
. Use current file imports:
(from .subproblem import sigmoid, attention
import numpy as np)
and context including class names, function names, or small code snippets from other files:
# Path: apicultor/gradients/subproblem.py
# def attention(Q, K, V):
. Output only the next line. | return a |
Given snippet: <|code_start|>#hiss removal (a noise reduction algorithm working on signal samples to reduce its hissings)
def hiss_removal(audio):
pend = len(audio)-(4410+1102)
song = sonify(audio, 44100)
song.FrameGenerator().__next__()
song.window()
song.Spectrum()
noise_fft = song.fft(song.windowed_x)[:song.H+1]
noise_power = np.log10(np.abs(noise_fft + 2 ** -16))
noise_floor = np.exp(2.0 * noise_power.mean())
mn = song.magnitude_spectrum
e_n = energy(mn)
pin = 0
output = np.zeros(len(audio))
hold_time = 0
ca = 0
cr = 0
amp = audio.max()
while pin < pend:
selection = pin+2048
song.frame = audio[pin:selection]
song.window()
song.M = 2048
song.Spectrum()
e_m = energy(song.magnitude_spectrum)
SNR = 10 * np.log10(e_m / e_n)
ft = song.fft(song.windowed_x)[:song.H+1]
power_spectral_density = np.abs(ft) ** 2
song.Envelope()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from ..constraints.dynamic_range import dyn_constraint_satis
from ..utils.algorithms import *
from ..sonification.Sonification import normalize, write_file
from scipy.fftpack import fft, ifft, fftfreq
from scipy.signal import lfilter, fftconvolve, firwin
from soundfile import read
import numpy as np
import os
import sys
import logging
and context:
# Path: apicultor/constraints/dynamic_range.py
# def dyn_constraint_satis(audio, variables, gain):
# assert ((type(variables) is list) and len(variables) == 2)
# audio[variables[0] < variables[1]] = gain #noise gating: anything below a threshold is silenced
# audio[variables[0] > variables[1]] = 1
# return audio
#
# Path: apicultor/sonification/Sonification.py
# def normalize(signal):
# maximum_normalizing = np.max(np.abs(signal))/-1
# normalized = np.true_divide(signal,maximum_normalizing)
# return normalized
#
# def write_file(filename, fs, data):
# write(filename+'.wav', fs, data)
# call(['ffmpeg', '-i', filename+'.wav', filename+'.ogg', '-y'])
# call(['rm', '-f', filename+'.wav'])
which might include code, classes, or functions. Output only the next line. | song.AttackTime() |
Given the code snippet: <|code_start|> h_sig_L = lfilter(left.flatten(), 1., audio)
h_sig_R = lfilter(right.flatten(), 1., audio)
del hissless
result = np.float32([h_sig_L, h_sig_R]).T
neg_angle = result[:,(1,0)]
panned = result + neg_angle
normalized = normalize(panned)
del normalized
audio = mono_stereo(audio)
print(( "Rewriting without aliasing in %s"%f ))
song = sonify(audio, 44100)
audio = song.IIR(audio, 44100/2, 'lowpass') #anti-aliasing filtering: erase frequencies higher than the sample rate being used
print(( "Rewriting without DC in %s"%f ))
audio = song.IIR(audio, 40, 'highpass') #remove direct current on audio signal
print(( "Rewriting with Equal Loudness contour in %s"%f ))
audio = song.EqualLoudness(audio) #Equal-Loudness Contour
print(( "Rewriting with RIAA filter applied in %s"%f ))
riaa_filtered = biquad_filter(audio, abz) #riaa filter
del audio
normalized_riaa = normalize(riaa_filtered)
del riaa_filtered
print(( "Rewriting with Hum removal applied in %s"%f ))
song.signal = np.float32(normalized_riaa)
without_hum = song.BandReject(np.float32(normalized_riaa), 50, 16) #remove undesired 50 hz hum
del normalized_riaa
print(( "Rewriting with subsonic rumble removal applied in %s"%f ))
song.signal = without_hum
without_rumble = song.IIR(song.signal, 20, 'highpass') #remove subsonic rumble
del without_hum
db_mag = 20 * np.log10(abs(without_rumble)) #calculate silence if present in audio signal
<|code_end|>
, generate the next line using the imports in this file:
from ..constraints.dynamic_range import dyn_constraint_satis
from ..utils.algorithms import *
from ..sonification.Sonification import normalize, write_file
from scipy.fftpack import fft, ifft, fftfreq
from scipy.signal import lfilter, fftconvolve, firwin
from soundfile import read
import numpy as np
import os
import sys
import logging
and context (functions, classes, or occasionally code) from other files:
# Path: apicultor/constraints/dynamic_range.py
# def dyn_constraint_satis(audio, variables, gain):
# assert ((type(variables) is list) and len(variables) == 2)
# audio[variables[0] < variables[1]] = gain #noise gating: anything below a threshold is silenced
# audio[variables[0] > variables[1]] = 1
# return audio
#
# Path: apicultor/sonification/Sonification.py
# def normalize(signal):
# maximum_normalizing = np.max(np.abs(signal))/-1
# normalized = np.true_divide(signal,maximum_normalizing)
# return normalized
#
# def write_file(filename, fs, data):
# write(filename+'.wav', fs, data)
# call(['ffmpeg', '-i', filename+'.wav', filename+'.ogg', '-y'])
# call(['rm', '-f', filename+'.wav'])
. Output only the next line. | print(( "Rewriting without silence in %s"%f )) |
Based on the snippet: <|code_start|> del hissless
result = np.float32([h_sig_L, h_sig_R]).T
neg_angle = result[:,(1,0)]
panned = result + neg_angle
normalized = normalize(panned)
del normalized
audio = mono_stereo(audio)
print(( "Rewriting without aliasing in %s"%f ))
song = sonify(audio, 44100)
audio = song.IIR(audio, 44100/2, 'lowpass') #anti-aliasing filtering: erase frequencies higher than the sample rate being used
print(( "Rewriting without DC in %s"%f ))
audio = song.IIR(audio, 40, 'highpass') #remove direct current on audio signal
print(( "Rewriting with Equal Loudness contour in %s"%f ))
audio = song.EqualLoudness(audio) #Equal-Loudness Contour
print(( "Rewriting with RIAA filter applied in %s"%f ))
riaa_filtered = biquad_filter(audio, abz) #riaa filter
del audio
normalized_riaa = normalize(riaa_filtered)
del riaa_filtered
print(( "Rewriting with Hum removal applied in %s"%f ))
song.signal = np.float32(normalized_riaa)
without_hum = song.BandReject(np.float32(normalized_riaa), 50, 16) #remove undesired 50 hz hum
del normalized_riaa
print(( "Rewriting with subsonic rumble removal applied in %s"%f ))
song.signal = without_hum
without_rumble = song.IIR(song.signal, 20, 'highpass') #remove subsonic rumble
del without_hum
db_mag = 20 * np.log10(abs(without_rumble)) #calculate silence if present in audio signal
print(( "Rewriting without silence in %s"%f ))
silence_threshold = -130 #complete silence
<|code_end|>
, predict the immediate next line with the help of imports:
from ..constraints.dynamic_range import dyn_constraint_satis
from ..utils.algorithms import *
from ..sonification.Sonification import normalize, write_file
from scipy.fftpack import fft, ifft, fftfreq
from scipy.signal import lfilter, fftconvolve, firwin
from soundfile import read
import numpy as np
import os
import sys
import logging
and context (classes, functions, sometimes code) from other files:
# Path: apicultor/constraints/dynamic_range.py
# def dyn_constraint_satis(audio, variables, gain):
# assert ((type(variables) is list) and len(variables) == 2)
# audio[variables[0] < variables[1]] = gain #noise gating: anything below a threshold is silenced
# audio[variables[0] > variables[1]] = 1
# return audio
#
# Path: apicultor/sonification/Sonification.py
# def normalize(signal):
# maximum_normalizing = np.max(np.abs(signal))/-1
# normalized = np.true_divide(signal,maximum_normalizing)
# return normalized
#
# def write_file(filename, fs, data):
# write(filename+'.wav', fs, data)
# call(['ffmpeg', '-i', filename+'.wav', filename+'.ogg', '-y'])
# call(['rm', '-f', filename+'.wav'])
. Output only the next line. | loud_audio = np.delete(without_rumble, np.where(db_mag < silence_threshold))#remove it |
Given the following code snippet before the placeholder: <|code_start|>#! /usr/bin/env python3
# -*- coding: utf-8 -*-
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def get_desc_pair(descriptors, files_features, keys):
print(np.vstack((descriptors,keys)).T)
input1 = input("Key of first list of descriptors:")
input2 = input("Key of second list of descriptors: ")
if int(input1) not in keys:
raise IndexError("Need keys of descriptors")
if int(input2) not in keys:
raise IndexError("Need keys of descriptors")
first_descriptor_values = files_features.T[int(input1)]
second_descriptor_values = files_features.T[int(input2)]
return first_descriptor_values, second_descriptor_values, descriptors[int(input1)], descriptors[int(input2)]
# plot sound similarity clusters
<|code_end|>
, predict the next line using imports from the current file:
from ..emotion.MusicEmotionMachine import scratch_music
from ..utils.data import *
from colorama import Fore
from sklearn import preprocessing
from ..utils.algorithms import *
from ..sonification.Sonification import write_file
from soundfile import read
from sklearn.decomposition.pca import PCA
from sklearn.cluster import AffinityPropagation
from collections import defaultdict
from random import choice
import time
import numpy as np
import matplotlib.pyplot as plt
import os, sys
import shutil
import librosa
import logging
and context including class names, function names, and sometimes code from other files:
# Path: apicultor/emotion/MusicEmotionMachine.py
# class descriptors_and_keys():
# class deep_support_vector_machines(object):
# class svm_layers(deep_support_vector_machines):
# class main_svm(deep_support_vector_machines):
# class MusicEmotionStateMachine(object):
# def __init__(self, tags_dir, multitag):
# def feature_scaling(f):
# def KMeans_clusters(fscaled):
# def __init__(self, kernel1, kernel2):
# def polynomial_kernel(self, x, y, gamma):
# def linear_kernel_matrix(self, x, y):
# def sigmoid_kernel(self, x, y, gamma):
# def rbf_kernel(self, x, y, gamma):
# def fit_model(self, features,labels, kernel1, kernel2, C, reg_param, gamma, learning_rate):
# def decision_function(self, features):
# def predictions(self, features, targts):
# def __init__(self):
# def layer_computation(self, features, labels, kernel_configs):
# def store_attention(self, files, output_dir):
# def attention(self):
# def best_labels(self):
# def store_good_labels(self, files, output_dir):
# def best_kernels_output(best_estimator, kernel_configs):
# def __init__(self, S, lab, C, reg_param, gamma, kernels_config, output_dir):
# def neg_and_pos(self, files):
# def save_decisions(self):
# def save_classes(self, files):
# def emotions_data_dir(files_dir):
# def multitag_emotions_dictionary_dir():
# def multitag_emotions_dir(tags_dirs, files_dir, generator):
# def __init__(self, name, files_dir):
# def sad_music_remix(self, neg_arous_dir, files, decisions, files_dir, harmonic = None):
# def happy_music_remix(self, pos_arous_dir, files, decisions, files_dir, harmonic = None):
# def relaxed_music_remix(self, neg_arous_dir, files, decisions, files_dir):
# def angry_music_remix(self, pos_arous_dir, files, decisions, files_dir):
# def not_happy_music_remix(self, neg_arous_dir, files, decisions, files_dir):
# def not_sad_music_remix(self, pos_arous_dir, files, decisions, files_dir):
# def not_angry_music_remix(self, neg_arous_dir, files, decisions, files_dir):
# def not_relaxed_music_remix(self, pos_arous_dir, files, decisions, files_dir):
# def remix(self, files, decisions, files_dir):
# def main():
# K = 0
# C = best_estimator['C'][max_score]
#
# Path: apicultor/sonification/Sonification.py
# def write_file(filename, fs, data):
# write(filename+'.wav', fs, data)
# call(['ffmpeg', '-i', filename+'.wav', filename+'.ogg', '-y'])
# call(['rm', '-f', filename+'.wav'])
. Output only the next line. | def plot_similarity_clusters(desc1, desc2, files, plot = None): |
Given the code snippet: <|code_start|>
def attention_sga(x,y,a=None):
if a == None:
a = np.zeros(x.shape[1])
else:
<|code_end|>
, generate the next line using the imports in this file:
from .subproblem import s, sigmoid, attention
import numpy as np
and context (functions, classes, or occasionally code) from other files:
# Path: gradients/subproblem.py
# def attention(Q, K, V):
. Output only the next line. | a = np.resize(a,x.shape[1]) |
Using the snippet: <|code_start|>
def attention_sga(x,y,a=None):
if a == None:
a = np.zeros(x.shape[1])
else:
a = np.resize(a,x.shape[1])
for i in range(5):
for tau in range(x.shape[1]):
gh = abs(1-np.min(attention(np.mat(x),np.mat(y),a[tau]))) * np.gradient(x.T*a[tau],axis=0)[tau]
s = lambda smax, b, B: 1/smax+(smax-(1/smax))*((b-1)/(B-1))
et = sigmoid(np.array(gh))
s_max = 1.5
s1 = s(1.5,tau,x.shape[1])
<|code_end|>
, determine the next line of code. You have imports:
from .subproblem import s, sigmoid, attention
import numpy as np
and context (class names, function names, or code) available:
# Path: gradients/subproblem.py
# def attention(Q, K, V):
. Output only the next line. | ql =(s_max * np.abs(np.cosh(s1 * et)+1)) / (s1*np.abs(np.cosh(s1 * et)+1)) |
Next line prediction: <|code_start|>#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
ext_filter = ['.mp3','.ogg','.wav','.wma', '.amr'] # check if all extensions are supported by the library
#ext_filter = ['.mp3','.ogg','.undefined','.wav','.wma','.mid', '.amr'] # .mid is not an audio file, why is the reason to have .undefined extension?
# descriptores de interés
descriptors = [
'lowlevel.spectral_centroid',
'lowlevel.spectral_contrast',
'lowlevel.dissonance',
'lowlevel.hfc',
'lowlevel.mfcc',
'loudness.level',
'sfx.logattacktime',
'sfx.inharmonicity',
'rhythm.bpm',
<|code_end|>
. Use current file imports:
(import os
import sys
import json
import numpy as np
import logging
import random
from .utils.algorithms import *
from soundfile import read
from collections import defaultdict, OrderedDict
from .sonification.Sonification import hfc_onsets)
and context including class names, function names, or small code snippets from other files:
# Path: apicultor/sonification/Sonification.py
# def hfc_onsets(audio):
# """
# Find onsets in music based on High Frequency Content
# :param audio: the input signal
# :returns:
# - hfcs = onsets locations in seconds
# """
# song = sonify(audio, 44100)
# hfcs = []
# for frame in song.FrameGenerator():
# song.window()
# song.Spectrum()
# hfcs.append(song.hfc())
# hfcs /= max(hfcs)
# song.hfcs = hfcs
# fir = firwin(11, 1.0 / 8, window = "hamming")
# song.filtered = np.convolve(hfcs, fir, mode="same")
# song.climb_hills()
# return np.array([i for i, x in enumerate(song.Filtered) if x > 0]) * song.N
. Output only the next line. | 'metadata.duration', |
Here is a snippet: <|code_start|>
def auth_url(regex, view, *args, **kwargs):
return url(regex, login_required(view), *args, **kwargs)
urlpatterns = [
<|code_end|>
. Write the next line using the current file imports:
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from .views import (
Login, Logout,
Dashboard, VisualNetwork,
MinionList, MinionDetail, MinionEdit,
NetworkList, NetworkDetail, NetworkEdit,
DomainList, DomainDetail, DomainEdit,
EventView, JobView, JobDetailView
)
and context from other files:
# Path: salt_observer/views.py
# class Login(FormView):
# template_name = 'auth/login.html'
# form_class = LoginForm
# success_url = '/'
#
# def form_valid(self, form, *args, **kwargs):
# login(self.request, form.get_user())
# return super().form_valid(form, *args, **kwargs)
#
# def get_form_kwargs(self, *args, **kwargs):
# '''dirty hack to pass the current request down to the backends'''
# kwargs = super().get_form_kwargs(*args, **kwargs)
# kwargs['request'] = self.request
# return kwargs
#
# def get_success_url(self):
# return self.request.GET.get('next', reverse_lazy('dashboard'))
#
# def dispatch(self, request, *args, **kwargs):
# if self.request.user.is_authenticated():
# return HttpResponseRedirect(reverse_lazy('dashboard'))
# else:
# return super().dispatch(request, *args, **kwargs)
#
# class Logout(View):
#
# def dispatch(self, request, *args, **kwargs):
# logout(request)
# return HttpResponseRedirect(reverse_lazy('dashboard'))
#
# class Dashboard(TemplateView):
# template_name = 'home/dashboard.html'
#
# def get_context_data(self, *args, **kwargs):
# all_minions = Minion.objects.all()
# all_networks = Network.objects.all()
# all_domains = Domain.objects.all()
#
# ctx = super().get_context_data(*args, **kwargs)
# ctx.update({
# 'all_minions': all_minions,
# 'all_networks': all_networks,
# 'all_domains': all_domains,
# 'all_users': User.objects.all(),
# 'w5_outdated_minions': sorted(all_minions, key=lambda m: m.outdated_package_count(), reverse=True)[:5],
# 'w5_fullest_minions': sorted(all_minions, key=lambda m: m.fullest_partition_percentage(), reverse=True)[:5],
# 'w5_domain_ssl_grades': sorted(all_domains, key=lambda d: d.worst_grade(), reverse=True)[:5],
# })
# return ctx
#
# class VisualNetwork(TemplateView):
# template_name = 'home/visual_network.html'
#
# def get_context_data(self, *args, **kwargs):
# ctx = super().get_context_data(*args, **kwargs)
# ctx.update({
# 'minions': Minion.objects.all(),
# 'networks': Network.objects.all().exclude(ipv4=settings.SALT_NETWORK),
# })
# return ctx
#
# class MinionList(ListView):
# template_name = 'minion/list.html'
# model = Minion
#
# class MinionDetail(DetailView):
# template_name = 'minion/detail.html'
# model = Minion
# slug_field = 'fqdn'
#
# class MinionEdit(MarkdownEditMixin, UpdateView, MinionDetail):
# template_name = 'minion/edit.html'
# form_class = MinionEditForm
# success_url_name = 'minion-detail'
#
# class NetworkList(ListView):
# template_name = 'network/list.html'
# model = Network
#
# class NetworkDetail(DetailView):
# template_name = 'network/detail.html'
# model = Network
# slug_field = 'ipv4'
#
# class NetworkEdit(MarkdownEditMixin, UpdateView, NetworkDetail):
# template_name = 'network/edit.html'
# form_class = NetworkEditForm
# success_url_name = 'network-detail'
#
# class DomainList(ListView):
# template_name = 'domain/list.html'
# model = Domain
#
# class DomainDetail(DetailView):
# template_name = 'domain/detail.html'
# model = Domain
# slug_field = 'fqdn'
#
# class DomainEdit(MarkdownEditMixin, UpdateView, DomainDetail):
# template_name = 'domain/edit.html'
# form_class = DomainEditForm
# success_url_name = 'domain-detail'
#
# class EventView(AbstractTornadoView):
# template_name = 'events.html'
#
# class JobView(AbstractTornadoView):
# template_name = 'jobs/index.html'
#
# def get_context_data(self, *args, **kwargs):
# context = super().get_context_data(*args, **kwargs)
# context.update({
# 'job_list': self.transform_jobs(self.get_jobs())
# })
# return context
#
# def get_jobs(self):
# return SaltTornado(
# token=self.request.session['salt_tornado_token']
# ).request(
# resource='/jobs/'
# ).json().get('return')[0]
#
# def transform_jobs(self, jobs):
# transformed_jobs = dict()
#
# for key, value in jobs.items():
# value['StartTime'] = dateparse(value['StartTime'])
# if not value['Function'] in settings.SALT['jobs']['ignore']:
# transformed_jobs.update({key: value})
#
# return OrderedDict(sorted(transformed_jobs.items(), reverse=True))
#
# class JobDetailView(AbstractTornadoView):
# template_name = 'jobs/detail.html'
#
# def get_context_data(self, *args, **kwargs):
# context = super().get_context_data(*args, **kwargs)
# context.update({
# 'job_details': SaltTornado(
# token=self.request.session['salt_tornado_token']
# ).request(
# resource='/jobs/' + str(kwargs['jid']),
# ).json().get('return')[0]
# })
# return context
, which may include functions, classes, or code. Output only the next line. | url(r'^grappelli/', include('grappelli.urls')), |
Given the following code snippet before the placeholder: <|code_start|>
def auth_url(regex, view, *args, **kwargs):
return url(regex, login_required(view), *args, **kwargs)
urlpatterns = [
<|code_end|>
, predict the next line using imports from the current file:
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from .views import (
Login, Logout,
Dashboard, VisualNetwork,
MinionList, MinionDetail, MinionEdit,
NetworkList, NetworkDetail, NetworkEdit,
DomainList, DomainDetail, DomainEdit,
EventView, JobView, JobDetailView
)
and context including class names, function names, and sometimes code from other files:
# Path: salt_observer/views.py
# class Login(FormView):
# template_name = 'auth/login.html'
# form_class = LoginForm
# success_url = '/'
#
# def form_valid(self, form, *args, **kwargs):
# login(self.request, form.get_user())
# return super().form_valid(form, *args, **kwargs)
#
# def get_form_kwargs(self, *args, **kwargs):
# '''dirty hack to pass the current request down to the backends'''
# kwargs = super().get_form_kwargs(*args, **kwargs)
# kwargs['request'] = self.request
# return kwargs
#
# def get_success_url(self):
# return self.request.GET.get('next', reverse_lazy('dashboard'))
#
# def dispatch(self, request, *args, **kwargs):
# if self.request.user.is_authenticated():
# return HttpResponseRedirect(reverse_lazy('dashboard'))
# else:
# return super().dispatch(request, *args, **kwargs)
#
# class Logout(View):
#
# def dispatch(self, request, *args, **kwargs):
# logout(request)
# return HttpResponseRedirect(reverse_lazy('dashboard'))
#
# class Dashboard(TemplateView):
# template_name = 'home/dashboard.html'
#
# def get_context_data(self, *args, **kwargs):
# all_minions = Minion.objects.all()
# all_networks = Network.objects.all()
# all_domains = Domain.objects.all()
#
# ctx = super().get_context_data(*args, **kwargs)
# ctx.update({
# 'all_minions': all_minions,
# 'all_networks': all_networks,
# 'all_domains': all_domains,
# 'all_users': User.objects.all(),
# 'w5_outdated_minions': sorted(all_minions, key=lambda m: m.outdated_package_count(), reverse=True)[:5],
# 'w5_fullest_minions': sorted(all_minions, key=lambda m: m.fullest_partition_percentage(), reverse=True)[:5],
# 'w5_domain_ssl_grades': sorted(all_domains, key=lambda d: d.worst_grade(), reverse=True)[:5],
# })
# return ctx
#
# class VisualNetwork(TemplateView):
# template_name = 'home/visual_network.html'
#
# def get_context_data(self, *args, **kwargs):
# ctx = super().get_context_data(*args, **kwargs)
# ctx.update({
# 'minions': Minion.objects.all(),
# 'networks': Network.objects.all().exclude(ipv4=settings.SALT_NETWORK),
# })
# return ctx
#
# class MinionList(ListView):
# template_name = 'minion/list.html'
# model = Minion
#
# class MinionDetail(DetailView):
# template_name = 'minion/detail.html'
# model = Minion
# slug_field = 'fqdn'
#
# class MinionEdit(MarkdownEditMixin, UpdateView, MinionDetail):
# template_name = 'minion/edit.html'
# form_class = MinionEditForm
# success_url_name = 'minion-detail'
#
# class NetworkList(ListView):
# template_name = 'network/list.html'
# model = Network
#
# class NetworkDetail(DetailView):
# template_name = 'network/detail.html'
# model = Network
# slug_field = 'ipv4'
#
# class NetworkEdit(MarkdownEditMixin, UpdateView, NetworkDetail):
# template_name = 'network/edit.html'
# form_class = NetworkEditForm
# success_url_name = 'network-detail'
#
# class DomainList(ListView):
# template_name = 'domain/list.html'
# model = Domain
#
# class DomainDetail(DetailView):
# template_name = 'domain/detail.html'
# model = Domain
# slug_field = 'fqdn'
#
# class DomainEdit(MarkdownEditMixin, UpdateView, DomainDetail):
# template_name = 'domain/edit.html'
# form_class = DomainEditForm
# success_url_name = 'domain-detail'
#
# class EventView(AbstractTornadoView):
# template_name = 'events.html'
#
# class JobView(AbstractTornadoView):
# template_name = 'jobs/index.html'
#
# def get_context_data(self, *args, **kwargs):
# context = super().get_context_data(*args, **kwargs)
# context.update({
# 'job_list': self.transform_jobs(self.get_jobs())
# })
# return context
#
# def get_jobs(self):
# return SaltTornado(
# token=self.request.session['salt_tornado_token']
# ).request(
# resource='/jobs/'
# ).json().get('return')[0]
#
# def transform_jobs(self, jobs):
# transformed_jobs = dict()
#
# for key, value in jobs.items():
# value['StartTime'] = dateparse(value['StartTime'])
# if not value['Function'] in settings.SALT['jobs']['ignore']:
# transformed_jobs.update({key: value})
#
# return OrderedDict(sorted(transformed_jobs.items(), reverse=True))
#
# class JobDetailView(AbstractTornadoView):
# template_name = 'jobs/detail.html'
#
# def get_context_data(self, *args, **kwargs):
# context = super().get_context_data(*args, **kwargs)
# context.update({
# 'job_details': SaltTornado(
# token=self.request.session['salt_tornado_token']
# ).request(
# resource='/jobs/' + str(kwargs['jid']),
# ).json().get('return')[0]
# })
# return context
. Output only the next line. | url(r'^grappelli/', include('grappelli.urls')), |
Predict the next line for this snippet: <|code_start|>
def auth_url(regex, view, *args, **kwargs):
return url(regex, login_required(view), *args, **kwargs)
urlpatterns = [
url(r'^grappelli/', include('grappelli.urls')),
<|code_end|>
with the help of current file imports:
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from .views import (
Login, Logout,
Dashboard, VisualNetwork,
MinionList, MinionDetail, MinionEdit,
NetworkList, NetworkDetail, NetworkEdit,
DomainList, DomainDetail, DomainEdit,
EventView, JobView, JobDetailView
)
and context from other files:
# Path: salt_observer/views.py
# class Login(FormView):
# template_name = 'auth/login.html'
# form_class = LoginForm
# success_url = '/'
#
# def form_valid(self, form, *args, **kwargs):
# login(self.request, form.get_user())
# return super().form_valid(form, *args, **kwargs)
#
# def get_form_kwargs(self, *args, **kwargs):
# '''dirty hack to pass the current request down to the backends'''
# kwargs = super().get_form_kwargs(*args, **kwargs)
# kwargs['request'] = self.request
# return kwargs
#
# def get_success_url(self):
# return self.request.GET.get('next', reverse_lazy('dashboard'))
#
# def dispatch(self, request, *args, **kwargs):
# if self.request.user.is_authenticated():
# return HttpResponseRedirect(reverse_lazy('dashboard'))
# else:
# return super().dispatch(request, *args, **kwargs)
#
# class Logout(View):
#
# def dispatch(self, request, *args, **kwargs):
# logout(request)
# return HttpResponseRedirect(reverse_lazy('dashboard'))
#
# class Dashboard(TemplateView):
# template_name = 'home/dashboard.html'
#
# def get_context_data(self, *args, **kwargs):
# all_minions = Minion.objects.all()
# all_networks = Network.objects.all()
# all_domains = Domain.objects.all()
#
# ctx = super().get_context_data(*args, **kwargs)
# ctx.update({
# 'all_minions': all_minions,
# 'all_networks': all_networks,
# 'all_domains': all_domains,
# 'all_users': User.objects.all(),
# 'w5_outdated_minions': sorted(all_minions, key=lambda m: m.outdated_package_count(), reverse=True)[:5],
# 'w5_fullest_minions': sorted(all_minions, key=lambda m: m.fullest_partition_percentage(), reverse=True)[:5],
# 'w5_domain_ssl_grades': sorted(all_domains, key=lambda d: d.worst_grade(), reverse=True)[:5],
# })
# return ctx
#
# class VisualNetwork(TemplateView):
# template_name = 'home/visual_network.html'
#
# def get_context_data(self, *args, **kwargs):
# ctx = super().get_context_data(*args, **kwargs)
# ctx.update({
# 'minions': Minion.objects.all(),
# 'networks': Network.objects.all().exclude(ipv4=settings.SALT_NETWORK),
# })
# return ctx
#
# class MinionList(ListView):
# template_name = 'minion/list.html'
# model = Minion
#
# class MinionDetail(DetailView):
# template_name = 'minion/detail.html'
# model = Minion
# slug_field = 'fqdn'
#
# class MinionEdit(MarkdownEditMixin, UpdateView, MinionDetail):
# template_name = 'minion/edit.html'
# form_class = MinionEditForm
# success_url_name = 'minion-detail'
#
# class NetworkList(ListView):
# template_name = 'network/list.html'
# model = Network
#
# class NetworkDetail(DetailView):
# template_name = 'network/detail.html'
# model = Network
# slug_field = 'ipv4'
#
# class NetworkEdit(MarkdownEditMixin, UpdateView, NetworkDetail):
# template_name = 'network/edit.html'
# form_class = NetworkEditForm
# success_url_name = 'network-detail'
#
# class DomainList(ListView):
# template_name = 'domain/list.html'
# model = Domain
#
# class DomainDetail(DetailView):
# template_name = 'domain/detail.html'
# model = Domain
# slug_field = 'fqdn'
#
# class DomainEdit(MarkdownEditMixin, UpdateView, DomainDetail):
# template_name = 'domain/edit.html'
# form_class = DomainEditForm
# success_url_name = 'domain-detail'
#
# class EventView(AbstractTornadoView):
# template_name = 'events.html'
#
# class JobView(AbstractTornadoView):
# template_name = 'jobs/index.html'
#
# def get_context_data(self, *args, **kwargs):
# context = super().get_context_data(*args, **kwargs)
# context.update({
# 'job_list': self.transform_jobs(self.get_jobs())
# })
# return context
#
# def get_jobs(self):
# return SaltTornado(
# token=self.request.session['salt_tornado_token']
# ).request(
# resource='/jobs/'
# ).json().get('return')[0]
#
# def transform_jobs(self, jobs):
# transformed_jobs = dict()
#
# for key, value in jobs.items():
# value['StartTime'] = dateparse(value['StartTime'])
# if not value['Function'] in settings.SALT['jobs']['ignore']:
# transformed_jobs.update({key: value})
#
# return OrderedDict(sorted(transformed_jobs.items(), reverse=True))
#
# class JobDetailView(AbstractTornadoView):
# template_name = 'jobs/detail.html'
#
# def get_context_data(self, *args, **kwargs):
# context = super().get_context_data(*args, **kwargs)
# context.update({
# 'job_details': SaltTornado(
# token=self.request.session['salt_tornado_token']
# ).request(
# resource='/jobs/' + str(kwargs['jid']),
# ).json().get('return')[0]
# })
# return context
, which may contain function names, class names, or code. Output only the next line. | url(r'^admin/', admin.site.urls), |
Given the following code snippet before the placeholder: <|code_start|>
def auth_url(regex, view, *args, **kwargs):
return url(regex, login_required(view), *args, **kwargs)
urlpatterns = [
<|code_end|>
, predict the next line using imports from the current file:
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from .views import (
Login, Logout,
Dashboard, VisualNetwork,
MinionList, MinionDetail, MinionEdit,
NetworkList, NetworkDetail, NetworkEdit,
DomainList, DomainDetail, DomainEdit,
EventView, JobView, JobDetailView
)
and context including class names, function names, and sometimes code from other files:
# Path: salt_observer/views.py
# class Login(FormView):
# template_name = 'auth/login.html'
# form_class = LoginForm
# success_url = '/'
#
# def form_valid(self, form, *args, **kwargs):
# login(self.request, form.get_user())
# return super().form_valid(form, *args, **kwargs)
#
# def get_form_kwargs(self, *args, **kwargs):
# '''dirty hack to pass the current request down to the backends'''
# kwargs = super().get_form_kwargs(*args, **kwargs)
# kwargs['request'] = self.request
# return kwargs
#
# def get_success_url(self):
# return self.request.GET.get('next', reverse_lazy('dashboard'))
#
# def dispatch(self, request, *args, **kwargs):
# if self.request.user.is_authenticated():
# return HttpResponseRedirect(reverse_lazy('dashboard'))
# else:
# return super().dispatch(request, *args, **kwargs)
#
# class Logout(View):
#
# def dispatch(self, request, *args, **kwargs):
# logout(request)
# return HttpResponseRedirect(reverse_lazy('dashboard'))
#
# class Dashboard(TemplateView):
# template_name = 'home/dashboard.html'
#
# def get_context_data(self, *args, **kwargs):
# all_minions = Minion.objects.all()
# all_networks = Network.objects.all()
# all_domains = Domain.objects.all()
#
# ctx = super().get_context_data(*args, **kwargs)
# ctx.update({
# 'all_minions': all_minions,
# 'all_networks': all_networks,
# 'all_domains': all_domains,
# 'all_users': User.objects.all(),
# 'w5_outdated_minions': sorted(all_minions, key=lambda m: m.outdated_package_count(), reverse=True)[:5],
# 'w5_fullest_minions': sorted(all_minions, key=lambda m: m.fullest_partition_percentage(), reverse=True)[:5],
# 'w5_domain_ssl_grades': sorted(all_domains, key=lambda d: d.worst_grade(), reverse=True)[:5],
# })
# return ctx
#
# class VisualNetwork(TemplateView):
# template_name = 'home/visual_network.html'
#
# def get_context_data(self, *args, **kwargs):
# ctx = super().get_context_data(*args, **kwargs)
# ctx.update({
# 'minions': Minion.objects.all(),
# 'networks': Network.objects.all().exclude(ipv4=settings.SALT_NETWORK),
# })
# return ctx
#
# class MinionList(ListView):
# template_name = 'minion/list.html'
# model = Minion
#
# class MinionDetail(DetailView):
# template_name = 'minion/detail.html'
# model = Minion
# slug_field = 'fqdn'
#
# class MinionEdit(MarkdownEditMixin, UpdateView, MinionDetail):
# template_name = 'minion/edit.html'
# form_class = MinionEditForm
# success_url_name = 'minion-detail'
#
# class NetworkList(ListView):
# template_name = 'network/list.html'
# model = Network
#
# class NetworkDetail(DetailView):
# template_name = 'network/detail.html'
# model = Network
# slug_field = 'ipv4'
#
# class NetworkEdit(MarkdownEditMixin, UpdateView, NetworkDetail):
# template_name = 'network/edit.html'
# form_class = NetworkEditForm
# success_url_name = 'network-detail'
#
# class DomainList(ListView):
# template_name = 'domain/list.html'
# model = Domain
#
# class DomainDetail(DetailView):
# template_name = 'domain/detail.html'
# model = Domain
# slug_field = 'fqdn'
#
# class DomainEdit(MarkdownEditMixin, UpdateView, DomainDetail):
# template_name = 'domain/edit.html'
# form_class = DomainEditForm
# success_url_name = 'domain-detail'
#
# class EventView(AbstractTornadoView):
# template_name = 'events.html'
#
# class JobView(AbstractTornadoView):
# template_name = 'jobs/index.html'
#
# def get_context_data(self, *args, **kwargs):
# context = super().get_context_data(*args, **kwargs)
# context.update({
# 'job_list': self.transform_jobs(self.get_jobs())
# })
# return context
#
# def get_jobs(self):
# return SaltTornado(
# token=self.request.session['salt_tornado_token']
# ).request(
# resource='/jobs/'
# ).json().get('return')[0]
#
# def transform_jobs(self, jobs):
# transformed_jobs = dict()
#
# for key, value in jobs.items():
# value['StartTime'] = dateparse(value['StartTime'])
# if not value['Function'] in settings.SALT['jobs']['ignore']:
# transformed_jobs.update({key: value})
#
# return OrderedDict(sorted(transformed_jobs.items(), reverse=True))
#
# class JobDetailView(AbstractTornadoView):
# template_name = 'jobs/detail.html'
#
# def get_context_data(self, *args, **kwargs):
# context = super().get_context_data(*args, **kwargs)
# context.update({
# 'job_details': SaltTornado(
# token=self.request.session['salt_tornado_token']
# ).request(
# resource='/jobs/' + str(kwargs['jid']),
# ).json().get('return')[0]
# })
# return context
. Output only the next line. | url(r'^grappelli/', include('grappelli.urls')), |
Based on the snippet: <|code_start|>
def auth_url(regex, view, *args, **kwargs):
return url(regex, login_required(view), *args, **kwargs)
urlpatterns = [
url(r'^grappelli/', include('grappelli.urls')),
<|code_end|>
, predict the immediate next line with the help of imports:
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from .views import (
Login, Logout,
Dashboard, VisualNetwork,
MinionList, MinionDetail, MinionEdit,
NetworkList, NetworkDetail, NetworkEdit,
DomainList, DomainDetail, DomainEdit,
EventView, JobView, JobDetailView
)
and context (classes, functions, sometimes code) from other files:
# Path: salt_observer/views.py
# class Login(FormView):
# template_name = 'auth/login.html'
# form_class = LoginForm
# success_url = '/'
#
# def form_valid(self, form, *args, **kwargs):
# login(self.request, form.get_user())
# return super().form_valid(form, *args, **kwargs)
#
# def get_form_kwargs(self, *args, **kwargs):
# '''dirty hack to pass the current request down to the backends'''
# kwargs = super().get_form_kwargs(*args, **kwargs)
# kwargs['request'] = self.request
# return kwargs
#
# def get_success_url(self):
# return self.request.GET.get('next', reverse_lazy('dashboard'))
#
# def dispatch(self, request, *args, **kwargs):
# if self.request.user.is_authenticated():
# return HttpResponseRedirect(reverse_lazy('dashboard'))
# else:
# return super().dispatch(request, *args, **kwargs)
#
# class Logout(View):
#
# def dispatch(self, request, *args, **kwargs):
# logout(request)
# return HttpResponseRedirect(reverse_lazy('dashboard'))
#
# class Dashboard(TemplateView):
# template_name = 'home/dashboard.html'
#
# def get_context_data(self, *args, **kwargs):
# all_minions = Minion.objects.all()
# all_networks = Network.objects.all()
# all_domains = Domain.objects.all()
#
# ctx = super().get_context_data(*args, **kwargs)
# ctx.update({
# 'all_minions': all_minions,
# 'all_networks': all_networks,
# 'all_domains': all_domains,
# 'all_users': User.objects.all(),
# 'w5_outdated_minions': sorted(all_minions, key=lambda m: m.outdated_package_count(), reverse=True)[:5],
# 'w5_fullest_minions': sorted(all_minions, key=lambda m: m.fullest_partition_percentage(), reverse=True)[:5],
# 'w5_domain_ssl_grades': sorted(all_domains, key=lambda d: d.worst_grade(), reverse=True)[:5],
# })
# return ctx
#
# class VisualNetwork(TemplateView):
# template_name = 'home/visual_network.html'
#
# def get_context_data(self, *args, **kwargs):
# ctx = super().get_context_data(*args, **kwargs)
# ctx.update({
# 'minions': Minion.objects.all(),
# 'networks': Network.objects.all().exclude(ipv4=settings.SALT_NETWORK),
# })
# return ctx
#
# class MinionList(ListView):
# template_name = 'minion/list.html'
# model = Minion
#
# class MinionDetail(DetailView):
# template_name = 'minion/detail.html'
# model = Minion
# slug_field = 'fqdn'
#
# class MinionEdit(MarkdownEditMixin, UpdateView, MinionDetail):
# template_name = 'minion/edit.html'
# form_class = MinionEditForm
# success_url_name = 'minion-detail'
#
# class NetworkList(ListView):
# template_name = 'network/list.html'
# model = Network
#
# class NetworkDetail(DetailView):
# template_name = 'network/detail.html'
# model = Network
# slug_field = 'ipv4'
#
# class NetworkEdit(MarkdownEditMixin, UpdateView, NetworkDetail):
# template_name = 'network/edit.html'
# form_class = NetworkEditForm
# success_url_name = 'network-detail'
#
# class DomainList(ListView):
# template_name = 'domain/list.html'
# model = Domain
#
# class DomainDetail(DetailView):
# template_name = 'domain/detail.html'
# model = Domain
# slug_field = 'fqdn'
#
# class DomainEdit(MarkdownEditMixin, UpdateView, DomainDetail):
# template_name = 'domain/edit.html'
# form_class = DomainEditForm
# success_url_name = 'domain-detail'
#
# class EventView(AbstractTornadoView):
# template_name = 'events.html'
#
# class JobView(AbstractTornadoView):
# template_name = 'jobs/index.html'
#
# def get_context_data(self, *args, **kwargs):
# context = super().get_context_data(*args, **kwargs)
# context.update({
# 'job_list': self.transform_jobs(self.get_jobs())
# })
# return context
#
# def get_jobs(self):
# return SaltTornado(
# token=self.request.session['salt_tornado_token']
# ).request(
# resource='/jobs/'
# ).json().get('return')[0]
#
# def transform_jobs(self, jobs):
# transformed_jobs = dict()
#
# for key, value in jobs.items():
# value['StartTime'] = dateparse(value['StartTime'])
# if not value['Function'] in settings.SALT['jobs']['ignore']:
# transformed_jobs.update({key: value})
#
# return OrderedDict(sorted(transformed_jobs.items(), reverse=True))
#
# class JobDetailView(AbstractTornadoView):
# template_name = 'jobs/detail.html'
#
# def get_context_data(self, *args, **kwargs):
# context = super().get_context_data(*args, **kwargs)
# context.update({
# 'job_details': SaltTornado(
# token=self.request.session['salt_tornado_token']
# ).request(
# resource='/jobs/' + str(kwargs['jid']),
# ).json().get('return')[0]
# })
# return context
. Output only the next line. | url(r'^admin/', admin.site.urls), |
Next line prediction: <|code_start|>
class NetworkInterfaceInline(admin.TabularInline):
model = NetworkInterface
extra = 0
readonly_fields = ('network', 'minion', 'ip_address', 'mac_address', 'name')
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class MinionAdmin(admin.ModelAdmin):
inlines = [NetworkInterfaceInline]
<|code_end|>
. Use current file imports:
(from django.contrib import admin
from salt_observer.models import (
Minion, Network, NetworkInterface, Domain
))
and context including class names, function names, or small code snippets from other files:
# Path: salt_observer/models.py
# class Minion(MarkdownContent):
# ''' Representation of a Server in Salt '''
#
# fqdn = models.CharField(max_length=255)
# networks = models.ManyToManyField(Network, through='NetworkInterface')
# _data = models.TextField(default='{}')
#
# last_updated = models.DateTimeField()
#
# @property
# def data(self):
# try:
# return json.loads(self._data)
# except ValueError:
# return dict()
#
# @data.setter
# def data(self, value):
# self._data = json.dumps(value)
#
# def update_data(self, value):
# ''' In order to update self.data without 3 lines of code
# self.data.update() wont work!
# '''
# data = self.data
# data.update(value)
# self.data = data
#
# @property
# def user_count(self):
# return len(self.data.get('grains', {}).get('users', []))
#
# @property
# def package_count(self):
# return len(self.data.get('packages', []))
#
# @property
# def network_count(self):
# return len(self.networks.all())
#
# def outdated_package_count(self):
# return len([p for p, v in self.data.get('packages', {}).items() if v['latest_version']])
#
# def fullest_partition_percentage(self):
# try:
# return max([p.get('percent', 0) for p in self.data.get('mounted_devices', [])])
# except (AttributeError, ValueError):
# return 0
#
# def __str__(self):
# return self.fqdn
#
# class Network(MarkdownContent):
# ''' Representation of an Network '''
#
# ipv4 = models.CharField(max_length=15)
# mask = models.CharField(max_length=15)
#
# last_updated = models.DateTimeField()
#
# def __str__(self):
# return self.ipv4
#
# class NetworkInterface(models.Model):
# ''' Representing a network card '''
#
# network = models.ForeignKey(Network, on_delete=models.CASCADE)
# minion = models.ForeignKey(Minion, on_delete=models.CASCADE)
#
# name = models.CharField(max_length=255)
# ip_address = models.CharField(max_length=17)
# mac_address = models.CharField(max_length=17)
#
# def __str__(self):
# return '{} ({})'.format(self.name, self.mac_address)
#
# class Domain(MarkdownContent):
# ''' Represents a Fully qualified domain name '''
#
# fqdn = models.CharField(max_length=255)
# minion = models.ManyToManyField('Minion', blank=True)
# _ssl_lab_status = models.TextField(default='{}')
#
# can_speak_https = models.BooleanField(help_text='Is there a service listening on port 443')
# public = models.BooleanField(help_text='Is this domain public accessible')
# valid = models.BooleanField()
#
# @property
# def ssl_lab_status(self):
# try:
# return json.loads(self._ssl_lab_status)
# except ValueError:
# return dict()
#
# @ssl_lab_status.setter
# def ssl_lab_status(self, value):
# self._ssl_lab_status = json.dumps(value)
#
# def check_if_valid(self, commit=True):
# try:
# a = requests.get('http://{}/'.format(self.fqdn), timeout=5, verify=False)
# b = requests.get('https://{}/'.format(self.fqdn), timeout=5, verify=False)
# except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
# self.valid = False
# else:
# self.valid = True
#
# if commit:
# self.save()
#
# def minion_count(self):
# return len(self.minion.all())
#
# def worst_grade(self):
# if self.ssl_lab_status.get('grades', []):
# return max([g for g in self.ssl_lab_status.get('grades', [])])
# return '0'
#
# def save(self, *args, **kwargs):
# if not self.id:
# self.check_if_valid(commit=False)
# super().save(*args, **kwargs)
#
# def __str__(self):
# return self.fqdn
. Output only the next line. | readonly_fields = ('fqdn', 'data', 'last_updated') |
Continue the code snippet: <|code_start|>
class NetworkInterfaceInline(admin.TabularInline):
model = NetworkInterface
extra = 0
readonly_fields = ('network', 'minion', 'ip_address', 'mac_address', 'name')
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class MinionAdmin(admin.ModelAdmin):
inlines = [NetworkInterfaceInline]
<|code_end|>
. Use current file imports:
from django.contrib import admin
from salt_observer.models import (
Minion, Network, NetworkInterface, Domain
)
and context (classes, functions, or code) from other files:
# Path: salt_observer/models.py
# class Minion(MarkdownContent):
# ''' Representation of a Server in Salt '''
#
# fqdn = models.CharField(max_length=255)
# networks = models.ManyToManyField(Network, through='NetworkInterface')
# _data = models.TextField(default='{}')
#
# last_updated = models.DateTimeField()
#
# @property
# def data(self):
# try:
# return json.loads(self._data)
# except ValueError:
# return dict()
#
# @data.setter
# def data(self, value):
# self._data = json.dumps(value)
#
# def update_data(self, value):
# ''' In order to update self.data without 3 lines of code
# self.data.update() wont work!
# '''
# data = self.data
# data.update(value)
# self.data = data
#
# @property
# def user_count(self):
# return len(self.data.get('grains', {}).get('users', []))
#
# @property
# def package_count(self):
# return len(self.data.get('packages', []))
#
# @property
# def network_count(self):
# return len(self.networks.all())
#
# def outdated_package_count(self):
# return len([p for p, v in self.data.get('packages', {}).items() if v['latest_version']])
#
# def fullest_partition_percentage(self):
# try:
# return max([p.get('percent', 0) for p in self.data.get('mounted_devices', [])])
# except (AttributeError, ValueError):
# return 0
#
# def __str__(self):
# return self.fqdn
#
# class Network(MarkdownContent):
# ''' Representation of an Network '''
#
# ipv4 = models.CharField(max_length=15)
# mask = models.CharField(max_length=15)
#
# last_updated = models.DateTimeField()
#
# def __str__(self):
# return self.ipv4
#
# class NetworkInterface(models.Model):
# ''' Representing a network card '''
#
# network = models.ForeignKey(Network, on_delete=models.CASCADE)
# minion = models.ForeignKey(Minion, on_delete=models.CASCADE)
#
# name = models.CharField(max_length=255)
# ip_address = models.CharField(max_length=17)
# mac_address = models.CharField(max_length=17)
#
# def __str__(self):
# return '{} ({})'.format(self.name, self.mac_address)
#
# class Domain(MarkdownContent):
# ''' Represents a Fully qualified domain name '''
#
# fqdn = models.CharField(max_length=255)
# minion = models.ManyToManyField('Minion', blank=True)
# _ssl_lab_status = models.TextField(default='{}')
#
# can_speak_https = models.BooleanField(help_text='Is there a service listening on port 443')
# public = models.BooleanField(help_text='Is this domain public accessible')
# valid = models.BooleanField()
#
# @property
# def ssl_lab_status(self):
# try:
# return json.loads(self._ssl_lab_status)
# except ValueError:
# return dict()
#
# @ssl_lab_status.setter
# def ssl_lab_status(self, value):
# self._ssl_lab_status = json.dumps(value)
#
# def check_if_valid(self, commit=True):
# try:
# a = requests.get('http://{}/'.format(self.fqdn), timeout=5, verify=False)
# b = requests.get('https://{}/'.format(self.fqdn), timeout=5, verify=False)
# except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
# self.valid = False
# else:
# self.valid = True
#
# if commit:
# self.save()
#
# def minion_count(self):
# return len(self.minion.all())
#
# def worst_grade(self):
# if self.ssl_lab_status.get('grades', []):
# return max([g for g in self.ssl_lab_status.get('grades', [])])
# return '0'
#
# def save(self, *args, **kwargs):
# if not self.id:
# self.check_if_valid(commit=False)
# super().save(*args, **kwargs)
#
# def __str__(self):
# return self.fqdn
. Output only the next line. | readonly_fields = ('fqdn', 'data', 'last_updated') |
Using the snippet: <|code_start|>
class NetworkInterfaceInline(admin.TabularInline):
model = NetworkInterface
extra = 0
readonly_fields = ('network', 'minion', 'ip_address', 'mac_address', 'name')
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class MinionAdmin(admin.ModelAdmin):
inlines = [NetworkInterfaceInline]
readonly_fields = ('fqdn', 'data', 'last_updated')
exclude = ('_data', 'md_content', 'md_last_edited', 'md_last_autor')
admin.site.register(Minion, MinionAdmin)
class NetworkAdmin(admin.ModelAdmin):
inlines = [NetworkInterfaceInline]
<|code_end|>
, determine the next line of code. You have imports:
from django.contrib import admin
from salt_observer.models import (
Minion, Network, NetworkInterface, Domain
)
and context (class names, function names, or code) available:
# Path: salt_observer/models.py
# class Minion(MarkdownContent):
# ''' Representation of a Server in Salt '''
#
# fqdn = models.CharField(max_length=255)
# networks = models.ManyToManyField(Network, through='NetworkInterface')
# _data = models.TextField(default='{}')
#
# last_updated = models.DateTimeField()
#
# @property
# def data(self):
# try:
# return json.loads(self._data)
# except ValueError:
# return dict()
#
# @data.setter
# def data(self, value):
# self._data = json.dumps(value)
#
# def update_data(self, value):
# ''' In order to update self.data without 3 lines of code
# self.data.update() wont work!
# '''
# data = self.data
# data.update(value)
# self.data = data
#
# @property
# def user_count(self):
# return len(self.data.get('grains', {}).get('users', []))
#
# @property
# def package_count(self):
# return len(self.data.get('packages', []))
#
# @property
# def network_count(self):
# return len(self.networks.all())
#
# def outdated_package_count(self):
# return len([p for p, v in self.data.get('packages', {}).items() if v['latest_version']])
#
# def fullest_partition_percentage(self):
# try:
# return max([p.get('percent', 0) for p in self.data.get('mounted_devices', [])])
# except (AttributeError, ValueError):
# return 0
#
# def __str__(self):
# return self.fqdn
#
# class Network(MarkdownContent):
# ''' Representation of an Network '''
#
# ipv4 = models.CharField(max_length=15)
# mask = models.CharField(max_length=15)
#
# last_updated = models.DateTimeField()
#
# def __str__(self):
# return self.ipv4
#
# class NetworkInterface(models.Model):
# ''' Representing a network card '''
#
# network = models.ForeignKey(Network, on_delete=models.CASCADE)
# minion = models.ForeignKey(Minion, on_delete=models.CASCADE)
#
# name = models.CharField(max_length=255)
# ip_address = models.CharField(max_length=17)
# mac_address = models.CharField(max_length=17)
#
# def __str__(self):
# return '{} ({})'.format(self.name, self.mac_address)
#
# class Domain(MarkdownContent):
# ''' Represents a Fully qualified domain name '''
#
# fqdn = models.CharField(max_length=255)
# minion = models.ManyToManyField('Minion', blank=True)
# _ssl_lab_status = models.TextField(default='{}')
#
# can_speak_https = models.BooleanField(help_text='Is there a service listening on port 443')
# public = models.BooleanField(help_text='Is this domain public accessible')
# valid = models.BooleanField()
#
# @property
# def ssl_lab_status(self):
# try:
# return json.loads(self._ssl_lab_status)
# except ValueError:
# return dict()
#
# @ssl_lab_status.setter
# def ssl_lab_status(self, value):
# self._ssl_lab_status = json.dumps(value)
#
# def check_if_valid(self, commit=True):
# try:
# a = requests.get('http://{}/'.format(self.fqdn), timeout=5, verify=False)
# b = requests.get('https://{}/'.format(self.fqdn), timeout=5, verify=False)
# except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
# self.valid = False
# else:
# self.valid = True
#
# if commit:
# self.save()
#
# def minion_count(self):
# return len(self.minion.all())
#
# def worst_grade(self):
# if self.ssl_lab_status.get('grades', []):
# return max([g for g in self.ssl_lab_status.get('grades', [])])
# return '0'
#
# def save(self, *args, **kwargs):
# if not self.id:
# self.check_if_valid(commit=False)
# super().save(*args, **kwargs)
#
# def __str__(self):
# return self.fqdn
. Output only the next line. | readonly_fields = ('ipv4', 'mask', 'last_updated') |
Predict the next line after this snippet: <|code_start|>
class MarkdownFormMixin(forms.ModelForm):
md_content = forms.CharField(
widget=forms.Textarea(attrs={'class': 'form-control', 'rows': 15, 'autofocus': 'True', 'placeholder': '# Some Markdown'}),
label='',
required=False
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = ''
self.helper.field_class = 'col-lg-12'
self.helper.layout = Layout(
Div(Field('md_content')),
Div(StrictButton('Save', type='submit', css_class='btn-primary'))
)
<|code_end|>
using the current file's imports:
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext, ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Div, Field, HTML
from crispy_forms.bootstrap import StrictButton, PrependedText
from salt_observer.models import (
Domain, Minion, Network
)
and any relevant context from other files:
# Path: salt_observer/models.py
# class Domain(MarkdownContent):
# ''' Represents a Fully qualified domain name '''
#
# fqdn = models.CharField(max_length=255)
# minion = models.ManyToManyField('Minion', blank=True)
# _ssl_lab_status = models.TextField(default='{}')
#
# can_speak_https = models.BooleanField(help_text='Is there a service listening on port 443')
# public = models.BooleanField(help_text='Is this domain public accessible')
# valid = models.BooleanField()
#
# @property
# def ssl_lab_status(self):
# try:
# return json.loads(self._ssl_lab_status)
# except ValueError:
# return dict()
#
# @ssl_lab_status.setter
# def ssl_lab_status(self, value):
# self._ssl_lab_status = json.dumps(value)
#
# def check_if_valid(self, commit=True):
# try:
# a = requests.get('http://{}/'.format(self.fqdn), timeout=5, verify=False)
# b = requests.get('https://{}/'.format(self.fqdn), timeout=5, verify=False)
# except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
# self.valid = False
# else:
# self.valid = True
#
# if commit:
# self.save()
#
# def minion_count(self):
# return len(self.minion.all())
#
# def worst_grade(self):
# if self.ssl_lab_status.get('grades', []):
# return max([g for g in self.ssl_lab_status.get('grades', [])])
# return '0'
#
# def save(self, *args, **kwargs):
# if not self.id:
# self.check_if_valid(commit=False)
# super().save(*args, **kwargs)
#
# def __str__(self):
# return self.fqdn
#
# class Minion(MarkdownContent):
# ''' Representation of a Server in Salt '''
#
# fqdn = models.CharField(max_length=255)
# networks = models.ManyToManyField(Network, through='NetworkInterface')
# _data = models.TextField(default='{}')
#
# last_updated = models.DateTimeField()
#
# @property
# def data(self):
# try:
# return json.loads(self._data)
# except ValueError:
# return dict()
#
# @data.setter
# def data(self, value):
# self._data = json.dumps(value)
#
# def update_data(self, value):
# ''' In order to update self.data without 3 lines of code
# self.data.update() wont work!
# '''
# data = self.data
# data.update(value)
# self.data = data
#
# @property
# def user_count(self):
# return len(self.data.get('grains', {}).get('users', []))
#
# @property
# def package_count(self):
# return len(self.data.get('packages', []))
#
# @property
# def network_count(self):
# return len(self.networks.all())
#
# def outdated_package_count(self):
# return len([p for p, v in self.data.get('packages', {}).items() if v['latest_version']])
#
# def fullest_partition_percentage(self):
# try:
# return max([p.get('percent', 0) for p in self.data.get('mounted_devices', [])])
# except (AttributeError, ValueError):
# return 0
#
# def __str__(self):
# return self.fqdn
#
# class Network(MarkdownContent):
# ''' Representation of an Network '''
#
# ipv4 = models.CharField(max_length=15)
# mask = models.CharField(max_length=15)
#
# last_updated = models.DateTimeField()
#
# def __str__(self):
# return self.ipv4
. Output only the next line. | class Meta: |
Next line prediction: <|code_start|> Div(StrictButton('Save', type='submit', css_class='btn-primary'))
)
class Meta:
abstract = True
fields = ['md_content']
class LoginForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.error_messages.update({
'invalid_login': _(
'Please enter a corrent %(username)s and password. '
'Note that these credentials must be a valid api user!'
)
})
self.fields['username'].widget.attrs.update({'autofocus': 'True'})
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = ''
self.helper.field_class = 'col-lg-12'
self.helper.layout = Layout(
PrependedText('username', '<i class="fa fa-user fa-fw"></i>', placeholder='Username'),
<|code_end|>
. Use current file imports:
(from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext, ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Div, Field, HTML
from crispy_forms.bootstrap import StrictButton, PrependedText
from salt_observer.models import (
Domain, Minion, Network
))
and context including class names, function names, or small code snippets from other files:
# Path: salt_observer/models.py
# class Domain(MarkdownContent):
# ''' Represents a Fully qualified domain name '''
#
# fqdn = models.CharField(max_length=255)
# minion = models.ManyToManyField('Minion', blank=True)
# _ssl_lab_status = models.TextField(default='{}')
#
# can_speak_https = models.BooleanField(help_text='Is there a service listening on port 443')
# public = models.BooleanField(help_text='Is this domain public accessible')
# valid = models.BooleanField()
#
# @property
# def ssl_lab_status(self):
# try:
# return json.loads(self._ssl_lab_status)
# except ValueError:
# return dict()
#
# @ssl_lab_status.setter
# def ssl_lab_status(self, value):
# self._ssl_lab_status = json.dumps(value)
#
# def check_if_valid(self, commit=True):
# try:
# a = requests.get('http://{}/'.format(self.fqdn), timeout=5, verify=False)
# b = requests.get('https://{}/'.format(self.fqdn), timeout=5, verify=False)
# except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
# self.valid = False
# else:
# self.valid = True
#
# if commit:
# self.save()
#
# def minion_count(self):
# return len(self.minion.all())
#
# def worst_grade(self):
# if self.ssl_lab_status.get('grades', []):
# return max([g for g in self.ssl_lab_status.get('grades', [])])
# return '0'
#
# def save(self, *args, **kwargs):
# if not self.id:
# self.check_if_valid(commit=False)
# super().save(*args, **kwargs)
#
# def __str__(self):
# return self.fqdn
#
# class Minion(MarkdownContent):
# ''' Representation of a Server in Salt '''
#
# fqdn = models.CharField(max_length=255)
# networks = models.ManyToManyField(Network, through='NetworkInterface')
# _data = models.TextField(default='{}')
#
# last_updated = models.DateTimeField()
#
# @property
# def data(self):
# try:
# return json.loads(self._data)
# except ValueError:
# return dict()
#
# @data.setter
# def data(self, value):
# self._data = json.dumps(value)
#
# def update_data(self, value):
# ''' In order to update self.data without 3 lines of code
# self.data.update() wont work!
# '''
# data = self.data
# data.update(value)
# self.data = data
#
# @property
# def user_count(self):
# return len(self.data.get('grains', {}).get('users', []))
#
# @property
# def package_count(self):
# return len(self.data.get('packages', []))
#
# @property
# def network_count(self):
# return len(self.networks.all())
#
# def outdated_package_count(self):
# return len([p for p, v in self.data.get('packages', {}).items() if v['latest_version']])
#
# def fullest_partition_percentage(self):
# try:
# return max([p.get('percent', 0) for p in self.data.get('mounted_devices', [])])
# except (AttributeError, ValueError):
# return 0
#
# def __str__(self):
# return self.fqdn
#
# class Network(MarkdownContent):
# ''' Representation of an Network '''
#
# ipv4 = models.CharField(max_length=15)
# mask = models.CharField(max_length=15)
#
# last_updated = models.DateTimeField()
#
# def __str__(self):
# return self.ipv4
. Output only the next line. | PrependedText('password', '<i class="fa fa-unlock-alt fa-fw"></i>', placeholder='Password'), |
Based on the snippet: <|code_start|>
class MarkdownFormMixin(forms.ModelForm):
md_content = forms.CharField(
widget=forms.Textarea(attrs={'class': 'form-control', 'rows': 15, 'autofocus': 'True', 'placeholder': '# Some Markdown'}),
label='',
required=False
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = ''
self.helper.field_class = 'col-lg-12'
self.helper.layout = Layout(
Div(Field('md_content')),
Div(StrictButton('Save', type='submit', css_class='btn-primary'))
)
class Meta:
<|code_end|>
, predict the immediate next line with the help of imports:
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext, ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Div, Field, HTML
from crispy_forms.bootstrap import StrictButton, PrependedText
from salt_observer.models import (
Domain, Minion, Network
)
and context (classes, functions, sometimes code) from other files:
# Path: salt_observer/models.py
# class Domain(MarkdownContent):
# ''' Represents a Fully qualified domain name '''
#
# fqdn = models.CharField(max_length=255)
# minion = models.ManyToManyField('Minion', blank=True)
# _ssl_lab_status = models.TextField(default='{}')
#
# can_speak_https = models.BooleanField(help_text='Is there a service listening on port 443')
# public = models.BooleanField(help_text='Is this domain public accessible')
# valid = models.BooleanField()
#
# @property
# def ssl_lab_status(self):
# try:
# return json.loads(self._ssl_lab_status)
# except ValueError:
# return dict()
#
# @ssl_lab_status.setter
# def ssl_lab_status(self, value):
# self._ssl_lab_status = json.dumps(value)
#
# def check_if_valid(self, commit=True):
# try:
# a = requests.get('http://{}/'.format(self.fqdn), timeout=5, verify=False)
# b = requests.get('https://{}/'.format(self.fqdn), timeout=5, verify=False)
# except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
# self.valid = False
# else:
# self.valid = True
#
# if commit:
# self.save()
#
# def minion_count(self):
# return len(self.minion.all())
#
# def worst_grade(self):
# if self.ssl_lab_status.get('grades', []):
# return max([g for g in self.ssl_lab_status.get('grades', [])])
# return '0'
#
# def save(self, *args, **kwargs):
# if not self.id:
# self.check_if_valid(commit=False)
# super().save(*args, **kwargs)
#
# def __str__(self):
# return self.fqdn
#
# class Minion(MarkdownContent):
# ''' Representation of a Server in Salt '''
#
# fqdn = models.CharField(max_length=255)
# networks = models.ManyToManyField(Network, through='NetworkInterface')
# _data = models.TextField(default='{}')
#
# last_updated = models.DateTimeField()
#
# @property
# def data(self):
# try:
# return json.loads(self._data)
# except ValueError:
# return dict()
#
# @data.setter
# def data(self, value):
# self._data = json.dumps(value)
#
# def update_data(self, value):
# ''' In order to update self.data without 3 lines of code
# self.data.update() wont work!
# '''
# data = self.data
# data.update(value)
# self.data = data
#
# @property
# def user_count(self):
# return len(self.data.get('grains', {}).get('users', []))
#
# @property
# def package_count(self):
# return len(self.data.get('packages', []))
#
# @property
# def network_count(self):
# return len(self.networks.all())
#
# def outdated_package_count(self):
# return len([p for p, v in self.data.get('packages', {}).items() if v['latest_version']])
#
# def fullest_partition_percentage(self):
# try:
# return max([p.get('percent', 0) for p in self.data.get('mounted_devices', [])])
# except (AttributeError, ValueError):
# return 0
#
# def __str__(self):
# return self.fqdn
#
# class Network(MarkdownContent):
# ''' Representation of an Network '''
#
# ipv4 = models.CharField(max_length=15)
# mask = models.CharField(max_length=15)
#
# last_updated = models.DateTimeField()
#
# def __str__(self):
# return self.ipv4
. Output only the next line. | abstract = True |
Using the snippet: <|code_start|> while self.pc < len(self.code):
op = self.code[self.pc]
if type(op) == ops.BinaryAdd:
x = self.stack.pop()
y = self.stack.pop()
v = obj.LNumber(y.value + x.value)
self.stack.append(v)
elif type(op) == ops.BinarySubtract:
x = self.stack.pop()
y = self.stack.pop()
v = obj.LNumber(y.value - x.value)
self.stack.append(v)
elif type(op) == ops.Call:
func = self.stack.pop()
arg = self.stack.pop()
if type(arg) == obj.LVar:
arg = self.env[arg]
func(arg)
elif type(op) == ops.LoadConst:
self.stack.append(self.consts[op.index])
elif type(op) == ops.LoadName:
lvar = self.vars[op.index]
value = self.env.get(lvar)
if value is None:
value = getattr(builtin, 'lua_' + lvar.value)
<|code_end|>
, determine the next line of code. You have imports:
from luna import objects as obj
from luna.stdlib import builtin
from luna.vm import opcodes as ops
and context (class names, function names, or code) available:
# Path: luna/objects.py
# class LuaValue(object):
# class LBoolean(LuaValue):
# class LNil(LuaValue):
# class LNumber(LuaValue):
# class LString(LuaValue):
# class LVar(LuaValue):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
#
# Path: luna/stdlib/builtin.py
# def lua_print(luavalue):
#
# Path: luna/vm/opcodes.py
# class OpCode(object):
# class BinaryAdd(OpCode):
# class BinarySubtract(OpCode):
# class Call(OpCode):
# class LoadConst(OpCode):
# class LoadName(OpCode):
# class PopBlock(OpCode):
# class SetupDo(OpCode):
# class StoreName(OpCode):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, index):
# def __init__(self, index):
# def __init__(self, index):
. Output only the next line. | self.stack.append(value) |
Using the snippet: <|code_start|> v = obj.LNumber(y.value + x.value)
self.stack.append(v)
elif type(op) == ops.BinarySubtract:
x = self.stack.pop()
y = self.stack.pop()
v = obj.LNumber(y.value - x.value)
self.stack.append(v)
elif type(op) == ops.Call:
func = self.stack.pop()
arg = self.stack.pop()
if type(arg) == obj.LVar:
arg = self.env[arg]
func(arg)
elif type(op) == ops.LoadConst:
self.stack.append(self.consts[op.index])
elif type(op) == ops.LoadName:
lvar = self.vars[op.index]
value = self.env.get(lvar)
if value is None:
value = getattr(builtin, 'lua_' + lvar.value)
self.stack.append(value)
elif type(op) == ops.StoreName:
var = self.vars[op.index]
val = self.stack.pop()
if type(val) == obj.LVar:
<|code_end|>
, determine the next line of code. You have imports:
from luna import objects as obj
from luna.stdlib import builtin
from luna.vm import opcodes as ops
and context (class names, function names, or code) available:
# Path: luna/objects.py
# class LuaValue(object):
# class LBoolean(LuaValue):
# class LNil(LuaValue):
# class LNumber(LuaValue):
# class LString(LuaValue):
# class LVar(LuaValue):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
#
# Path: luna/stdlib/builtin.py
# def lua_print(luavalue):
#
# Path: luna/vm/opcodes.py
# class OpCode(object):
# class BinaryAdd(OpCode):
# class BinarySubtract(OpCode):
# class Call(OpCode):
# class LoadConst(OpCode):
# class LoadName(OpCode):
# class PopBlock(OpCode):
# class SetupDo(OpCode):
# class StoreName(OpCode):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, index):
# def __init__(self, index):
# def __init__(self, index):
. Output only the next line. | val = self.env[val] |
Here is a snippet: <|code_start|> obj.LVar('a'),
]
def test_arith1(compile_expr):
frame = compile_expr('1 - 2')
assert frame.code == [
ops.LoadConst(0),
ops.LoadConst(1),
ops.BinarySubtract(),
]
assert frame.consts == [
obj.LNumber(1.0),
obj.LNumber(2.0),
]
def test_arith2(compile_expr):
frame = compile_expr('a - b')
assert frame.code == [
ops.LoadName(0),
ops.LoadName(1),
ops.BinarySubtract(),
]
assert frame.vars == [
obj.LVar('a'),
obj.LVar('b'),
]
<|code_end|>
. Write the next line using the current file imports:
from luna import objects as obj
from luna.vm import opcodes as ops
and context from other files:
# Path: luna/objects.py
# class LuaValue(object):
# class LBoolean(LuaValue):
# class LNil(LuaValue):
# class LNumber(LuaValue):
# class LString(LuaValue):
# class LVar(LuaValue):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
#
# Path: luna/vm/opcodes.py
# class OpCode(object):
# class BinaryAdd(OpCode):
# class BinarySubtract(OpCode):
# class Call(OpCode):
# class LoadConst(OpCode):
# class LoadName(OpCode):
# class PopBlock(OpCode):
# class SetupDo(OpCode):
# class StoreName(OpCode):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, index):
# def __init__(self, index):
# def __init__(self, index):
, which may include functions, classes, or code. Output only the next line. | def test_arith3(compile_expr): |
Given the following code snippet before the placeholder: <|code_start|>
def test_ass1(compile_stmt):
frame = compile_stmt('a = 1')
assert frame.code == [
ops.LoadConst(0),
ops.StoreName(0),
]
assert frame.consts == [
obj.LNumber(1.0),
]
<|code_end|>
, predict the next line using imports from the current file:
from luna import objects as obj
from luna.vm import opcodes as ops
and context including class names, function names, and sometimes code from other files:
# Path: luna/objects.py
# class LuaValue(object):
# class LBoolean(LuaValue):
# class LNil(LuaValue):
# class LNumber(LuaValue):
# class LString(LuaValue):
# class LVar(LuaValue):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
#
# Path: luna/vm/opcodes.py
# class OpCode(object):
# class BinaryAdd(OpCode):
# class BinarySubtract(OpCode):
# class Call(OpCode):
# class LoadConst(OpCode):
# class LoadName(OpCode):
# class PopBlock(OpCode):
# class SetupDo(OpCode):
# class StoreName(OpCode):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, index):
# def __init__(self, index):
# def __init__(self, index):
. Output only the next line. | assert frame.vars == [ |
Given the code snippet: <|code_start|>
def lua_print(luavalue):
pyvalue = 'nil'
if type(luavalue) == obj.LString:
pyvalue = luavalue.value
elif type(luavalue) == obj.LNumber:
pyvalue = luavalue.value
<|code_end|>
, generate the next line using the imports in this file:
from luna import objects as obj
and context (functions, classes, or occasionally code) from other files:
# Path: luna/objects.py
# class LuaValue(object):
# class LBoolean(LuaValue):
# class LNil(LuaValue):
# class LNumber(LuaValue):
# class LString(LuaValue):
# class LVar(LuaValue):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
. Output only the next line. | if int(pyvalue) == pyvalue: |
Continue the code snippet: <|code_start|> ops.LoadConst(1),
ops.StoreName(),
]
assert frame.consts == [
obj.LVar('a'),
obj.LNumber(1.0),
]
def test_binop1(compile_expr):
frame = compile_expr('1 + 2')
assert frame.code == [
ops.LoadConst(0),
ops.LoadConst(1),
ops.BinaryAdd(),
]
assert frame.consts == [
obj.LNumber(1.0),
obj.LNumber(2.0),
]
def test_call1(compile_stmt):
frame = compile_stmt('print(1)')
assert frame.code == [
ops.LoadConst(0),
ops.LoadConst(1),
ops.Call(),
<|code_end|>
. Use current file imports:
from luna import objects as obj
from luna.vm import opcodes as ops
and context (classes, functions, or code) from other files:
# Path: luna/objects.py
# class LuaValue(object):
# class LBoolean(LuaValue):
# class LNil(LuaValue):
# class LNumber(LuaValue):
# class LString(LuaValue):
# class LVar(LuaValue):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
#
# Path: luna/vm/opcodes.py
# class OpCode(object):
# class BinaryAdd(OpCode):
# class BinarySubtract(OpCode):
# class Call(OpCode):
# class LoadConst(OpCode):
# class LoadName(OpCode):
# class PopBlock(OpCode):
# class SetupDo(OpCode):
# class StoreName(OpCode):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, index):
# def __init__(self, index):
# def __init__(self, index):
. Output only the next line. | ] |
Here is a snippet: <|code_start|>
def test_ass1(compile_stmt):
frame = compile_stmt('a = 1')
assert frame.code == [
ops.LoadConst(0),
ops.LoadConst(1),
ops.StoreName(),
]
assert frame.consts == [
obj.LVar('a'),
obj.LNumber(1.0),
<|code_end|>
. Write the next line using the current file imports:
from luna import objects as obj
from luna.vm import opcodes as ops
and context from other files:
# Path: luna/objects.py
# class LuaValue(object):
# class LBoolean(LuaValue):
# class LNil(LuaValue):
# class LNumber(LuaValue):
# class LString(LuaValue):
# class LVar(LuaValue):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
#
# Path: luna/vm/opcodes.py
# class OpCode(object):
# class BinaryAdd(OpCode):
# class BinarySubtract(OpCode):
# class Call(OpCode):
# class LoadConst(OpCode):
# class LoadName(OpCode):
# class PopBlock(OpCode):
# class SetupDo(OpCode):
# class StoreName(OpCode):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, index):
# def __init__(self, index):
# def __init__(self, index):
, which may include functions, classes, or code. Output only the next line. | ] |
Using the snippet: <|code_start|>
def test_ass1(interp_stmt):
frame = interp_stmt('a = 1')
assert frame.env == {
obj.LVar('a'): obj.LNumber(1.0),
<|code_end|>
, determine the next line of code. You have imports:
from luna import objects as obj
and context (class names, function names, or code) available:
# Path: luna/objects.py
# class LuaValue(object):
# class LBoolean(LuaValue):
# class LNil(LuaValue):
# class LNumber(LuaValue):
# class LString(LuaValue):
# class LVar(LuaValue):
# def __eq__(self, other):
# def __ne__(self, other):
# def __iter__(self):
# def __repr__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
# def __init__(self, value):
# def __hash__(self):
. Output only the next line. | } |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.