| Instruction (string, lengths 362–7.83k) | output_code (string, lengths 1–945) |
|---|---|
Given snippet: <|code_start|>
class PatentName(BaseItem):
is_required = True
crawler_id = url_detail.get('crawler_id')
english = ['patent_name', 'invention_name']
chinese = '专利名称'
@classmethod
def parse(cls, raw, item, process=None):
if process is not None:
patent_name = process.get('abstractInfoDTO').get('tioIndex').get('value')
item.patent_name = ResultItem(title=cls.title, value=str(patent_name))
return item
class Abstract(BaseItem):
crawler_id = url_detail.get('crawler_id')
english = 'abstract'
chinese = '摘要'
@classmethod
def parse(cls, raw, item, process=None):
if process is not None:
abstract = BeautifulSoup(process.get('abstractInfoDTO').get('abIndexList')[0].get('value'),
'lxml').text.replace('\n', '').strip()
item.abstract = ResultItem(title=cls.title, value=abstract)
return item
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from bs4 import BeautifulSoup
from controller.url_config import url_search, url_detail, url_related_info, url_full_text
from crawler.items import DataItem
from entity.crawler_item import BaseItem, ResultItem
and context:
# Path: controller/url_config.py
#
# Path: crawler/items.py
# class DataItem:
#
# def __repr__(self):
# return str(self.__dict__)
#
# Path: entity/crawler_item.py
# class BaseItem:
# # 是否必须参数,即无论用户在config.ini如何配置都会进行采集记录
# is_required = False
# # 在url_config.py对应的url标志
# crawler_id = -1
# # 采集结果对应的中文名称
# chinese = None
# # 采集结果对应的英文名称
# english = None
# # 字段标题
# title = None
# # 隶属表名称
# table_name = 'main'
# # 字段名称
# field_names = None
#
# @classmethod
# def set_title(cls, title):
# if cls.title is None:
# cls.title = title
#
# @classmethod
# def get_chinese(cls):
# """
# 如果中文为字符串,则直接返回,若为list则返回第一个元素
# :return:
# """
# if isinstance(cls.chinese, str):
# return cls.chinese
# elif isinstance(cls.chinese, list):
# return str(cls.chinese[0])
#
# @classmethod
# def check_chinese(cls, chinese):
# if isinstance(cls.chinese, str):
# if chinese == cls.chinese:
# return True
# else:
# return False
# elif isinstance(cls.chinese, list):
# if chinese in cls.chinese:
# cls.chinese = chinese
# return True
# else:
# return False
#
# @classmethod
# def check_english(cls, english):
# if isinstance(cls.english, str):
# if english == cls.english:
# return True
# else:
# return False
# elif isinstance(cls.english, list):
# if english in cls.english:
# cls.english = english
# return True
# else:
# return False
#
# @classmethod
# def get_english(cls):
# """
# 如果英文为字符串,则直接返回,若为list则返回第一个元素
# :return:
# """
# if isinstance(cls.english, str):
# return cls.english
# elif isinstance(cls.english, list):
# return str(cls.english[0])
#
# @classmethod
# def parse(cls, raw, item, process=None):
# """
# 数据解析函数
# :param raw: 原生内容
# :param item: scarpy采集的item
# :param process: 如果非json则传递BeautifulSoup对象
# :return:
# """
# pass
#
# class ResultItem:
# """
# 储存采集结果的对象
# """
# def __init__(self, table='main', title=None, value=None):
# self.table = table
# self.title = title
# self.value = value
#
# def __repr__(self):
# return str(self.__dict__)
which might include code, classes, or functions. Output only the next line. | def push_item(json_list, item: DataItem, title, name): |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""
Created on 2018/3/14
@author: will4906
采集的内容、方式定义
"""
<|code_end|>
. Use current file imports:
from bs4 import BeautifulSoup
from controller.url_config import url_search, url_detail, url_related_info, url_full_text
from crawler.items import DataItem
from entity.crawler_item import BaseItem, ResultItem
and context (classes, functions, or code) from other files:
# Path: controller/url_config.py
#
# Path: crawler/items.py
# class DataItem:
#
# def __repr__(self):
# return str(self.__dict__)
#
# Path: entity/crawler_item.py
# class BaseItem:
# # 是否必须参数,即无论用户在config.ini如何配置都会进行采集记录
# is_required = False
# # 在url_config.py对应的url标志
# crawler_id = -1
# # 采集结果对应的中文名称
# chinese = None
# # 采集结果对应的英文名称
# english = None
# # 字段标题
# title = None
# # 隶属表名称
# table_name = 'main'
# # 字段名称
# field_names = None
#
# @classmethod
# def set_title(cls, title):
# if cls.title is None:
# cls.title = title
#
# @classmethod
# def get_chinese(cls):
# """
# 如果中文为字符串,则直接返回,若为list则返回第一个元素
# :return:
# """
# if isinstance(cls.chinese, str):
# return cls.chinese
# elif isinstance(cls.chinese, list):
# return str(cls.chinese[0])
#
# @classmethod
# def check_chinese(cls, chinese):
# if isinstance(cls.chinese, str):
# if chinese == cls.chinese:
# return True
# else:
# return False
# elif isinstance(cls.chinese, list):
# if chinese in cls.chinese:
# cls.chinese = chinese
# return True
# else:
# return False
#
# @classmethod
# def check_english(cls, english):
# if isinstance(cls.english, str):
# if english == cls.english:
# return True
# else:
# return False
# elif isinstance(cls.english, list):
# if english in cls.english:
# cls.english = english
# return True
# else:
# return False
#
# @classmethod
# def get_english(cls):
# """
# 如果英文为字符串,则直接返回,若为list则返回第一个元素
# :return:
# """
# if isinstance(cls.english, str):
# return cls.english
# elif isinstance(cls.english, list):
# return str(cls.english[0])
#
# @classmethod
# def parse(cls, raw, item, process=None):
# """
# 数据解析函数
# :param raw: 原生内容
# :param item: scarpy采集的item
# :param process: 如果非json则传递BeautifulSoup对象
# :return:
# """
# pass
#
# class ResultItem:
# """
# 储存采集结果的对象
# """
# def __init__(self, table='main', title=None, value=None):
# self.table = table
# self.title = title
# self.value = value
#
# def __repr__(self):
# return str(self.__dict__)
. Output only the next line. | class PatentId(BaseItem): |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
"""
Created on 2018/3/14
@author: will4906
采集的内容、方式定义
"""
class PatentId(BaseItem):
is_required = True
crawler_id = url_search.get('crawler_id')
english = 'patent_id'
chinese = ['专利标志', '专利id', '专利ID', '专利Id']
@classmethod
def parse(cls, raw, item, process=None):
if process is not None:
patent_id = process.find(attrs={'name': 'idHidden'}).get('value')
<|code_end|>
using the current file's imports:
from bs4 import BeautifulSoup
from controller.url_config import url_search, url_detail, url_related_info, url_full_text
from crawler.items import DataItem
from entity.crawler_item import BaseItem, ResultItem
and any relevant context from other files:
# Path: controller/url_config.py
#
# Path: crawler/items.py
# class DataItem:
#
# def __repr__(self):
# return str(self.__dict__)
#
# Path: entity/crawler_item.py
# class BaseItem:
# # 是否必须参数,即无论用户在config.ini如何配置都会进行采集记录
# is_required = False
# # 在url_config.py对应的url标志
# crawler_id = -1
# # 采集结果对应的中文名称
# chinese = None
# # 采集结果对应的英文名称
# english = None
# # 字段标题
# title = None
# # 隶属表名称
# table_name = 'main'
# # 字段名称
# field_names = None
#
# @classmethod
# def set_title(cls, title):
# if cls.title is None:
# cls.title = title
#
# @classmethod
# def get_chinese(cls):
# """
# 如果中文为字符串,则直接返回,若为list则返回第一个元素
# :return:
# """
# if isinstance(cls.chinese, str):
# return cls.chinese
# elif isinstance(cls.chinese, list):
# return str(cls.chinese[0])
#
# @classmethod
# def check_chinese(cls, chinese):
# if isinstance(cls.chinese, str):
# if chinese == cls.chinese:
# return True
# else:
# return False
# elif isinstance(cls.chinese, list):
# if chinese in cls.chinese:
# cls.chinese = chinese
# return True
# else:
# return False
#
# @classmethod
# def check_english(cls, english):
# if isinstance(cls.english, str):
# if english == cls.english:
# return True
# else:
# return False
# elif isinstance(cls.english, list):
# if english in cls.english:
# cls.english = english
# return True
# else:
# return False
#
# @classmethod
# def get_english(cls):
# """
# 如果英文为字符串,则直接返回,若为list则返回第一个元素
# :return:
# """
# if isinstance(cls.english, str):
# return cls.english
# elif isinstance(cls.english, list):
# return str(cls.english[0])
#
# @classmethod
# def parse(cls, raw, item, process=None):
# """
# 数据解析函数
# :param raw: 原生内容
# :param item: scarpy采集的item
# :param process: 如果非json则传递BeautifulSoup对象
# :return:
# """
# pass
#
# class ResultItem:
# """
# 储存采集结果的对象
# """
# def __init__(self, table='main', title=None, value=None):
# self.table = table
# self.title = title
# self.value = value
#
# def __repr__(self):
# return str(self.__dict__)
. Output only the next line. | item.patent_id = ResultItem(title=cls.title, value=str(patent_id)) |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
"""
Created on 2017/3/19
@author: will4906
基础路径模块
以下地址、文件名可根据用户使用自行修改,工程所有地址将会采用。
"""
"""
路径设置
"""
# 工程根目录,注意此处以初次调用这个变量的元素为准,工程起始目录定位在main,若有修改请注意这个位置
BASE_PATH = os.path.split(os.path.split(__file__)[0])[0]
# 输出目录
OUTPUT_PATH = os.path.join(BASE_PATH, 'output')
# 输出分组,默认按年月日_时分秒分组
<|code_end|>
with the help of current file imports:
import configparser
import os
import click
from util.TimeUtil import TimeUtil
and context from other files:
# Path: util/TimeUtil.py
# class TimeUtil:
#
# @staticmethod
# def getFormatTime(strFormat):
# return time.strftime(strFormat,time.localtime(time.time()))
, which may contain function names, class names, or code. Output only the next line. | OUTPUT_GROUP_PATH = os.path.join(OUTPUT_PATH, TimeUtil.getFormatTime('%Y%m%d_%H%M%S')) |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
"""
Created on 2018/2/25
@author: will4906
代理模块
程序的代理决定使用https://github.com/jhao104/proxy_pool的代理池作为代理方式,
开发者可以修改下方get_proxy函数进行自定义
"""
logger = Logger(__name__)
def notify_ip_address():
"""
通知专利网我们的ip地址,
这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
:return:
"""
<|code_end|>
. Use current file imports:
(import json
import controller as ctrl
import requests
from logbook import Logger
from requests.exceptions import RequestException, ReadTimeout
from config import base_settings as bs
from controller.url_config import url_pre_execute, url_index)
and context including class names, function names, or small code snippets from other files:
# Path: config/base_settings.py
# BASE_PATH = os.path.split(os.path.split(__file__)[0])[0]
# OUTPUT_PATH = os.path.join(BASE_PATH, 'output')
# OUTPUT_GROUP_PATH = os.path.join(OUTPUT_PATH, TimeUtil.getFormatTime('%Y%m%d_%H%M%S'))
# DATABASE_NAME = os.path.join(OUTPUT_GROUP_PATH, 'Patent.db')
# EXCEL_NAME = os.path.join(OUTPUT_GROUP_PATH, '专利.xlsx')
# CHARTS_NAME = os.path.join(OUTPUT_GROUP_PATH, 'charts.html')
# LOG_FILENAME = os.path.join(OUTPUT_GROUP_PATH, "PatentCrawler.log")
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
# AD_PATH = os.path.join(BASE_PATH, 'res', 'advertisement', 'ad.html')
# USE_PROXY = False
# PROXY_URL = 'http://127.0.0.1:5010/get'
# TIMEOUT = 10
# DOWNLOAD_DELAY = 1
# OUTPUT_ITEMS = ['data', 'log', 'chart']
# USE_PROXY = use_proxy
# PROXY_URL = proxy_url
# TIMEOUT = timeout
# DOWNLOAD_DELAY = delay
# OUTPUT_ITEMS = output_items
# def check_proxy(cfg):
# def check_request(cfg: configparser.ConfigParser):
# def check_output(cfg):
#
# Path: controller/url_config.py
. Output only the next line. | resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES) |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
"""
Created on 2018/2/25
@author: will4906
代理模块
程序的代理决定使用https://github.com/jhao104/proxy_pool的代理池作为代理方式,
开发者可以修改下方get_proxy函数进行自定义
"""
logger = Logger(__name__)
def notify_ip_address():
"""
通知专利网我们的ip地址,
这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
:return:
"""
<|code_end|>
, determine the next line of code. You have imports:
import json
import controller as ctrl
import requests
from logbook import Logger
from requests.exceptions import RequestException, ReadTimeout
from config import base_settings as bs
from controller.url_config import url_pre_execute, url_index
and context (class names, function names, or code) available:
# Path: config/base_settings.py
# BASE_PATH = os.path.split(os.path.split(__file__)[0])[0]
# OUTPUT_PATH = os.path.join(BASE_PATH, 'output')
# OUTPUT_GROUP_PATH = os.path.join(OUTPUT_PATH, TimeUtil.getFormatTime('%Y%m%d_%H%M%S'))
# DATABASE_NAME = os.path.join(OUTPUT_GROUP_PATH, 'Patent.db')
# EXCEL_NAME = os.path.join(OUTPUT_GROUP_PATH, '专利.xlsx')
# CHARTS_NAME = os.path.join(OUTPUT_GROUP_PATH, 'charts.html')
# LOG_FILENAME = os.path.join(OUTPUT_GROUP_PATH, "PatentCrawler.log")
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
# AD_PATH = os.path.join(BASE_PATH, 'res', 'advertisement', 'ad.html')
# USE_PROXY = False
# PROXY_URL = 'http://127.0.0.1:5010/get'
# TIMEOUT = 10
# DOWNLOAD_DELAY = 1
# OUTPUT_ITEMS = ['data', 'log', 'chart']
# USE_PROXY = use_proxy
# PROXY_URL = proxy_url
# TIMEOUT = timeout
# DOWNLOAD_DELAY = delay
# OUTPUT_ITEMS = output_items
# def check_proxy(cfg):
# def check_request(cfg: configparser.ConfigParser):
# def check_output(cfg):
#
# Path: controller/url_config.py
. Output only the next line. | resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES) |
Using the snippet: <|code_start|> return None
def update_proxy():
"""
获取并校验代理ip地址
:return:
"""
if bs.USE_PROXY:
i = 0
while True:
try:
get_proxy()
notify_ip_address()
return True
except Exception:
i += 1
logger.error("代理获取失败,尝试重试,重试次数%s" % (i, ))
else:
logger.info('notify address')
notify_ip_address()
def update_cookies(cookies=None):
"""
更新或获取cookies
:param cookies:
:return:
"""
if cookies is None:
<|code_end|>
, determine the next line of code. You have imports:
import json
import controller as ctrl
import requests
from logbook import Logger
from requests.exceptions import RequestException, ReadTimeout
from config import base_settings as bs
from controller.url_config import url_pre_execute, url_index
and context (class names, function names, or code) available:
# Path: config/base_settings.py
# BASE_PATH = os.path.split(os.path.split(__file__)[0])[0]
# OUTPUT_PATH = os.path.join(BASE_PATH, 'output')
# OUTPUT_GROUP_PATH = os.path.join(OUTPUT_PATH, TimeUtil.getFormatTime('%Y%m%d_%H%M%S'))
# DATABASE_NAME = os.path.join(OUTPUT_GROUP_PATH, 'Patent.db')
# EXCEL_NAME = os.path.join(OUTPUT_GROUP_PATH, '专利.xlsx')
# CHARTS_NAME = os.path.join(OUTPUT_GROUP_PATH, 'charts.html')
# LOG_FILENAME = os.path.join(OUTPUT_GROUP_PATH, "PatentCrawler.log")
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
# AD_PATH = os.path.join(BASE_PATH, 'res', 'advertisement', 'ad.html')
# USE_PROXY = False
# PROXY_URL = 'http://127.0.0.1:5010/get'
# TIMEOUT = 10
# DOWNLOAD_DELAY = 1
# OUTPUT_ITEMS = ['data', 'log', 'chart']
# USE_PROXY = use_proxy
# PROXY_URL = proxy_url
# TIMEOUT = timeout
# DOWNLOAD_DELAY = delay
# OUTPUT_ITEMS = output_items
# def check_proxy(cfg):
# def check_request(cfg: configparser.ConfigParser):
# def check_output(cfg):
#
# Path: controller/url_config.py
. Output only the next line. | ctrl.COOKIES = requests.get(url=url_index.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT).cookies |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
logger = Logger(__name__)
class PatentMiddleware(RetryMiddleware):
def process_request(self, request, spider):
<|code_end|>
. Write the next line using the current file imports:
import time
import controller as ctrl
from logbook import Logger
from requests.utils import dict_from_cookiejar
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from config.base_settings import USE_PROXY
from service.account import login
and context from other files:
# Path: config/base_settings.py
# USE_PROXY = False
#
# Path: service/account.py
# def login(username=None, password=None):
# """
# 登录API
# :return: True: 登录成功; False: 登录失败
# """
# if username is None or password is None:
# username = account.username
# password = account.password
# ctrl.BEING_LOG = True
# if check_login_status():
# ctrl.BEING_LOG = False
# return True
#
# error_times = 0
# while True:
# try:
# # logger.debug("before proxy")
# update_proxy()
# # logger.debug("before cookie")
# update_cookies()
# # logger.debug("after cookie")
# busername = change_to_base64(username)
# bpassword = change_to_base64(password)
# captcha = get_captcha()
# logger.info('验证码识别结果:%s' % captcha)
# form_data = url_login.get('form_data')
# form_data.__setitem__('j_validation_code', captcha)
# form_data.__setitem__('j_username', busername)
# form_data.__setitem__('j_password', bpassword)
#
# resp = requests.post(url=url_login.get('url'), headers=url_login.get('headers'), data=form_data,
# cookies=ctrl.COOKIES, proxies=ctrl.PROXIES, timeout=TIMEOUT)
# if resp.text.find(username + ',欢迎访问') != -1:
# # 网站调整了逻辑,下面这句不用了
# # print(resp.cookies)
# # ctrl.COOKIES.__delitem__('IS_LOGIN')
# # ctrl.COOKIES.set('IS_LOGIN', 'true', domain='www.pss-system.gov.cn/sipopublicsearch/patentsearch')
# jsession = ctrl.COOKIES.get('JSESSIONID')
# resp.cookies.__delitem__('JSESSIONID')
# resp.cookies.set('JSESSIONID', jsession, domain='www.pss-system.gov.cn')
# update_cookies(resp.cookies)
# requests.post(
# 'http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/showViewList-jumpToView.shtml',
# cookies=ctrl.COOKIES, proxies=ctrl.PROXIES)
# ctrl.BEING_LOG = False
# logger.info('登录成功')
# return True
# else:
# if error_times > 5:
# break
# logger.error('登录失败')
# error_times += 1
# except Exception as e:
# logger.error(e)
#
# ctrl.BEING_LOG = False
# return False
, which may include functions, classes, or code. Output only the next line. | if USE_PROXY and ctrl.PROXIES is not None: |
Given the following code snippet before the placeholder: <|code_start|># Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
logger = Logger(__name__)
class PatentMiddleware(RetryMiddleware):
def process_request(self, request, spider):
if USE_PROXY and ctrl.PROXIES is not None:
request.meta['proxy'] = "http://%s" % (ctrl.PROXIES.get('http'))
if ctrl.COOKIES is not None:
request.cookies = dict_from_cookiejar(ctrl.COOKIES)
def process_response(self, request, response, spider):
body = response.body_as_unicode()
# logger.info(body.find('window.location.href = contextPath +"/portal/uilogin-forwardLogin.shtml";'))
# logger.info(body.find('访问受限'))
# logger.info(response.status)
if response.status == 404 or response.status == 417:
pass
# logger.info(body)
if body.find('window.location.href = contextPath +"/portal/uilogin-forwardLogin.shtml";') != -1 or body.find(
'访问受限') != -1 or response.status == 404:
logger.info('未登录,登陆中,请稍后···')
login_ok = False
if ctrl.BEING_LOG is False:
<|code_end|>
, predict the next line using imports from the current file:
import time
import controller as ctrl
from logbook import Logger
from requests.utils import dict_from_cookiejar
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from config.base_settings import USE_PROXY
from service.account import login
and context including class names, function names, and sometimes code from other files:
# Path: config/base_settings.py
# USE_PROXY = False
#
# Path: service/account.py
# def login(username=None, password=None):
# """
# 登录API
# :return: True: 登录成功; False: 登录失败
# """
# if username is None or password is None:
# username = account.username
# password = account.password
# ctrl.BEING_LOG = True
# if check_login_status():
# ctrl.BEING_LOG = False
# return True
#
# error_times = 0
# while True:
# try:
# # logger.debug("before proxy")
# update_proxy()
# # logger.debug("before cookie")
# update_cookies()
# # logger.debug("after cookie")
# busername = change_to_base64(username)
# bpassword = change_to_base64(password)
# captcha = get_captcha()
# logger.info('验证码识别结果:%s' % captcha)
# form_data = url_login.get('form_data')
# form_data.__setitem__('j_validation_code', captcha)
# form_data.__setitem__('j_username', busername)
# form_data.__setitem__('j_password', bpassword)
#
# resp = requests.post(url=url_login.get('url'), headers=url_login.get('headers'), data=form_data,
# cookies=ctrl.COOKIES, proxies=ctrl.PROXIES, timeout=TIMEOUT)
# if resp.text.find(username + ',欢迎访问') != -1:
# # 网站调整了逻辑,下面这句不用了
# # print(resp.cookies)
# # ctrl.COOKIES.__delitem__('IS_LOGIN')
# # ctrl.COOKIES.set('IS_LOGIN', 'true', domain='www.pss-system.gov.cn/sipopublicsearch/patentsearch')
# jsession = ctrl.COOKIES.get('JSESSIONID')
# resp.cookies.__delitem__('JSESSIONID')
# resp.cookies.set('JSESSIONID', jsession, domain='www.pss-system.gov.cn')
# update_cookies(resp.cookies)
# requests.post(
# 'http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/showViewList-jumpToView.shtml',
# cookies=ctrl.COOKIES, proxies=ctrl.PROXIES)
# ctrl.BEING_LOG = False
# logger.info('登录成功')
# return True
# else:
# if error_times > 5:
# break
# logger.error('登录失败')
# error_times += 1
# except Exception as e:
# logger.error(e)
#
# ctrl.BEING_LOG = False
# return False
. Output only the next line. | login_ok = login() |
Here is a snippet: <|code_start|>
def process_item(self, item, spider):
data = item.get('data').__dict__
table_dict = {}
logger.info('采集内容:%s' % str(data))
for name, fields in info.data_table.items():
table_list = [[]]
for key, value in data.items():
if not isinstance(value, list):
if value.table == name or value.title in info.required_list:
for t in table_list:
t.append(value.value)
else:
old = copy.deepcopy(table_list[0])
for list_index, value_list in enumerate(value):
if not isinstance(value_list, list):
value_list = [value_list]
for vi, va in enumerate(value_list):
part_list = []
for v in va:
if v.table == name or v.title in info.required_list:
part_list.append(v.value)
if len(part_list) == 0:
break
if list_index > 0:
table_list.append(old)
table_list[-1] = old + part_list
table_dict.__setitem__(name, table_list)
<|code_end|>
. Write the next line using the current file imports:
import copy
import sqlite3
from logbook import Logger
from config import base_settings as bs
from entity.models import Patents
from service import info
and context from other files:
# Path: config/base_settings.py
# BASE_PATH = os.path.split(os.path.split(__file__)[0])[0]
# OUTPUT_PATH = os.path.join(BASE_PATH, 'output')
# OUTPUT_GROUP_PATH = os.path.join(OUTPUT_PATH, TimeUtil.getFormatTime('%Y%m%d_%H%M%S'))
# DATABASE_NAME = os.path.join(OUTPUT_GROUP_PATH, 'Patent.db')
# EXCEL_NAME = os.path.join(OUTPUT_GROUP_PATH, '专利.xlsx')
# CHARTS_NAME = os.path.join(OUTPUT_GROUP_PATH, 'charts.html')
# LOG_FILENAME = os.path.join(OUTPUT_GROUP_PATH, "PatentCrawler.log")
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
# AD_PATH = os.path.join(BASE_PATH, 'res', 'advertisement', 'ad.html')
# USE_PROXY = False
# PROXY_URL = 'http://127.0.0.1:5010/get'
# TIMEOUT = 10
# DOWNLOAD_DELAY = 1
# OUTPUT_ITEMS = ['data', 'log', 'chart']
# USE_PROXY = use_proxy
# PROXY_URL = proxy_url
# TIMEOUT = timeout
# DOWNLOAD_DELAY = delay
# OUTPUT_ITEMS = output_items
# def check_proxy(cfg):
# def check_request(cfg: configparser.ConfigParser):
# def check_output(cfg):
#
# Path: entity/models.py
# class Patents(BaseModel):
#
# # 行号
# row_id = PrimaryKeyField()
# # 专利id
# patent_id = CharField(unique=True)
# # 申请号
# request_number = CharField()
# # 申请日
# request_date = DateField()
# # 公开(公告)号
# publish_number = CharField()
# # 公开(公告)日
# publish_date = DateField()
# # 发明名称
# invention_name = CharField()
# # 申请(专利权)人
# proposer = CharField()
# # 发明人
# inventor = CharField()
# # 法律状态
# legal_status = CharField(null=True)
# # 法律状态生效日期
# legal_status_effective_date = DateField(null=True)
# # 摘要
# abstract = TextField(null=True)
# # IPC分类号
# ipc_class_number = CharField(null=True)
# # 优先权号
# priority_number = CharField(null=True)
# # 优先权日
# priority_date = DateField(null=True)
# # 外观设计洛迦诺分类号
# locarno_class_number = CharField(null=True)
# # 代理人
# agent = CharField(null=True)
# # 代理机构
# agency = CharField(null=True)
# # 申请人邮编
# proposer_post_code = CharField(null=True)
# # 申请人地址
# proposer_address = CharField(null=True)
# # 申请人所在国(省)
# proposer_location = CharField(null=True)
# # 发明类型
# invention_type = CharField(null=True)
# # 公开国
# publish_country = CharField(null=True)
# # 权利要求
# claim = CharField(null=True)
# # 说明书
# instructions = TextField(null=True)
# # FT分类号
# FT_class_number = CharField(null=True)
# # UC分类号
# UC_class_number = CharField(null=True)
# # ECLA分类号
# ECLA_class_number = CharField(null=True)
# # FI分类号
# FI_class_number = CharField(null=True)
# # CPC分类号
# CPC_class_number = CharField(null=True)
#
# Path: service/info.py
# def init_crawler(cfg: configparser.ConfigParser):
# def gen_crawler_list(content_list):
# def push_crawler_list():
# def create_tables():
, which may include functions, classes, or code. Output only the next line. | conn = sqlite3.connect(bs.DATABASE_NAME) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
logger = Logger(__name__)
class CrawlerPipeline(object):
LINE_INDEX = 1
def process_item(self, item, spider):
data = item.get('data').__dict__
table_dict = {}
<|code_end|>
, predict the next line using imports from the current file:
import copy
import sqlite3
from logbook import Logger
from config import base_settings as bs
from entity.models import Patents
from service import info
and context including class names, function names, and sometimes code from other files:
# Path: config/base_settings.py
# BASE_PATH = os.path.split(os.path.split(__file__)[0])[0]
# OUTPUT_PATH = os.path.join(BASE_PATH, 'output')
# OUTPUT_GROUP_PATH = os.path.join(OUTPUT_PATH, TimeUtil.getFormatTime('%Y%m%d_%H%M%S'))
# DATABASE_NAME = os.path.join(OUTPUT_GROUP_PATH, 'Patent.db')
# EXCEL_NAME = os.path.join(OUTPUT_GROUP_PATH, '专利.xlsx')
# CHARTS_NAME = os.path.join(OUTPUT_GROUP_PATH, 'charts.html')
# LOG_FILENAME = os.path.join(OUTPUT_GROUP_PATH, "PatentCrawler.log")
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
# AD_PATH = os.path.join(BASE_PATH, 'res', 'advertisement', 'ad.html')
# USE_PROXY = False
# PROXY_URL = 'http://127.0.0.1:5010/get'
# TIMEOUT = 10
# DOWNLOAD_DELAY = 1
# OUTPUT_ITEMS = ['data', 'log', 'chart']
# USE_PROXY = use_proxy
# PROXY_URL = proxy_url
# TIMEOUT = timeout
# DOWNLOAD_DELAY = delay
# OUTPUT_ITEMS = output_items
# def check_proxy(cfg):
# def check_request(cfg: configparser.ConfigParser):
# def check_output(cfg):
#
# Path: entity/models.py
# class Patents(BaseModel):
#
# # 行号
# row_id = PrimaryKeyField()
# # 专利id
# patent_id = CharField(unique=True)
# # 申请号
# request_number = CharField()
# # 申请日
# request_date = DateField()
# # 公开(公告)号
# publish_number = CharField()
# # 公开(公告)日
# publish_date = DateField()
# # 发明名称
# invention_name = CharField()
# # 申请(专利权)人
# proposer = CharField()
# # 发明人
# inventor = CharField()
# # 法律状态
# legal_status = CharField(null=True)
# # 法律状态生效日期
# legal_status_effective_date = DateField(null=True)
# # 摘要
# abstract = TextField(null=True)
# # IPC分类号
# ipc_class_number = CharField(null=True)
# # 优先权号
# priority_number = CharField(null=True)
# # 优先权日
# priority_date = DateField(null=True)
# # 外观设计洛迦诺分类号
# locarno_class_number = CharField(null=True)
# # 代理人
# agent = CharField(null=True)
# # 代理机构
# agency = CharField(null=True)
# # 申请人邮编
# proposer_post_code = CharField(null=True)
# # 申请人地址
# proposer_address = CharField(null=True)
# # 申请人所在国(省)
# proposer_location = CharField(null=True)
# # 发明类型
# invention_type = CharField(null=True)
# # 公开国
# publish_country = CharField(null=True)
# # 权利要求
# claim = CharField(null=True)
# # 说明书
# instructions = TextField(null=True)
# # FT分类号
# FT_class_number = CharField(null=True)
# # UC分类号
# UC_class_number = CharField(null=True)
# # ECLA分类号
# ECLA_class_number = CharField(null=True)
# # FI分类号
# FI_class_number = CharField(null=True)
# # CPC分类号
# CPC_class_number = CharField(null=True)
#
# Path: service/info.py
# def init_crawler(cfg: configparser.ConfigParser):
# def gen_crawler_list(content_list):
# def push_crawler_list():
# def create_tables():
. Output only the next line. | logger.info('采集内容:%s' % str(data)) |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
"""
Created on 2018/2/27
@author: will4906
"""
def resolve_data(item, title, itemvalue):
<|code_end|>
, determine the next line of code. You have imports:
from entity.query_item import title_define
and context (class names, function names, or code) available:
# Path: entity/query_item.py
# def handle_item_group(item_group):
# def handle_number(title, request_number):
# def handle_date_element(title, date_element):
# def handle_invention_type(title, invention_type):
# def default_handle(title, default):
# def find_element_in_item_group(element, item_group):
# def __init__(self, select='=', date='2001-01-01', enddate=None):
# def __repr__(self):
# def __str__(self):
# def __init__(self, And=None, Or=None, Not=None):
# def add_or(self, *parm):
# def __repr__(self):
# def __init__(self, *parm):
# def add_parm(self, *ps):
# def __repr__(self):
# def __init__(self, *parm):
# def add_parm(self, *ps):
# def __repr__(self):
# def __init__(self, *parm):
# def __repr__(self):
# def __init__(self, **kwargs):
# def __prepare_item(self, items):
# def __check_target_parm(self, parm):
# def __repr__(self):
# AND = ' AND '
# OR = ' OR '
# NOT = ' NOT '
# OR = ' OR '
# class DateSelect:
# class ItemGroup:
# class And:
# class Or:
# class Not:
# class SipoItem:
. Output only the next line. | for key, value in title_define.items(): |
Predict the next line for this snippet: <|code_start|> while True:
try:
password = click.prompt('密码出错,请填写')
self.password = password
break
except:
pass
# 账户信息的单例
account = Account()
def change_to_base64(source):
"""
将参数进行base64加密
:param source:
:return:
"""
return str(base64.b64encode(bytes(source, encoding='utf-8')), 'utf-8')
def get_captcha():
"""
获取验证码
:return:
"""
resp = requests.get(url=url_captcha.get('url'), cookies=ctrl.COOKIES, proxies=ctrl.PROXIES)
with open('captcha.png', 'wb') as f:
f.write(resp.content)
<|code_end|>
with the help of current file imports:
import base64
import configparser
import click
import requests
import controller as ctrl
from logbook import *
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
and context from other files:
# Path: config/base_settings.py
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
#
# TIMEOUT = 10
#
# USE_PROXY = False
#
# Path: controller/url_config.py
#
# Path: service/proxy.py
# def update_proxy():
# """
# 获取并校验代理ip地址
# :return:
# """
# if bs.USE_PROXY:
# i = 0
# while True:
# try:
# get_proxy()
# notify_ip_address()
# return True
# except Exception:
# i += 1
# logger.error("代理获取失败,尝试重试,重试次数%s" % (i, ))
# else:
# logger.info('notify address')
# notify_ip_address()
#
# def notify_ip_address():
# """
# 通知专利网我们的ip地址,
# 这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
# :return:
# """
# resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES)
# # logger.debug(resp.text)
# ip_address = json.loads(resp.text)
# if ctrl.PROXIES is not None:
# if ip_address.get('IP') == ctrl.PROXIES.get('http').split(':')[0]:
# return resp.text
# else:
# raise Exception('ip error')
# else:
# return resp.text
#
# def update_cookies(cookies=None):
# """
# 更新或获取cookies
# :param cookies:
# :return:
# """
# if cookies is None:
# ctrl.COOKIES = requests.get(url=url_index.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT).cookies
#
# else:
# ctrl.COOKIES = cookies
#
# logger.info(ctrl.COOKIES)
# if len(ctrl.COOKIES) == 0:
# logger.error('cookie有问题')
# raise ReadTimeout('cookie有问题')
#
# Path: service/sipoknn.py
# def get_captcha_result(model_path, filename):
# """
# 验证码解析
# :param model_path: 之前训练的模型位置
# :param filename: 需要解析的验证码全路径
# :return: 解析结果
# """
# image = np.asarray(Image.open(filename).convert('L'))
# image = (image > 135) * 255
# letters = [image[:, 6:18].reshape(20 * 12), image[:, 19:31].reshape(20 * 12), image[:, 33:45].reshape(20 * 12),
# image[:, 45:57].reshape(20 * 12)]
# model = joblib.load(model_path)
# result = ''
# for c in model.predict(letters):
# result += c
# return eval(result)
, which may contain function names, class names, or code. Output only the next line. | result = get_captcha_result(CAPTCHA_MODEL_NAME, 'captcha.png') |
Given the following code snippet before the placeholder: <|code_start|> """
登录API
:return: True: 登录成功; False: 登录失败
"""
if username is None or password is None:
username = account.username
password = account.password
ctrl.BEING_LOG = True
if check_login_status():
ctrl.BEING_LOG = False
return True
error_times = 0
while True:
try:
# logger.debug("before proxy")
update_proxy()
# logger.debug("before cookie")
update_cookies()
# logger.debug("after cookie")
busername = change_to_base64(username)
bpassword = change_to_base64(password)
captcha = get_captcha()
logger.info('验证码识别结果:%s' % captcha)
form_data = url_login.get('form_data')
form_data.__setitem__('j_validation_code', captcha)
form_data.__setitem__('j_username', busername)
form_data.__setitem__('j_password', bpassword)
resp = requests.post(url=url_login.get('url'), headers=url_login.get('headers'), data=form_data,
<|code_end|>
, predict the next line using imports from the current file:
import base64
import configparser
import click
import requests
import controller as ctrl
from logbook import *
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
and context including class names, function names, and sometimes code from other files:
# Path: config/base_settings.py
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
#
# TIMEOUT = 10
#
# USE_PROXY = False
#
# Path: controller/url_config.py
#
# Path: service/proxy.py
# def update_proxy():
# """
# 获取并校验代理ip地址
# :return:
# """
# if bs.USE_PROXY:
# i = 0
# while True:
# try:
# get_proxy()
# notify_ip_address()
# return True
# except Exception:
# i += 1
# logger.error("代理获取失败,尝试重试,重试次数%s" % (i, ))
# else:
# logger.info('notify address')
# notify_ip_address()
#
# def notify_ip_address():
# """
# 通知专利网我们的ip地址,
# 这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
# :return:
# """
# resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES)
# # logger.debug(resp.text)
# ip_address = json.loads(resp.text)
# if ctrl.PROXIES is not None:
# if ip_address.get('IP') == ctrl.PROXIES.get('http').split(':')[0]:
# return resp.text
# else:
# raise Exception('ip error')
# else:
# return resp.text
#
# def update_cookies(cookies=None):
# """
# 更新或获取cookies
# :param cookies:
# :return:
# """
# if cookies is None:
# ctrl.COOKIES = requests.get(url=url_index.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT).cookies
#
# else:
# ctrl.COOKIES = cookies
#
# logger.info(ctrl.COOKIES)
# if len(ctrl.COOKIES) == 0:
# logger.error('cookie有问题')
# raise ReadTimeout('cookie有问题')
#
# Path: service/sipoknn.py
# def get_captcha_result(model_path, filename):
# """
# 验证码解析
# :param model_path: 之前训练的模型位置
# :param filename: 需要解析的验证码全路径
# :return: 解析结果
# """
# image = np.asarray(Image.open(filename).convert('L'))
# image = (image > 135) * 255
# letters = [image[:, 6:18].reshape(20 * 12), image[:, 19:31].reshape(20 * 12), image[:, 33:45].reshape(20 * 12),
# image[:, 45:57].reshape(20 * 12)]
# model = joblib.load(model_path)
# result = ''
# for c in model.predict(letters):
# result += c
# return eval(result)
. Output only the next line. | cookies=ctrl.COOKIES, proxies=ctrl.PROXIES, timeout=TIMEOUT) |
Using the snippet: <|code_start|> except:
pass
# 账户信息的单例
account = Account()
def change_to_base64(source):
"""
将参数进行base64加密
:param source:
:return:
"""
return str(base64.b64encode(bytes(source, encoding='utf-8')), 'utf-8')
def get_captcha():
"""
获取验证码
:return:
"""
resp = requests.get(url=url_captcha.get('url'), cookies=ctrl.COOKIES, proxies=ctrl.PROXIES)
with open('captcha.png', 'wb') as f:
f.write(resp.content)
result = get_captcha_result(CAPTCHA_MODEL_NAME, 'captcha.png')
return result
def check_login_status():
<|code_end|>
, determine the next line of code. You have imports:
import base64
import configparser
import click
import requests
import controller as ctrl
from logbook import *
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
and context (class names, function names, or code) available:
# Path: config/base_settings.py
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
#
# TIMEOUT = 10
#
# USE_PROXY = False
#
# Path: controller/url_config.py
#
# Path: service/proxy.py
# def update_proxy():
# """
# 获取并校验代理ip地址
# :return:
# """
# if bs.USE_PROXY:
# i = 0
# while True:
# try:
# get_proxy()
# notify_ip_address()
# return True
# except Exception:
# i += 1
# logger.error("代理获取失败,尝试重试,重试次数%s" % (i, ))
# else:
# logger.info('notify address')
# notify_ip_address()
#
# def notify_ip_address():
# """
# 通知专利网我们的ip地址,
# 这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
# :return:
# """
# resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES)
# # logger.debug(resp.text)
# ip_address = json.loads(resp.text)
# if ctrl.PROXIES is not None:
# if ip_address.get('IP') == ctrl.PROXIES.get('http').split(':')[0]:
# return resp.text
# else:
# raise Exception('ip error')
# else:
# return resp.text
#
# def update_cookies(cookies=None):
# """
# 更新或获取cookies
# :param cookies:
# :return:
# """
# if cookies is None:
# ctrl.COOKIES = requests.get(url=url_index.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT).cookies
#
# else:
# ctrl.COOKIES = cookies
#
# logger.info(ctrl.COOKIES)
# if len(ctrl.COOKIES) == 0:
# logger.error('cookie有问题')
# raise ReadTimeout('cookie有问题')
#
# Path: service/sipoknn.py
# def get_captcha_result(model_path, filename):
# """
# 验证码解析
# :param model_path: 之前训练的模型位置
# :param filename: 需要解析的验证码全路径
# :return: 解析结果
# """
# image = np.asarray(Image.open(filename).convert('L'))
# image = (image > 135) * 255
# letters = [image[:, 6:18].reshape(20 * 12), image[:, 19:31].reshape(20 * 12), image[:, 33:45].reshape(20 * 12),
# image[:, 45:57].reshape(20 * 12)]
# model = joblib.load(model_path)
# result = ''
# for c in model.predict(letters):
# result += c
# return eval(result)
. Output only the next line. | if USE_PROXY: |
Continue the code snippet: <|code_start|> password = cfg.get('account', 'password')
self.password = password
except:
while True:
try:
password = click.prompt('密码出错,请填写')
self.password = password
break
except:
pass
# 账户信息的单例
account = Account()
def change_to_base64(source):
"""
将参数进行base64加密
:param source:
:return:
"""
return str(base64.b64encode(bytes(source, encoding='utf-8')), 'utf-8')
def get_captcha():
"""
获取验证码
:return:
"""
<|code_end|>
. Use current file imports:
import base64
import configparser
import click
import requests
import controller as ctrl
from logbook import *
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
and context (classes, functions, or code) from other files:
# Path: config/base_settings.py
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
#
# TIMEOUT = 10
#
# USE_PROXY = False
#
# Path: controller/url_config.py
#
# Path: service/proxy.py
# def update_proxy():
# """
# 获取并校验代理ip地址
# :return:
# """
# if bs.USE_PROXY:
# i = 0
# while True:
# try:
# get_proxy()
# notify_ip_address()
# return True
# except Exception:
# i += 1
# logger.error("代理获取失败,尝试重试,重试次数%s" % (i, ))
# else:
# logger.info('notify address')
# notify_ip_address()
#
# def notify_ip_address():
# """
# 通知专利网我们的ip地址,
# 这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
# :return:
# """
# resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES)
# # logger.debug(resp.text)
# ip_address = json.loads(resp.text)
# if ctrl.PROXIES is not None:
# if ip_address.get('IP') == ctrl.PROXIES.get('http').split(':')[0]:
# return resp.text
# else:
# raise Exception('ip error')
# else:
# return resp.text
#
# def update_cookies(cookies=None):
# """
# 更新或获取cookies
# :param cookies:
# :return:
# """
# if cookies is None:
# ctrl.COOKIES = requests.get(url=url_index.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT).cookies
#
# else:
# ctrl.COOKIES = cookies
#
# logger.info(ctrl.COOKIES)
# if len(ctrl.COOKIES) == 0:
# logger.error('cookie有问题')
# raise ReadTimeout('cookie有问题')
#
# Path: service/sipoknn.py
# def get_captcha_result(model_path, filename):
# """
# 验证码解析
# :param model_path: 之前训练的模型位置
# :param filename: 需要解析的验证码全路径
# :return: 解析结果
# """
# image = np.asarray(Image.open(filename).convert('L'))
# image = (image > 135) * 255
# letters = [image[:, 6:18].reshape(20 * 12), image[:, 19:31].reshape(20 * 12), image[:, 33:45].reshape(20 * 12),
# image[:, 45:57].reshape(20 * 12)]
# model = joblib.load(model_path)
# result = ''
# for c in model.predict(letters):
# result += c
# return eval(result)
. Output only the next line. | resp = requests.get(url=url_captcha.get('url'), cookies=ctrl.COOKIES, proxies=ctrl.PROXIES) |
Using the snippet: <|code_start|> except:
pass
return False
def login(username=None, password=None):
"""
登录API
:return: True: 登录成功; False: 登录失败
"""
if username is None or password is None:
username = account.username
password = account.password
ctrl.BEING_LOG = True
if check_login_status():
ctrl.BEING_LOG = False
return True
error_times = 0
while True:
try:
# logger.debug("before proxy")
update_proxy()
# logger.debug("before cookie")
update_cookies()
# logger.debug("after cookie")
busername = change_to_base64(username)
bpassword = change_to_base64(password)
captcha = get_captcha()
logger.info('验证码识别结果:%s' % captcha)
<|code_end|>
, determine the next line of code. You have imports:
import base64
import configparser
import click
import requests
import controller as ctrl
from logbook import *
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
and context (class names, function names, or code) available:
# Path: config/base_settings.py
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
#
# TIMEOUT = 10
#
# USE_PROXY = False
#
# Path: controller/url_config.py
#
# Path: service/proxy.py
# def update_proxy():
# """
# 获取并校验代理ip地址
# :return:
# """
# if bs.USE_PROXY:
# i = 0
# while True:
# try:
# get_proxy()
# notify_ip_address()
# return True
# except Exception:
# i += 1
# logger.error("代理获取失败,尝试重试,重试次数%s" % (i, ))
# else:
# logger.info('notify address')
# notify_ip_address()
#
# def notify_ip_address():
# """
# 通知专利网我们的ip地址,
# 这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
# :return:
# """
# resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES)
# # logger.debug(resp.text)
# ip_address = json.loads(resp.text)
# if ctrl.PROXIES is not None:
# if ip_address.get('IP') == ctrl.PROXIES.get('http').split(':')[0]:
# return resp.text
# else:
# raise Exception('ip error')
# else:
# return resp.text
#
# def update_cookies(cookies=None):
# """
# 更新或获取cookies
# :param cookies:
# :return:
# """
# if cookies is None:
# ctrl.COOKIES = requests.get(url=url_index.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT).cookies
#
# else:
# ctrl.COOKIES = cookies
#
# logger.info(ctrl.COOKIES)
# if len(ctrl.COOKIES) == 0:
# logger.error('cookie有问题')
# raise ReadTimeout('cookie有问题')
#
# Path: service/sipoknn.py
# def get_captcha_result(model_path, filename):
# """
# 验证码解析
# :param model_path: 之前训练的模型位置
# :param filename: 需要解析的验证码全路径
# :return: 解析结果
# """
# image = np.asarray(Image.open(filename).convert('L'))
# image = (image > 135) * 255
# letters = [image[:, 6:18].reshape(20 * 12), image[:, 19:31].reshape(20 * 12), image[:, 33:45].reshape(20 * 12),
# image[:, 45:57].reshape(20 * 12)]
# model = joblib.load(model_path)
# result = ''
# for c in model.predict(letters):
# result += c
# return eval(result)
. Output only the next line. | form_data = url_login.get('form_data') |
Given the following code snippet before the placeholder: <|code_start|>
def check_login_status():
if USE_PROXY:
try:
if ctrl.PROXIES is not None:
notify_ip_address()
logger.info('当前已有登录状态')
return True
except:
pass
return False
def login(username=None, password=None):
"""
登录API
:return: True: 登录成功; False: 登录失败
"""
if username is None or password is None:
username = account.username
password = account.password
ctrl.BEING_LOG = True
if check_login_status():
ctrl.BEING_LOG = False
return True
error_times = 0
while True:
try:
# logger.debug("before proxy")
<|code_end|>
, predict the next line using imports from the current file:
import base64
import configparser
import click
import requests
import controller as ctrl
from logbook import *
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
and context including class names, function names, and sometimes code from other files:
# Path: config/base_settings.py
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
#
# TIMEOUT = 10
#
# USE_PROXY = False
#
# Path: controller/url_config.py
#
# Path: service/proxy.py
# def update_proxy():
# """
# 获取并校验代理ip地址
# :return:
# """
# if bs.USE_PROXY:
# i = 0
# while True:
# try:
# get_proxy()
# notify_ip_address()
# return True
# except Exception:
# i += 1
# logger.error("代理获取失败,尝试重试,重试次数%s" % (i, ))
# else:
# logger.info('notify address')
# notify_ip_address()
#
# def notify_ip_address():
# """
# 通知专利网我们的ip地址,
# 这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
# :return:
# """
# resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES)
# # logger.debug(resp.text)
# ip_address = json.loads(resp.text)
# if ctrl.PROXIES is not None:
# if ip_address.get('IP') == ctrl.PROXIES.get('http').split(':')[0]:
# return resp.text
# else:
# raise Exception('ip error')
# else:
# return resp.text
#
# def update_cookies(cookies=None):
# """
# 更新或获取cookies
# :param cookies:
# :return:
# """
# if cookies is None:
# ctrl.COOKIES = requests.get(url=url_index.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT).cookies
#
# else:
# ctrl.COOKIES = cookies
#
# logger.info(ctrl.COOKIES)
# if len(ctrl.COOKIES) == 0:
# logger.error('cookie有问题')
# raise ReadTimeout('cookie有问题')
#
# Path: service/sipoknn.py
# def get_captcha_result(model_path, filename):
# """
# 验证码解析
# :param model_path: 之前训练的模型位置
# :param filename: 需要解析的验证码全路径
# :return: 解析结果
# """
# image = np.asarray(Image.open(filename).convert('L'))
# image = (image > 135) * 255
# letters = [image[:, 6:18].reshape(20 * 12), image[:, 19:31].reshape(20 * 12), image[:, 33:45].reshape(20 * 12),
# image[:, 45:57].reshape(20 * 12)]
# model = joblib.load(model_path)
# result = ''
# for c in model.predict(letters):
# result += c
# return eval(result)
. Output only the next line. | update_proxy() |
Using the snippet: <|code_start|>
# 账户信息的单例
account = Account()
def change_to_base64(source):
"""
将参数进行base64加密
:param source:
:return:
"""
return str(base64.b64encode(bytes(source, encoding='utf-8')), 'utf-8')
def get_captcha():
"""
获取验证码
:return:
"""
resp = requests.get(url=url_captcha.get('url'), cookies=ctrl.COOKIES, proxies=ctrl.PROXIES)
with open('captcha.png', 'wb') as f:
f.write(resp.content)
result = get_captcha_result(CAPTCHA_MODEL_NAME, 'captcha.png')
return result
def check_login_status():
if USE_PROXY:
try:
if ctrl.PROXIES is not None:
<|code_end|>
, determine the next line of code. You have imports:
import base64
import configparser
import click
import requests
import controller as ctrl
from logbook import *
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
and context (class names, function names, or code) available:
# Path: config/base_settings.py
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
#
# TIMEOUT = 10
#
# USE_PROXY = False
#
# Path: controller/url_config.py
#
# Path: service/proxy.py
# def update_proxy():
# """
# 获取并校验代理ip地址
# :return:
# """
# if bs.USE_PROXY:
# i = 0
# while True:
# try:
# get_proxy()
# notify_ip_address()
# return True
# except Exception:
# i += 1
# logger.error("代理获取失败,尝试重试,重试次数%s" % (i, ))
# else:
# logger.info('notify address')
# notify_ip_address()
#
# def notify_ip_address():
# """
# 通知专利网我们的ip地址,
# 这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
# :return:
# """
# resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES)
# # logger.debug(resp.text)
# ip_address = json.loads(resp.text)
# if ctrl.PROXIES is not None:
# if ip_address.get('IP') == ctrl.PROXIES.get('http').split(':')[0]:
# return resp.text
# else:
# raise Exception('ip error')
# else:
# return resp.text
#
# def update_cookies(cookies=None):
# """
# 更新或获取cookies
# :param cookies:
# :return:
# """
# if cookies is None:
# ctrl.COOKIES = requests.get(url=url_index.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT).cookies
#
# else:
# ctrl.COOKIES = cookies
#
# logger.info(ctrl.COOKIES)
# if len(ctrl.COOKIES) == 0:
# logger.error('cookie有问题')
# raise ReadTimeout('cookie有问题')
#
# Path: service/sipoknn.py
# def get_captcha_result(model_path, filename):
# """
# 验证码解析
# :param model_path: 之前训练的模型位置
# :param filename: 需要解析的验证码全路径
# :return: 解析结果
# """
# image = np.asarray(Image.open(filename).convert('L'))
# image = (image > 135) * 255
# letters = [image[:, 6:18].reshape(20 * 12), image[:, 19:31].reshape(20 * 12), image[:, 33:45].reshape(20 * 12),
# image[:, 45:57].reshape(20 * 12)]
# model = joblib.load(model_path)
# result = ''
# for c in model.predict(letters):
# result += c
# return eval(result)
. Output only the next line. | notify_ip_address() |
Given the following code snippet before the placeholder: <|code_start|> if USE_PROXY:
try:
if ctrl.PROXIES is not None:
notify_ip_address()
logger.info('当前已有登录状态')
return True
except:
pass
return False
def login(username=None, password=None):
"""
登录API
:return: True: 登录成功; False: 登录失败
"""
if username is None or password is None:
username = account.username
password = account.password
ctrl.BEING_LOG = True
if check_login_status():
ctrl.BEING_LOG = False
return True
error_times = 0
while True:
try:
# logger.debug("before proxy")
update_proxy()
# logger.debug("before cookie")
<|code_end|>
, predict the next line using imports from the current file:
import base64
import configparser
import click
import requests
import controller as ctrl
from logbook import *
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
and context including class names, function names, and sometimes code from other files:
# Path: config/base_settings.py
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
#
# TIMEOUT = 10
#
# USE_PROXY = False
#
# Path: controller/url_config.py
#
# Path: service/proxy.py
# def update_proxy():
# """
# 获取并校验代理ip地址
# :return:
# """
# if bs.USE_PROXY:
# i = 0
# while True:
# try:
# get_proxy()
# notify_ip_address()
# return True
# except Exception:
# i += 1
# logger.error("代理获取失败,尝试重试,重试次数%s" % (i, ))
# else:
# logger.info('notify address')
# notify_ip_address()
#
# def notify_ip_address():
# """
# 通知专利网我们的ip地址,
# 这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
# :return:
# """
# resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES)
# # logger.debug(resp.text)
# ip_address = json.loads(resp.text)
# if ctrl.PROXIES is not None:
# if ip_address.get('IP') == ctrl.PROXIES.get('http').split(':')[0]:
# return resp.text
# else:
# raise Exception('ip error')
# else:
# return resp.text
#
# def update_cookies(cookies=None):
# """
# 更新或获取cookies
# :param cookies:
# :return:
# """
# if cookies is None:
# ctrl.COOKIES = requests.get(url=url_index.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT).cookies
#
# else:
# ctrl.COOKIES = cookies
#
# logger.info(ctrl.COOKIES)
# if len(ctrl.COOKIES) == 0:
# logger.error('cookie有问题')
# raise ReadTimeout('cookie有问题')
#
# Path: service/sipoknn.py
# def get_captcha_result(model_path, filename):
# """
# 验证码解析
# :param model_path: 之前训练的模型位置
# :param filename: 需要解析的验证码全路径
# :return: 解析结果
# """
# image = np.asarray(Image.open(filename).convert('L'))
# image = (image > 135) * 255
# letters = [image[:, 6:18].reshape(20 * 12), image[:, 19:31].reshape(20 * 12), image[:, 33:45].reshape(20 * 12),
# image[:, 45:57].reshape(20 * 12)]
# model = joblib.load(model_path)
# result = ''
# for c in model.predict(letters):
# result += c
# return eval(result)
. Output only the next line. | update_cookies() |
Given the code snippet: <|code_start|> while True:
try:
password = click.prompt('密码出错,请填写')
self.password = password
break
except:
pass
# 账户信息的单例
account = Account()
def change_to_base64(source):
"""
将参数进行base64加密
:param source:
:return:
"""
return str(base64.b64encode(bytes(source, encoding='utf-8')), 'utf-8')
def get_captcha():
"""
获取验证码
:return:
"""
resp = requests.get(url=url_captcha.get('url'), cookies=ctrl.COOKIES, proxies=ctrl.PROXIES)
with open('captcha.png', 'wb') as f:
f.write(resp.content)
<|code_end|>
, generate the next line using the imports in this file:
import base64
import configparser
import click
import requests
import controller as ctrl
from logbook import *
from config.base_settings import CAPTCHA_MODEL_NAME, TIMEOUT, USE_PROXY
from controller.url_config import url_captcha, url_login
from service.proxy import update_proxy, notify_ip_address, update_cookies
from service.sipoknn import get_captcha_result
and context (functions, classes, or occasionally code) from other files:
# Path: config/base_settings.py
# CAPTCHA_MODEL_NAME = os.path.join(BASE_PATH, 'res', 'captcha', 'sipo3.job')
#
# TIMEOUT = 10
#
# USE_PROXY = False
#
# Path: controller/url_config.py
#
# Path: service/proxy.py
# def update_proxy():
# """
# 获取并校验代理ip地址
# :return:
# """
# if bs.USE_PROXY:
# i = 0
# while True:
# try:
# get_proxy()
# notify_ip_address()
# return True
# except Exception:
# i += 1
# logger.error("代理获取失败,尝试重试,重试次数%s" % (i, ))
# else:
# logger.info('notify address')
# notify_ip_address()
#
# def notify_ip_address():
# """
# 通知专利网我们的ip地址,
# 这个网站比较特别,每当有陌生ip地址时,都需要通过这个方法向网站发送一次请求先。
# :return:
# """
# resp = requests.post(url_pre_execute.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT, cookies=ctrl.COOKIES)
# # logger.debug(resp.text)
# ip_address = json.loads(resp.text)
# if ctrl.PROXIES is not None:
# if ip_address.get('IP') == ctrl.PROXIES.get('http').split(':')[0]:
# return resp.text
# else:
# raise Exception('ip error')
# else:
# return resp.text
#
# def update_cookies(cookies=None):
# """
# 更新或获取cookies
# :param cookies:
# :return:
# """
# if cookies is None:
# ctrl.COOKIES = requests.get(url=url_index.get('url'), proxies=ctrl.PROXIES, timeout=bs.TIMEOUT).cookies
#
# else:
# ctrl.COOKIES = cookies
#
# logger.info(ctrl.COOKIES)
# if len(ctrl.COOKIES) == 0:
# logger.error('cookie有问题')
# raise ReadTimeout('cookie有问题')
#
# Path: service/sipoknn.py
# def get_captcha_result(model_path, filename):
# """
# 验证码解析
# :param model_path: 之前训练的模型位置
# :param filename: 需要解析的验证码全路径
# :return: 解析结果
# """
# image = np.asarray(Image.open(filename).convert('L'))
# image = (image > 135) * 255
# letters = [image[:, 6:18].reshape(20 * 12), image[:, 19:31].reshape(20 * 12), image[:, 33:45].reshape(20 * 12),
# image[:, 45:57].reshape(20 * 12)]
# model = joblib.load(model_path)
# result = ''
# for c in model.predict(letters):
# result += c
# return eval(result)
. Output only the next line. | result = get_captcha_result(CAPTCHA_MODEL_NAME, 'captcha.png') |
Predict the next line after this snippet: <|code_start|>"""
文件清理模块,调用后清理所有log和output文件
"""
logging.getLogger(__name__).setLevel(logging.DEBUG)
def clean_outputs():
"""
清理输出文件夹的内容
:return:
"""
<|code_end|>
using the current file's imports:
import logging
import os
import sys
import shutil
from config.base_settings import OUTPUT_PATH
and any relevant context from other files:
# Path: config/base_settings.py
# OUTPUT_PATH = os.path.join(BASE_PATH, 'output')
. Output only the next line. | output_list = os.listdir(OUTPUT_PATH) |
Given the following code snippet before the placeholder: <|code_start|>
# 添加表
def addSheet(self, sheet):
return self.getExcel("WRITE").add_sheet(sheet)
# 获取表
def getSheet(self, which, mode):
try:
wb = self.getExcel(mode)
if isinstance(which, str):
if mode.upper() == "READ":
return wb.sheet_by_name(which)
else:
return None
elif isinstance(which, int):
if mode.upper() == "READ":
return wb.sheet_by_index(which)
elif mode.upper() == "WRITE":
return wb.get_sheet(which)
else:
return None
else:
return None
except Exception as e:
print("excel报错-------------------------------" + str(e))
return
# 调用这个方法生成editor,主要模仿安卓的SharedPreferences
def edit(self):
wb = self.getExcel("write")
<|code_end|>
, predict the next line using imports from the current file:
import xlwt
import xlrd
import xlsxwriter
from xlutils.copy import copy
from util.excel.ExcelEditor import ExcelEditor
and context including class names, function names, and sometimes code from other files:
# Path: util/excel/ExcelEditor.py
# class ExcelEditor:
#
# def __init__(self, wb, fileName):
# self.__workBook = wb
# self.__fileName = fileName
#
# # 各种写操作执行完之后,一定要调用这个方法保存
# def commit(self):
# self.__workBook.save(self.__fileName)
#
# def getSheet(self, index):
# return self.__workBook.get_sheet(index)
. Output only the next line. | return ExcelEditor(wb, self.__fileName) |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
"""
Created on 2018/2/27
@author: will4906
"""
logging.getLogger(__name__).setLevel(logging.DEBUG)
<|code_end|>
. Use current file imports:
(import logging
import requests
import controller as ctrl
from service.account import *
from service.proxy import check_proxy)
and context including class names, function names, or small code snippets from other files:
# Path: service/proxy.py
# def check_proxy(func):
# """
# 校验代理的装饰器,使用情况较特殊,只针对请求超时异常
# :param func:
# :return:
# """
# def wrapper(*args, **kwargs):
# for i in range(5):
# try:
# resp = func(*args, **kwargs)
# return resp
# except RequestException:
# update_proxy()
# raise Exception('函数重试5次,仍无法成功')
# return wrapper
. Output only the next line. | @check_proxy |
Given snippet: <|code_start|> transcoded = collections.OrderedDict()
cls = type(self)
for field in self.fields:
descriptor = getattr(cls, field)
value = self._fields[field]
# Note that the descriptor handles nested fields and Nones
transcoded[field] = descriptor.encode(value)
return transcoded
def detranscode(self, data):
''' Apply the natively deserialized ordereddict into
self._fields.
'''
cls = type(self)
for field in self.fields:
descriptor = getattr(cls, field)
# Make sure we can optionally support configs with incomplete data
if data is None or field not in data:
logger.warning('Healed config w/ missing field: ' + field)
else:
try:
# Note that the descriptor handles nested fields
self._fields[field] = descriptor.decode(data[field])
except Exception as exc:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pathlib
import collections
import copy
import webbrowser
import yaml
import inspect
import os
import warnings
import logging
from golix import Ghid
from golix import Secret
from .exceptions import ConfigError
from .exceptions import ConfigIncomplete
from .exceptions import ConfigMissing
and context:
# Path: hypergolix/exceptions.py
# class ConfigError(HypergolixException, RuntimeError):
# ''' This exception (or a subclass thereof) is raised for all failed
# operations with configuration.
# '''
# pass
#
# Path: hypergolix/exceptions.py
# class ConfigIncomplete(ConfigError):
# ''' This exception is raised when attempting to load an incomplete
# configuration.
# '''
# pass
#
# Path: hypergolix/exceptions.py
# class ConfigMissing(ConfigError, FileNotFoundError):
# ''' This exception is raised when attempting to load an incomplete
# configuration.
# '''
# pass
which might include code, classes, or functions. Output only the next line. | raise ConfigError('Failed to decode field: ' + |
Continue the code snippet: <|code_start|> # Get the environment config setting, if it exists. If not, use a
# long random path which we "know" will not exist.
envpath = os.getenv(
'HYPERGOLIX_HOME',
default = '/qdubuddfsyvfafhlqcqetfkokykqeulsguoasnzjkc'
)
appdatapath = os.getenv(
'LOCALAPPDATA',
default = '/qdubuddfsyvfafhlqcqetfkokykqeulsguoasnzjkc'
)
search_order = []
search_order.append(pathlib.Path(envpath))
search_order.append(pathlib.Path('.').absolute())
search_order.append(pathlib.Path.home() / '.hypergolix')
# It really doesn't matter if we do this on Windows too, since it'll
# just not exist.
search_order.append(pathlib.Path('/etc/hypergolix'))
search_order.append(pathlib.Path(appdatapath) / 'Hypergolix')
# Collapse the nested loop into a single for loop with a list comp
fnames = {cls.TARGET_FNAME, *cls.OLD_FNAMES}
fpaths = (dirpath / fname for dirpath in search_order
for fname in fnames)
# Check all of those paths
for fpath in fpaths:
if fpath.exists():
break
# Not found; raise.
else:
<|code_end|>
. Use current file imports:
import pathlib
import collections
import copy
import webbrowser
import yaml
import inspect
import os
import warnings
import logging
from golix import Ghid
from golix import Secret
from .exceptions import ConfigError
from .exceptions import ConfigIncomplete
from .exceptions import ConfigMissing
and context (classes, functions, or code) from other files:
# Path: hypergolix/exceptions.py
# class ConfigError(HypergolixException, RuntimeError):
# ''' This exception (or a subclass thereof) is raised for all failed
# operations with configuration.
# '''
# pass
#
# Path: hypergolix/exceptions.py
# class ConfigIncomplete(ConfigError):
# ''' This exception is raised when attempting to load an incomplete
# configuration.
# '''
# pass
#
# Path: hypergolix/exceptions.py
# class ConfigMissing(ConfigError, FileNotFoundError):
# ''' This exception is raised when attempting to load an incomplete
# configuration.
# '''
# pass
. Output only the next line. | raise ConfigMissing() |
Based on the snippet: <|code_start|>------------------------------------------------------
'''
# Global dependencies
# Internal deps
# ###############################################
# Boilerplate
# ###############################################
logger = logging.getLogger(__name__)
# Control * imports.
__all__ = [
# 'Inquisitor',
]
# ###############################################
# Library
# ###############################################
<|code_end|>
, predict the immediate next line with the help of imports:
import weakref
import traceback
import asyncio
import operator
import inspect
import json
import pickle
import logging
from loopa.utils import triplicated
from loopa.utils import Triplicate
from .exceptions import DeadObject
from .exceptions import LocallyImmutable
from .exceptions import Unsharable
from .utils import run_coroutine_loopsafe
from .utils import call_coroutine_threadsafe
from .utils import ApiID
from .utils import _reap_wrapped_task
from .embed import TriplicateAPI
from .hypothetical import public_api
from .hypothetical import fixture_api
and context (classes, functions, sometimes code) from other files:
# Path: hypergolix/exceptions.py
# class DeadObject(IPCError, PersistenceError, TypeError):
# ''' Raised when operations are attempted on a local object that is
# already dead.
# '''
# pass
#
# Path: hypergolix/exceptions.py
# class LocallyImmutable(IPCError, TypeError):
# ''' Raised when an object is locally immutable. That means either:
# 1. the object is static
# 2. the object is not "owned" by the currently-logged-in Hypergolix
# process.
# '''
# pass
#
# Path: hypergolix/exceptions.py
# class Unsharable(IPCError, TypeError):
# ''' Raised when an object cannot be shared, typically because it is
# private.
# '''
# pass
#
# Path: hypergolix/utils.py
# async def run_coroutine_loopsafe(coro, target_loop):
# ''' Threadsafe, asyncsafe (ie non-loop-blocking) call to run a coro
# in a different event loop and return the result. Wrap in an asyncio
# future (or await it) to access the result.
#
# Resolves the event loop for the current thread by calling
# asyncio.get_event_loop(). Because of internal use of await, CANNOT
# be called explicitly from a third loop.
# '''
# # This returns a concurrent.futures.Future, so we need to wait for it, but
# # we cannot block our event loop, soooo...
# thread_future = asyncio.run_coroutine_threadsafe(coro, target_loop)
# return (await await_sync_future(thread_future))
#
# Path: hypergolix/utils.py
# def call_coroutine_threadsafe(coro, loop):
# ''' Wrapper on asyncio.run_coroutine_threadsafe that makes a coro
# behave as if it were called synchronously. In other words, instead
# of returning a future, it raises the exception or returns the coro's
# result.
#
# Leaving loop as default None will result in asyncio inferring the
# loop from the default from the current context (aka usually thread).
# '''
# fut = asyncio.run_coroutine_threadsafe(
# coro = coro,
# loop = loop
# )
#
# # Block on completion of coroutine and then raise any created exception
# exc = fut.exception()
# if exc:
# raise exc
#
# return fut.result()
#
# Path: hypergolix/utils.py
# class ApiID(Ghid):
# ''' Subclass Ghid in a way that makes the API ID seem like a normal
# 64-byte string.
#
# Remind me again why the hell I'm subclassing ghid for this when I'm
# ending up removing just about everything that makes it a ghid?
# '''
#
# def __init__(self, address, algo=None):
# ''' Wrap the normal Ghid creation with a forced algo.
# '''
# if len(address) != 64:
# raise ValueError('Improper API ID length.')
#
# elif algo is None:
# algo = 0
#
# elif algo != 0:
# raise ValueError('Improper API ID format.')
#
# super().__init__(algo, address)
#
# def __repr__(self):
# ''' Hide that this is a ghid.
# '''
# return type(self).__name__ + '(' + repr(self.address) + ')'
#
# @property
# def algo(self):
# return self._algo
#
# @algo.setter
# def algo(self, value):
# # Currently, ensure algo is b'\x00'
# if value == 0:
# self._algo = value
# else:
# raise ValueError('Invalid address algorithm.')
#
# @property
# def address(self):
# return self._address
#
# @address.setter
# def address(self, address):
# self._address = address
#
# @classmethod
# def pseudorandom(cls):
# return super().pseudorandom(0)
#
# Path: hypergolix/utils.py
# def _reap_wrapped_task(task):
# ''' Reap a task that was wrapped to never raise and then
# executed autonomously using ensure_future.
# '''
# task.result()
#
# Path: hypergolix/embed.py
# class TriplicateAPI(Triplicate, API):
# ''' Combine loopa's triplicate metaclass with hypothetical.API.
# '''
#
# Path: hypergolix/hypothetical.py
# def public_api(func):
# ''' Decorator to automatically mark the object as the normal thing.
# '''
#
# def fixture_closure(fixture_func, public_func=func):
# ''' Defines the decorator for @method.fixture.
# '''
# # This is the actual __fixture__ method, to be defined via decorator
# public_func.__fixture__ = fixture_func
# return public_func
#
# def interface_closure(interface_func, public_func=func):
# ''' Defines the decorator for @method.fixture.
# '''
# # This is the actual __interface method, to be defined via decorator
# public_func.__interface__ = interface_func
# return public_func
#
# func.fixture = fixture_closure
# func.interface = interface_closure
#
# # This denotes that it is an API
# func.__is_api__ = True
#
# return func
#
# Path: hypergolix/hypothetical.py
# def fixture_api(func):
# ''' Decorator to mark the method as a fixture-only object.
# '''
# ''' Decorator to automatically mark the object as the normal thing.
# '''
#
# # Huh, well, this is easy.
# func.__is_fixture__ = True
# return func
. Output only the next line. | class ObjCore(metaclass=TriplicateAPI): |
Based on the snippet: <|code_start|> return self.createIndex(row, column, self.vocabularyList[row])
def find(self, entry):
# TODO
for i, e in enumerate(self.vocabularyList):
for key in entry.keys():
if key in e and e[key] != entry[key]:
break
else:
return self.index(i)
def insertRows(self, row, count, parent=QModelIndex()):
self.beginInsertRows(parent, row, row+count-1)
for i in range(row, row + count):
self.vocabularyList.insert(i, None)
self.endInsertRows()
self._buildVocabularyHeadwordIndex()
return True
def removeRows(self, row, count, parent=QModelIndex()):
self.beginRemoveRows(parent, row, row+count-1)
for i in range(row, row + count):
del self.vocabularyList[row]
self.endRemoveRows()
self._buildVocabularyHeadwordIndex()
return True
<|code_end|>
, predict the immediate next line with the help of imports:
import re
import locale
import codecs
import os
import sys
import functools
from datetime import date
from PyQt4.QtCore import Qt, SIGNAL, QVariant, QModelIndex, QAbstractListModel
from PyQt4.QtGui import QWidget, QAction, QAbstractItemView, QShortcut, QMenu
from PyKDE4.kdeui import KIcon, KMenu, KStandardAction, KMessageBox
from PyKDE4.kdecore import i18n, KUrl
from PyKDE4.kio import KFileDialog
from eclectusqt.forms import VocabularyPageUI
from eclectusqt import util
and context (classes, functions, sometimes code) from other files:
# Path: eclectusqt/forms/VocabularyPageUI.py
# class MyQListView(QtGui.QListView):
# class Ui_Form(object):
# def keyPressEvent(self, event):
# def setupUi(self, Form):
# def retranslateUi(self, Form):
#
# Path: eclectusqt/util.py
# def getIcon(fileName):
# def getData(fileName):
# def getLocalData(fileName):
# def getDataPaths():
# def readConfigString(config, option, default=None):
# def _readConfig(config, option, default, conversionFunc):
# def readConfigInt(config, option, default=None):
# def __init__(self, parent):
# def javaScriptConsoleMessage(self, message, lineNumber, sourceID):
# def javaScriptAlert(self, frame, msg):
# class HandyWebpage(QWebPage):
. Output only the next line. | class VocabularyPage(QWidget, VocabularyPageUI.Ui_Form): |
Here is a snippet: <|code_start|> lambda: self._removeAction.setEnabled(
self.vocabularyListView.selectionModel().hasSelection()))
self.connect(self._removeAction, SIGNAL("triggered(bool)"),
lambda: self.vocabularyModel.remove(
self.vocabularyListView.selectedIndexes()))
def showEvent(self, event):
if not self.initialised:
self.initialised = True
self.exportHistoryButton.setIcon(KIcon('document-export'))
exportMenu = QMenu(self.exportHistoryButton)
for exporter, actionText in self.EXPORTERS:
exportAction = QAction(actionText, self)
self.connect(exportAction, SIGNAL("triggered(bool)"),
functools.partial(self.doExport,
exporter(pluginConfig=self.pluginConfig)))
exportMenu.addAction(exportAction)
self.exportHistoryButton.setMenu(exportMenu)
self.loadVocabulary()
QWidget.showEvent(self, event)
def loadVocabulary(self):
if self.vocabularyChanged == None:
csv = csvImporter = CSVImporter(pluginConfig=self.pluginConfig)
<|code_end|>
. Write the next line using the current file imports:
import re
import locale
import codecs
import os
import sys
import functools
from datetime import date
from PyQt4.QtCore import Qt, SIGNAL, QVariant, QModelIndex, QAbstractListModel
from PyQt4.QtGui import QWidget, QAction, QAbstractItemView, QShortcut, QMenu
from PyKDE4.kdeui import KIcon, KMenu, KStandardAction, KMessageBox
from PyKDE4.kdecore import i18n, KUrl
from PyKDE4.kio import KFileDialog
from eclectusqt.forms import VocabularyPageUI
from eclectusqt import util
and context from other files:
# Path: eclectusqt/forms/VocabularyPageUI.py
# class MyQListView(QtGui.QListView):
# class Ui_Form(object):
# def keyPressEvent(self, event):
# def setupUi(self, Form):
# def retranslateUi(self, Form):
#
# Path: eclectusqt/util.py
# def getIcon(fileName):
# def getData(fileName):
# def getLocalData(fileName):
# def getDataPaths():
# def readConfigString(config, option, default=None):
# def _readConfig(config, option, default, conversionFunc):
# def readConfigInt(config, option, default=None):
# def __init__(self, parent):
# def javaScriptConsoleMessage(self, message, lineNumber, sourceID):
# def javaScriptAlert(self, frame, msg):
# class HandyWebpage(QWebPage):
, which may include functions, classes, or code. Output only the next line. | fileName = util.getLocalData('eclectus.csv') |
Given snippet: <|code_start|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Component chooser plugin for accessing characters by searching for their
components.
@todo Fix: Component view has buggy updates, lesser characters are highlighted than
actually chosen
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from PyQt4.QtCore import Qt, SIGNAL, QByteArray
from PyQt4.QtGui import QWidget, QIcon
from PyQt4.QtWebKit import QWebPage
from PyKDE4.kdeui import KIcon
from PyKDE4.kdecore import i18n
from eclectusqt.forms import ComponentPageUI
from eclectusqt import util
from libeclectus.componentview import ComponentView
from libeclectus.util import decodeBase64
and context:
# Path: eclectusqt/forms/ComponentPageUI.py
# class Ui_Form(object):
# def setupUi(self, Form):
# def retranslateUi(self, Form):
#
# Path: eclectusqt/util.py
# def getIcon(fileName):
# def getData(fileName):
# def getLocalData(fileName):
# def getDataPaths():
# def readConfigString(config, option, default=None):
# def _readConfig(config, option, default, conversionFunc):
# def readConfigInt(config, option, default=None):
# def __init__(self, parent):
# def javaScriptConsoleMessage(self, message, lineNumber, sourceID):
# def javaScriptAlert(self, frame, msg):
# class HandyWebpage(QWebPage):
#
# Path: libeclectus/componentview.py
# class ComponentView:
# def __init__(self, charDbInst=None, **options):
# self.charDB = charDbInst or chardb.CharacterDB(**options)
#
# self.radicalFormEquivalentCharacterMap \
# = self.charDB.radicalFormEquivalentCharacterMap
#
# def getComponentSearchTable(self, components=[],
# includeEquivalentRadicalForms=False, includeSimilarCharacters=False):
# """
# Gets a table of minimal components for searching characters by
# component. Annotates given characters and characters that would
# result in zero results if selected.
# """
# componentsByStrokeCount = self.charDB.minimalCharacterComponents
#
# # TODO replace
# selected = components
# #selected = set([self.charDB.preferRadicalFormForCharacter(char) \
# #for char in components])
#
# if components:
# currentResultRadicals = self.charDB.getComponentsWithResults(
# components,
# includeEquivalentRadicalForms=includeEquivalentRadicalForms,
# includeSimilarCharacters=includeSimilarCharacters)
# else:
# currentResultRadicals = None
#
# htmlList = []
# htmlList.append('<table class="component">')
#
# strokeCountList = componentsByStrokeCount.keys()
# strokeCountList.sort()
# for strokeCount in strokeCountList:
# htmlList.append('<tr><th>%d</th><td>' % strokeCount)
# for form in sorted(componentsByStrokeCount[strokeCount]):
# if form in selected:
# formClass = 'selectedComponent'
# elif currentResultRadicals != None \
# and form not in currentResultRadicals:
# formClass = 'zeroResultComponent'
# else:
# formClass = ''
#
# formBase64 = util.encodeBase64(form)
# htmlList.append(
# '<a class="character" href="#component(%s)">' % formBase64 \
# + '<span class="component %s" id="c%s">%s</span>' \
# % (formClass, formBase64, form) \
# + '</a>')
# htmlList.append('</td></tr>')
# htmlList.append('</table>')
#
# return "\n".join(htmlList)
#
# def getComponentSearchResult(self, components,
# includeEquivalentRadicalForms=False, includeSimilarCharacters=False):
# """Gets a list of characters containing the given components."""
# chars = self.charDB.getCharactersForComponents(components,
# includeEquivalentRadicalForms=includeEquivalentRadicalForms,
# includeSimilarCharacters=includeSimilarCharacters)
#
# if chars:
# charLinks = []
# for char in chars:
# charLinks.append(
# '<a class="character" href="#lookup(%s)">%s</a>' \
# % (util.encodeBase64(char), char))
# html = '<span class="character">%s</span>' % ' '.join(charLinks)
# else:
# html = '<p class="meta">%s</p>' % gettext('No entries')
#
# return html, len(chars)
#
# Path: libeclectus/util.py
# def decodeBase64(string):
# return base64.b64decode(string).decode('utf8')
which might include code, classes, or functions. Output only the next line. | class ComponentPage(QWidget, ComponentPageUI.Ui_Form): |
Continue the code snippet: <|code_start|>as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
class ComponentPage(QWidget, ComponentPageUI.Ui_Form):
def __init__(self, mainWindow, renderThread, pluginConfig=None):
QWidget.__init__(self, mainWindow)
self.mainWindow = mainWindow
self.renderThread = renderThread
self.pluginConfig = pluginConfig
# set up UI
self.setupUi(self)
self.databaseUrl = None
if self.pluginConfig:
<|code_end|>
. Use current file imports:
import re
from PyQt4.QtCore import Qt, SIGNAL, QByteArray
from PyQt4.QtGui import QWidget, QIcon
from PyQt4.QtWebKit import QWebPage
from PyKDE4.kdeui import KIcon
from PyKDE4.kdecore import i18n
from eclectusqt.forms import ComponentPageUI
from eclectusqt import util
from libeclectus.componentview import ComponentView
from libeclectus.util import decodeBase64
and context (classes, functions, or code) from other files:
# Path: eclectusqt/forms/ComponentPageUI.py
# class Ui_Form(object):
# def setupUi(self, Form):
# def retranslateUi(self, Form):
#
# Path: eclectusqt/util.py
# def getIcon(fileName):
# def getData(fileName):
# def getLocalData(fileName):
# def getDataPaths():
# def readConfigString(config, option, default=None):
# def _readConfig(config, option, default, conversionFunc):
# def readConfigInt(config, option, default=None):
# def __init__(self, parent):
# def javaScriptConsoleMessage(self, message, lineNumber, sourceID):
# def javaScriptAlert(self, frame, msg):
# class HandyWebpage(QWebPage):
#
# Path: libeclectus/componentview.py
# class ComponentView:
# def __init__(self, charDbInst=None, **options):
# self.charDB = charDbInst or chardb.CharacterDB(**options)
#
# self.radicalFormEquivalentCharacterMap \
# = self.charDB.radicalFormEquivalentCharacterMap
#
# def getComponentSearchTable(self, components=[],
# includeEquivalentRadicalForms=False, includeSimilarCharacters=False):
# """
# Gets a table of minimal components for searching characters by
# component. Annotates given characters and characters that would
# result in zero results if selected.
# """
# componentsByStrokeCount = self.charDB.minimalCharacterComponents
#
# # TODO replace
# selected = components
# #selected = set([self.charDB.preferRadicalFormForCharacter(char) \
# #for char in components])
#
# if components:
# currentResultRadicals = self.charDB.getComponentsWithResults(
# components,
# includeEquivalentRadicalForms=includeEquivalentRadicalForms,
# includeSimilarCharacters=includeSimilarCharacters)
# else:
# currentResultRadicals = None
#
# htmlList = []
# htmlList.append('<table class="component">')
#
# strokeCountList = componentsByStrokeCount.keys()
# strokeCountList.sort()
# for strokeCount in strokeCountList:
# htmlList.append('<tr><th>%d</th><td>' % strokeCount)
# for form in sorted(componentsByStrokeCount[strokeCount]):
# if form in selected:
# formClass = 'selectedComponent'
# elif currentResultRadicals != None \
# and form not in currentResultRadicals:
# formClass = 'zeroResultComponent'
# else:
# formClass = ''
#
# formBase64 = util.encodeBase64(form)
# htmlList.append(
# '<a class="character" href="#component(%s)">' % formBase64 \
# + '<span class="component %s" id="c%s">%s</span>' \
# % (formClass, formBase64, form) \
# + '</a>')
# htmlList.append('</td></tr>')
# htmlList.append('</table>')
#
# return "\n".join(htmlList)
#
# def getComponentSearchResult(self, components,
# includeEquivalentRadicalForms=False, includeSimilarCharacters=False):
# """Gets a list of characters containing the given components."""
# chars = self.charDB.getCharactersForComponents(components,
# includeEquivalentRadicalForms=includeEquivalentRadicalForms,
# includeSimilarCharacters=includeSimilarCharacters)
#
# if chars:
# charLinks = []
# for char in chars:
# charLinks.append(
# '<a class="character" href="#lookup(%s)">%s</a>' \
# % (util.encodeBase64(char), char))
# html = '<span class="character">%s</span>' % ' '.join(charLinks)
# else:
# html = '<p class="meta">%s</p>' % gettext('No entries')
#
# return html, len(chars)
#
# Path: libeclectus/util.py
# def decodeBase64(string):
# return base64.b64decode(string).decode('utf8')
. Output only the next line. | self.includeSimilar = util.readConfigString(self.pluginConfig, |
Predict the next line for this snippet: <|code_start|> self.connect(self.includeSimilarButton, SIGNAL("clicked(bool)"),
self.componentIncludeSimilar)
self.connect(self.componentView, SIGNAL("linkClicked(const QUrl &)"),
self.componentClicked)
self.connect(self.componentView, SIGNAL("loadFinished(bool)"),
self.componentViewLoaded)
self.connect(self.componentResultView,
SIGNAL("linkClicked(const QUrl &)"), self.componentResultClicked)
self.connect(self.componentEdit,
SIGNAL("textChanged(const QString &)"),
self.componentEditChanged)
self.componentView.page().setLinkDelegationPolicy(
QWebPage.DelegateAllLinks)
self.componentResultView.page().setLinkDelegationPolicy(
QWebPage.DelegateAllLinks)
self.initialised = False
def showEvent(self, event):
if not self.initialised:
self.initialised = True
self.includeSimilarButton.setIcon(
QIcon(util.getIcon('similarforms.png')))
self.includeVariantsButton.setIcon(
QIcon(util.getIcon('radicalvariant.png')))
self.slotSettingsChanged()
<|code_end|>
with the help of current file imports:
import re
from PyQt4.QtCore import Qt, SIGNAL, QByteArray
from PyQt4.QtGui import QWidget, QIcon
from PyQt4.QtWebKit import QWebPage
from PyKDE4.kdeui import KIcon
from PyKDE4.kdecore import i18n
from eclectusqt.forms import ComponentPageUI
from eclectusqt import util
from libeclectus.componentview import ComponentView
from libeclectus.util import decodeBase64
and context from other files:
# Path: eclectusqt/forms/ComponentPageUI.py
# class Ui_Form(object):
# def setupUi(self, Form):
# def retranslateUi(self, Form):
#
# Path: eclectusqt/util.py
# def getIcon(fileName):
# def getData(fileName):
# def getLocalData(fileName):
# def getDataPaths():
# def readConfigString(config, option, default=None):
# def _readConfig(config, option, default, conversionFunc):
# def readConfigInt(config, option, default=None):
# def __init__(self, parent):
# def javaScriptConsoleMessage(self, message, lineNumber, sourceID):
# def javaScriptAlert(self, frame, msg):
# class HandyWebpage(QWebPage):
#
# Path: libeclectus/componentview.py
# class ComponentView:
# def __init__(self, charDbInst=None, **options):
# self.charDB = charDbInst or chardb.CharacterDB(**options)
#
# self.radicalFormEquivalentCharacterMap \
# = self.charDB.radicalFormEquivalentCharacterMap
#
# def getComponentSearchTable(self, components=[],
# includeEquivalentRadicalForms=False, includeSimilarCharacters=False):
# """
# Gets a table of minimal components for searching characters by
# component. Annotates given characters and characters that would
# result in zero results if selected.
# """
# componentsByStrokeCount = self.charDB.minimalCharacterComponents
#
# # TODO replace
# selected = components
# #selected = set([self.charDB.preferRadicalFormForCharacter(char) \
# #for char in components])
#
# if components:
# currentResultRadicals = self.charDB.getComponentsWithResults(
# components,
# includeEquivalentRadicalForms=includeEquivalentRadicalForms,
# includeSimilarCharacters=includeSimilarCharacters)
# else:
# currentResultRadicals = None
#
# htmlList = []
# htmlList.append('<table class="component">')
#
# strokeCountList = componentsByStrokeCount.keys()
# strokeCountList.sort()
# for strokeCount in strokeCountList:
# htmlList.append('<tr><th>%d</th><td>' % strokeCount)
# for form in sorted(componentsByStrokeCount[strokeCount]):
# if form in selected:
# formClass = 'selectedComponent'
# elif currentResultRadicals != None \
# and form not in currentResultRadicals:
# formClass = 'zeroResultComponent'
# else:
# formClass = ''
#
# formBase64 = util.encodeBase64(form)
# htmlList.append(
# '<a class="character" href="#component(%s)">' % formBase64 \
# + '<span class="component %s" id="c%s">%s</span>' \
# % (formClass, formBase64, form) \
# + '</a>')
# htmlList.append('</td></tr>')
# htmlList.append('</table>')
#
# return "\n".join(htmlList)
#
# def getComponentSearchResult(self, components,
# includeEquivalentRadicalForms=False, includeSimilarCharacters=False):
# """Gets a list of characters containing the given components."""
# chars = self.charDB.getCharactersForComponents(components,
# includeEquivalentRadicalForms=includeEquivalentRadicalForms,
# includeSimilarCharacters=includeSimilarCharacters)
#
# if chars:
# charLinks = []
# for char in chars:
# charLinks.append(
# '<a class="character" href="#lookup(%s)">%s</a>' \
# % (util.encodeBase64(char), char))
# html = '<span class="character">%s</span>' % ' '.join(charLinks)
# else:
# html = '<p class="meta">%s</p>' % gettext('No entries')
#
# return html, len(chars)
#
# Path: libeclectus/util.py
# def decodeBase64(string):
# return base64.b64decode(string).decode('utf8')
, which may contain function names, class names, or code. Output only the next line. | id = self.renderThread.enqueue(ComponentView, |
Using the snippet: <|code_start|> QIcon(util.getIcon('radicalvariant.png')))
self.slotSettingsChanged()
id = self.renderThread.enqueue(ComponentView,
'getComponentSearchTable', components=[],
includeEquivalentRadicalForms=self.includeVariants,
includeSimilarCharacters=self.includeSimilar)
self.checkForJob(id, 'getComponentSearchTable')
self.componentView.setHtml(i18n('Loading...'))
self.componentResultView.setHtml(i18n('Select components above'))
QWidget.showEvent(self, event)
def componentIncludeVariants(self, show):
self.includeVariants = show
self.updateComponentView()
def componentIncludeSimilar(self, show):
self.includeSimilar = show
self.updateComponentView()
def componentViewLoaded(self, ok):
self.componentView.page().mainFrame().setScrollBarValue(Qt.Vertical,
self.componentViewScroll)
def componentResultClicked(self, url):
cmd = unicode(url.toString()).replace('about:blank#', '')
if cmd.startswith('lookup'):
<|code_end|>
, determine the next line of code. You have imports:
import re
from PyQt4.QtCore import Qt, SIGNAL, QByteArray
from PyQt4.QtGui import QWidget, QIcon
from PyQt4.QtWebKit import QWebPage
from PyKDE4.kdeui import KIcon
from PyKDE4.kdecore import i18n
from eclectusqt.forms import ComponentPageUI
from eclectusqt import util
from libeclectus.componentview import ComponentView
from libeclectus.util import decodeBase64
and context (class names, function names, or code) available:
# Path: eclectusqt/forms/ComponentPageUI.py
# class Ui_Form(object):
# def setupUi(self, Form):
# def retranslateUi(self, Form):
#
# Path: eclectusqt/util.py
# def getIcon(fileName):
# def getData(fileName):
# def getLocalData(fileName):
# def getDataPaths():
# def readConfigString(config, option, default=None):
# def _readConfig(config, option, default, conversionFunc):
# def readConfigInt(config, option, default=None):
# def __init__(self, parent):
# def javaScriptConsoleMessage(self, message, lineNumber, sourceID):
# def javaScriptAlert(self, frame, msg):
# class HandyWebpage(QWebPage):
#
# Path: libeclectus/componentview.py
# class ComponentView:
# def __init__(self, charDbInst=None, **options):
# self.charDB = charDbInst or chardb.CharacterDB(**options)
#
# self.radicalFormEquivalentCharacterMap \
# = self.charDB.radicalFormEquivalentCharacterMap
#
# def getComponentSearchTable(self, components=[],
# includeEquivalentRadicalForms=False, includeSimilarCharacters=False):
# """
# Gets a table of minimal components for searching characters by
# component. Annotates given characters and characters that would
# result in zero results if selected.
# """
# componentsByStrokeCount = self.charDB.minimalCharacterComponents
#
# # TODO replace
# selected = components
# #selected = set([self.charDB.preferRadicalFormForCharacter(char) \
# #for char in components])
#
# if components:
# currentResultRadicals = self.charDB.getComponentsWithResults(
# components,
# includeEquivalentRadicalForms=includeEquivalentRadicalForms,
# includeSimilarCharacters=includeSimilarCharacters)
# else:
# currentResultRadicals = None
#
# htmlList = []
# htmlList.append('<table class="component">')
#
# strokeCountList = componentsByStrokeCount.keys()
# strokeCountList.sort()
# for strokeCount in strokeCountList:
# htmlList.append('<tr><th>%d</th><td>' % strokeCount)
# for form in sorted(componentsByStrokeCount[strokeCount]):
# if form in selected:
# formClass = 'selectedComponent'
# elif currentResultRadicals != None \
# and form not in currentResultRadicals:
# formClass = 'zeroResultComponent'
# else:
# formClass = ''
#
# formBase64 = util.encodeBase64(form)
# htmlList.append(
# '<a class="character" href="#component(%s)">' % formBase64 \
# + '<span class="component %s" id="c%s">%s</span>' \
# % (formClass, formBase64, form) \
# + '</a>')
# htmlList.append('</td></tr>')
# htmlList.append('</table>')
#
# return "\n".join(htmlList)
#
# def getComponentSearchResult(self, components,
# includeEquivalentRadicalForms=False, includeSimilarCharacters=False):
# """Gets a list of characters containing the given components."""
# chars = self.charDB.getCharactersForComponents(components,
# includeEquivalentRadicalForms=includeEquivalentRadicalForms,
# includeSimilarCharacters=includeSimilarCharacters)
#
# if chars:
# charLinks = []
# for char in chars:
# charLinks.append(
# '<a class="character" href="#lookup(%s)">%s</a>' \
# % (util.encodeBase64(char), char))
# html = '<span class="character">%s</span>' % ' '.join(charLinks)
# else:
# html = '<p class="meta">%s</p>' % gettext('No entries')
#
# return html, len(chars)
#
# Path: libeclectus/util.py
# def decodeBase64(string):
# return base64.b64decode(string).decode('utf8')
. Output only the next line. | char = decodeBase64(re.match('lookup\(([^\)]+)\)', cmd).group(1)) |
Given the code snippet: <|code_start|> subCharCount = 3
treeElements = []
curIndex = idx
while len(treeElements) < subCharCount:
curIndex = curIndex + 1
element = decomposition[curIndex]
if type(element) != type(()):
# ids element -> sub decomposition on same layer,
# break down into sub tree
curIndex, tree = splitFlatTree(None, decomposition,
curIndex)
treeElements.append(tree)
else:
# proper character
subChar, zVariant, subDecomposition = element
if subDecomposition:
_, tree = splitFlatTree(subChar, subDecomposition[0])
treeElements.append(tree)
else:
treeElements.append(subChar)
return curIndex, (layout, char, treeElements)
treeList = self.getDecompositionTreeList(char)
if not treeList:
return None
else:
# TODO more sophisticated, get the "nicest" decomposition
_, tree = splitFlatTree(char, treeList[0])
return tree
<|code_end|>
, generate the next line using the imports in this file:
import re
from sqlalchemy import select
from sqlalchemy.sql import and_, or_
from cjklib.dbconnector import getDBConnector
from cjklib.characterlookup import CharacterLookup
from cjklib import exception
from libeclectus.util import cachedproperty, getDatabaseConfiguration
and context (functions, classes, or occasionally code) from other files:
# Path: libeclectus/util.py
# def cachedproperty(fget):
# def fget_wrapper(self):
# try: return fget_wrapper._cached
# except AttributeError:
# fget_wrapper._cached = value = fget(self)
# return value
# def fdel(self):
# try: del fget_wrapper._cached
# except AttributeError: pass
# return property(fget_wrapper, fdel=fdel, doc=fget.__doc__)
#
# def getDatabaseConfiguration(databaseUrl=None):
# configuration = {}
# if databaseUrl:
# configuration['sqlalchemy.url'] = databaseUrl
# if databaseUrl.startswith('sqlite://'):
# configuration['attach'] = ([getDatabaseUrl()]
# + getAttachableDatabases())
# else:
# configuration['sqlalchemy.url'] = getDatabaseUrl()
# configuration['attach'] = getAttachableDatabases()
# return configuration
. Output only the next line. | @cachedproperty |
Given the following code snippet before the placeholder: <|code_start|> @classmethod
def getSimilarPlainEntities(cls, plainEntity, reading):
# TODO the following is not independent of reading and really slow
similar = [plainEntity]
if reading in cls.AMBIGUOUS_INITIALS:
for key in cls.AMBIGUOUS_INITIALS[reading]:
for tpl in cls.AMBIGUOUS_INITIALS[reading][key]:
a, b = tpl
if re.match(a + u'[aeiouü]', plainEntity):
similar.append(b + plainEntity[len(a):])
elif re.match(b + u'[aeiouü]', plainEntity):
similar.append(a + plainEntity[len(b):])
# for all initial derived forms change final
if reading in cls.AMBIGUOUS_FINALS:
for modEntity in similar[:]:
for key in cls.AMBIGUOUS_FINALS[reading]:
for tpl in cls.AMBIGUOUS_FINALS[reading][key]:
a, b = tpl
if re.search(u'[^aeiouü]' + a + '$',
modEntity):
similar.append(modEntity[:-len(a)] + b)
elif re.search(u'[^aeiouü]' + b + '$',
modEntity):
similar.append(modEntity[:-len(b)] + a)
return similar
def __init__(self, language, characterDomain=None, databaseUrl=None,
dbConnectInst=None, ignoreIllegalSettings=False, **options):
dbConnectInst = dbConnectInst or getDBConnector(
<|code_end|>
, predict the next line using imports from the current file:
import re
from sqlalchemy import select
from sqlalchemy.sql import and_, or_
from cjklib.dbconnector import getDBConnector
from cjklib.characterlookup import CharacterLookup
from cjklib import exception
from libeclectus.util import cachedproperty, getDatabaseConfiguration
and context including class names, function names, and sometimes code from other files:
# Path: libeclectus/util.py
# def cachedproperty(fget):
# def fget_wrapper(self):
# try: return fget_wrapper._cached
# except AttributeError:
# fget_wrapper._cached = value = fget(self)
# return value
# def fdel(self):
# try: del fget_wrapper._cached
# except AttributeError: pass
# return property(fget_wrapper, fdel=fdel, doc=fget.__doc__)
#
# def getDatabaseConfiguration(databaseUrl=None):
# configuration = {}
# if databaseUrl:
# configuration['sqlalchemy.url'] = databaseUrl
# if databaseUrl.startswith('sqlite://'):
# configuration['attach'] = ([getDatabaseUrl()]
# + getAttachableDatabases())
# else:
# configuration['sqlalchemy.url'] = getDatabaseUrl()
# configuration['attach'] = getAttachableDatabases()
# return configuration
. Output only the next line. | getDatabaseConfiguration(databaseUrl)) |
Predict the next line after this snippet: <|code_start|>along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
class UpdateVersionBuilder(builder.EntryGeneratorBuilder):
"""Table for keeping track of which date the release was."""
PROVIDES = 'UpdateVersion'
COLUMNS = ['TableName', 'ReleaseDate']
PRIMARY_KEYS = ['TableName']
COLUMN_TYPES = {'TableName': String(255), 'ReleaseDate': DateTime()}
def getGenerator(self):
return iter([])
class KanaExtendedCharacterSetBuilder(object):
BASE_BUILDER_CLASS = None
def toTuple(self, iterator):
for char in iterator:
yield (char, )
def getGenerator(self):
base = self.BASE_BUILDER_CLASS.getGenerator(self)
kanaRanges = []
<|code_end|>
using the current file's imports:
import locale
import types
import sys
import os
import re
import itertools
import xml.sax
import bz2
import codecs
import os.path as path
import codecs
import os.path as path
import codecs
import unicodedata
import glob
from datetime import datetime
from sqlalchemy import Table, Column, Integer, String, Text, DateTime, Index
from sqlalchemy.sql import and_, or_, not_
from sqlalchemy import select
from cjklib import characterlookup
from cjklib.reading import ReadingFactory
from cjklib.build import builder, cli, warn
from cjklib import exception
from cjklib.util import UnicodeCSVFileIterator, CharacterRangeIterator
from libeclectus import util
and any relevant context from other files:
# Path: libeclectus/util.py
# FILE_PATHS = {'default': [u'/usr/local/share/eclectus',
# u'/usr/share/eclectus'],
# 'cmn-caen-tan_ogg': [u'/usr/local/share/eclectus/cmn-caen-tan',
# u'/usr/share/eclectus/cmn-caen-tan'],
# 'chi-balm-hsk1_ogg': [u'/usr/local/share/eclectus/chi-balm-hsk1',
# u'/usr/share/eclectus/chi-balm-hsk1'],
# 'bw.png.segment': [
# u'/usr/local/share/eclectus/bw.png.segment/bw.png.segment',
# u'/usr/share/eclectus/bw.png.segment/bw.png.segment'],
# 'jbw.png.segment': [
# u'/usr/local/share/eclectus/bw.png.segment/jbw.png.segment',
# u'/usr/share/eclectus/bw.png.segment/jbw.png.segment'],
# 'tbw.png.segment': [
# u'/usr/local/share/eclectus/bw.png.segment/tbw.png.segment',
# u'/usr/share/eclectus/bw.png.segment/tbw.png.segment'],
# 'bw.png': [u'/usr/local/share/eclectus/bw.png/bw.png',
# u'/usr/share/eclectus/bw.png/bw.png'],
# 'jbw.png': [u'/usr/local/share/eclectus/bw.png/jbw.png',
# u'/usr/share/eclectus/bw.png/jbw.png'],
# 'tbw.png': [u'/usr/local/share/eclectus/bw.png/tbw.png',
# u'/usr/share/eclectus/bw.png/tbw.png'],
# 'order.gif': [u'/usr/local/share/eclectus/order.gif/order.gif',
# u'/usr/share/eclectus/order.gif/order.gif'],
# 'jorder.gif': [u'/usr/local/share/eclectus/order.gif/jorder.gif',
# u'/usr/share/eclectus/order.gif/jorder.gif'],
# 'torder.gif': [u'/usr/local/share/eclectus/order.gif/torder.gif',
# u'/usr/share/eclectus/order.gif/torder.gif'],
# 'red.png': [u'/usr/local/share/eclectus/red.png/red.png',
# u'/usr/share/eclectus/red.png/red.png'],
# 'jred.png': [u'/usr/local/share/eclectus/red.png/jred.png',
# u'/usr/share/eclectus/red.png/jred.png'],
# }
# UNICODE_SCRIPT_CLASSES = {'Han': [('2E80', '2E99'), ('2E9B', '2EF3'),
# ('2F00', '2FD5'), '3005', '3007', ('3021', '3029'),
# ('3038', '303A'), '303B', ('3400', '4DB5'), ('4E00', '9FCB'),
# ('F900', 'FA2D'), ('FA30', 'FA6D'), ('FA70', 'FAD9'),
# ('20000', '2A6D6'), ('2A700', '2B734'), ('2F800', '2FA1D')],
# 'Hiragana': [('3041', '3096'), ('309D', '309E'), '309F'],
# 'Katakana': [('30A1', '30FA'), ('30FD', '30FE'), '30FF',
# ('31F0', '31FF'), ('32D0', '32FE'), ('3300', '3357'),
# ('FF66', 'FF6F'), ('FF71', 'FF9D')],
# 'Hangul': [('1100', '1159'), ('115F', '11A2'), ('11A8', '11F9'),
# ('3131', '318E'), ('3200', '321E'), ('3260', '327E'),
# ('AC00', 'D7A3'), ('FFA0', 'FFBE'), ('FFC2', 'FFC7'),
# ('FFCA', 'FFCF'), ('FFD2', 'FFD7'), ('FFDA', 'FFDC')],
# 'Bopomofo': [('3105', '312D'), ('31A0', '31B7')],
# }
# def locatePath(name):
# def encodeBase64(string):
# def decodeBase64(string):
# def hasFontAvailability():
# def fontExists(fontFamily):
# def fontHasChar(fontFamily, char):
# def getCJKScriptClass(char):
# def getDatabaseConfiguration(databaseUrl=None):
# def getDatabaseUrl():
# def getAttachableDatabases():
# def attr(attr, value=True):
# def newFunc(func):
# def cachedproperty(fget):
# def fget_wrapper(self):
# def fdel(self):
. Output only the next line. | kanaRanges.extend(util.UNICODE_SCRIPT_CLASSES['Hiragana']) |
Next line prediction: <|code_start|># coding: utf-8
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('-u', '--username', action='store', dest='username', required=True)
def handle(self, *args, **options):
active_user = options['username']
print(self.style.SUCCESS('Active user: @{0}'.format(active_user)))
<|code_end|>
. Use current file imports:
(from django.core.management.base import BaseCommand
from sklearn.metrics.pairwise import pairwise_distances
from app.utils_repo import prepare_user_item_df
import numpy as np
import pandas as pd)
and context including class names, function names, or small code snippets from other files:
# Path: app/utils_repo.py
# def prepare_user_item_df(min_stargazers_count):
# repos = RepoStarring.objects \
# .filter(stargazers_count__gte=min_stargazers_count) \
# .values_list('repo_full_name', flat=True) \
# .order_by('repo_full_name') \
# .distinct()
# repos_array = np.fromiter(repos.iterator(), np.dtype('U140'))
# repos_array.shape
# n_repos = repos_array.shape[0]
#
# users = RepoStarring.objects \
# .filter(stargazers_count__gte=min_stargazers_count) \
# .values_list('from_username', flat=True) \
# .order_by('from_username') \
# .distinct()
# users_array = np.fromiter(users.iterator(), np.dtype('U39'))
# users_array.shape
# n_users = users_array.shape[0]
#
# logger.info('Build the utility matrix')
# logger.info('The number of users: {0}'.format(n_users))
# logger.info('The number of items: {0}'.format(n_repos))
#
# filename = 'caches/df-{0}x{1}.pickle'.format(n_users, n_repos)
# try:
# user_item_df = pickle.load(open(filename, 'rb'))
# except IOError:
# shape = (n_users, n_repos)
# matrix = np.zeros(shape, dtype=np.int8)
# for i, username in enumerate(users_array):
# user_starred = RepoStarring.objects \
# .filter(from_username=username) \
# .values_list('repo_full_name', flat=True)
# user_starred_array = np.fromiter(user_starred.iterator(), np.dtype('U140'))
# row = np.in1d(repos_array, user_starred_array, assume_unique=True)
# matrix[i] = row.astype(np.float64)
#
# user_item_df = pd.DataFrame(matrix, columns=repos_array, index=users_array)
# user_item_df.to_pickle(filename)
#
# return user_item_df
. Output only the next line. | user_item_df = prepare_user_item_df(min_stargazers_count=500) |
Predict the next line after this snippet: <|code_start|># coding: utf-8
class Command(BaseCommand):
def handle(self, *args, **options):
UserRelation.objects.all().delete()
<|code_end|>
using the current file's imports:
from django.core.management.base import BaseCommand
from app.models import RepoStarring
from app.models import UserRelation
and any relevant context from other files:
# Path: app/models.py
# class RepoStarring(models.Model):
# user_id = models.IntegerField()
# user_username = models.CharField(max_length=39)
# repo_id = models.IntegerField()
# repo_full_name = models.CharField(max_length=140)
# starred_at = models.DateTimeField()
#
# class Meta:
# unique_together = (('user_id', 'repo_id'),)
#
# def __str__(self):
# return '@{0} starred {1}'.format(self.user_username, self.repo_full_name)
#
# @staticmethod
# def create_one(user_dict, repo_dict):
# rs = RepoStarring()
# try:
# rs.user_id = user_dict['id']
# rs.user_username = user_dict['login']
# rs.repo_id = repo_dict['id']
# rs.repo_full_name = repo_dict['full_name']
# rs.starred_at = repo_dict['starred_at']
# except (KeyError, TypeError) as e:
# print(e)
# print(user_dict)
# print(repo_dict)
# return
#
# try:
# rs.save()
# except IntegrityError:
# pass
#
# Path: app/models.py
# class UserRelation(models.Model):
# from_user_id = models.IntegerField()
# from_username = models.CharField(max_length=39)
# to_user_id = models.IntegerField()
# to_username = models.CharField(max_length=39)
# relation = models.CharField(max_length=16)
#
# class Meta:
# unique_together = (('from_user_id', 'relation', 'to_user_id'),)
#
# def __str__(self):
# return '@{0} {1} @{2}'.format(self.from_username, self.relation, self.to_username)
#
# @staticmethod
# def create_one(from_user, relation, to_user):
# ur = UserRelation()
# try:
# ur.from_user_id = from_user['id']
# ur.from_username = from_user['login']
# ur.relation = relation
# ur.to_user_id = to_user['id']
# ur.to_username = to_user['login']
# except (KeyError, TypeError) as e:
# print(e)
# print(from_user)
# print(to_user)
# return
#
# try:
# ur.save()
# except IntegrityError:
# pass
. Output only the next line. | RepoStarring.objects.all().delete() |
Given the code snippet: <|code_start|># coding: utf-8
class Command(BaseCommand):
def handle(self, *args, **options):
def batch_qs(qs, batch_size=500):
total = qs.count()
for start in range(0, total, batch_size):
end = min(start + batch_size, total)
yield (start, end, total, qs[start:end])
large_qs = RepoInfo.objects.filter(stargazers_count__gte=10, stargazers_count__lte=290000, fork=False)
for start, end, total, qs_chunk in batch_qs(large_qs):
documents = []
for repo_info in qs_chunk:
<|code_end|>
, generate the next line using the imports in this file:
from django.core.management.base import BaseCommand
from app.mappings import RepoInfoDoc
from app.models import RepoInfo
and context (functions, classes, or occasionally code) from other files:
# Path: app/mappings.py
# class RepoInfoDoc(DocType):
# owner_id = Keyword()
# owner_username = Keyword()
# owner_type = Keyword()
# name = Text(text_analyzer, fields={'raw': Keyword()})
# full_name = Text(text_analyzer, fields={'raw': Keyword()})
# description = Text(text_analyzer)
# language = Keyword()
# created_at = Date()
# updated_at = Date()
# pushed_at = Date()
# homepage = Keyword()
# size = Integer()
# stargazers_count = Integer()
# forks_count = Integer()
# subscribers_count = Integer()
# fork = Boolean()
# has_issues = Boolean()
# has_projects = Boolean()
# has_downloads = Boolean()
# has_wiki = Boolean()
# has_pages = Boolean()
# open_issues_count = Integer()
# topics = Keyword(multi=True)
#
# class Meta:
# index = repo_index._name
#
# @classmethod
# def bulk_save(cls, documents):
# dicts = (d.to_dict(include_meta=True) for d in documents)
# return bulk(client, dicts)
#
# def save(self, **kwargs):
# return super(RepoInfoDoc, self).save(**kwargs)
#
# Path: app/models.py
# class RepoInfo(models.Model):
# owner_id = models.IntegerField()
# owner_username = models.CharField(max_length=39)
# owner_type = models.CharField(max_length=16)
# name = models.CharField(max_length=100)
# full_name = models.CharField(max_length=140, unique=True)
# description = models.TextField(max_length=191)
# language = models.CharField(max_length=32)
# created_at = models.DateTimeField()
# updated_at = models.DateTimeField()
# pushed_at = models.DateTimeField()
# homepage = models.URLField(max_length=255, null=True, blank=True)
# size = models.IntegerField()
# stargazers_count = models.IntegerField()
# forks_count = models.IntegerField()
# subscribers_count = models.IntegerField()
# fork = models.BooleanField()
# has_issues = models.BooleanField()
# has_projects = models.BooleanField()
# has_downloads = models.BooleanField()
# has_wiki = models.BooleanField()
# has_pages = models.BooleanField()
# open_issues_count = models.IntegerField()
# topics = ListTextField(base_field=models.CharField(max_length=255))
#
# def __str__(self):
# return self.full_name
#
# @staticmethod
# def create_one(repo_dict):
# repo = RepoInfo()
# try:
# repo.id = repo_dict['id']
# repo.owner_id = repo_dict['owner']['id']
# repo.owner_username = repo_dict['owner']['login']
# repo.owner_type = repo_dict['owner']['type']
# repo.name = repo_dict['name']
# repo.full_name = repo_dict['full_name']
# repo.description = repo_dict['description']
# repo.language = repo_dict['language']
# repo.created_at = repo_dict['created_at']
# repo.updated_at = repo_dict['updated_at']
# repo.pushed_at = repo_dict['pushed_at']
# repo.homepage = repo_dict['homepage']
# repo.size = repo_dict['size']
# repo.subscribers_count = repo_dict['subscribers_count']
# repo.stargazers_count = repo_dict['stargazers_count']
# repo.forks_count = repo_dict['forks_count']
# repo.fork = repo_dict['fork']
# repo.has_issues = repo_dict['has_issues']
# repo.has_projects = repo_dict['has_projects']
# repo.has_downloads = repo_dict['has_downloads']
# repo.has_wiki = repo_dict['has_wiki']
# repo.has_pages = repo_dict['has_pages']
# repo.open_issues_count = repo_dict['open_issues_count']
# repo.topics = repo_dict['topics']
# except (KeyError, TypeError) as e:
# print(e)
# print(repo_dict)
# return
#
# try:
# repo.save()
# except IntegrityError:
# pass
. Output only the next line. | repo_info_doc = RepoInfoDoc() |
Based on the snippet: <|code_start|># coding: utf-8
class Command(BaseCommand):
def handle(self, *args, **options):
def batch_qs(qs, batch_size=500):
total = qs.count()
for start in range(0, total, batch_size):
end = min(start + batch_size, total)
yield (start, end, total, qs[start:end])
<|code_end|>
, predict the immediate next line with the help of imports:
from django.core.management.base import BaseCommand
from app.mappings import RepoInfoDoc
from app.models import RepoInfo
and context (classes, functions, sometimes code) from other files:
# Path: app/mappings.py
# class RepoInfoDoc(DocType):
# owner_id = Keyword()
# owner_username = Keyword()
# owner_type = Keyword()
# name = Text(text_analyzer, fields={'raw': Keyword()})
# full_name = Text(text_analyzer, fields={'raw': Keyword()})
# description = Text(text_analyzer)
# language = Keyword()
# created_at = Date()
# updated_at = Date()
# pushed_at = Date()
# homepage = Keyword()
# size = Integer()
# stargazers_count = Integer()
# forks_count = Integer()
# subscribers_count = Integer()
# fork = Boolean()
# has_issues = Boolean()
# has_projects = Boolean()
# has_downloads = Boolean()
# has_wiki = Boolean()
# has_pages = Boolean()
# open_issues_count = Integer()
# topics = Keyword(multi=True)
#
# class Meta:
# index = repo_index._name
#
# @classmethod
# def bulk_save(cls, documents):
# dicts = (d.to_dict(include_meta=True) for d in documents)
# return bulk(client, dicts)
#
# def save(self, **kwargs):
# return super(RepoInfoDoc, self).save(**kwargs)
#
# Path: app/models.py
# class RepoInfo(models.Model):
# owner_id = models.IntegerField()
# owner_username = models.CharField(max_length=39)
# owner_type = models.CharField(max_length=16)
# name = models.CharField(max_length=100)
# full_name = models.CharField(max_length=140, unique=True)
# description = models.TextField(max_length=191)
# language = models.CharField(max_length=32)
# created_at = models.DateTimeField()
# updated_at = models.DateTimeField()
# pushed_at = models.DateTimeField()
# homepage = models.URLField(max_length=255, null=True, blank=True)
# size = models.IntegerField()
# stargazers_count = models.IntegerField()
# forks_count = models.IntegerField()
# subscribers_count = models.IntegerField()
# fork = models.BooleanField()
# has_issues = models.BooleanField()
# has_projects = models.BooleanField()
# has_downloads = models.BooleanField()
# has_wiki = models.BooleanField()
# has_pages = models.BooleanField()
# open_issues_count = models.IntegerField()
# topics = ListTextField(base_field=models.CharField(max_length=255))
#
# def __str__(self):
# return self.full_name
#
# @staticmethod
# def create_one(repo_dict):
# repo = RepoInfo()
# try:
# repo.id = repo_dict['id']
# repo.owner_id = repo_dict['owner']['id']
# repo.owner_username = repo_dict['owner']['login']
# repo.owner_type = repo_dict['owner']['type']
# repo.name = repo_dict['name']
# repo.full_name = repo_dict['full_name']
# repo.description = repo_dict['description']
# repo.language = repo_dict['language']
# repo.created_at = repo_dict['created_at']
# repo.updated_at = repo_dict['updated_at']
# repo.pushed_at = repo_dict['pushed_at']
# repo.homepage = repo_dict['homepage']
# repo.size = repo_dict['size']
# repo.subscribers_count = repo_dict['subscribers_count']
# repo.stargazers_count = repo_dict['stargazers_count']
# repo.forks_count = repo_dict['forks_count']
# repo.fork = repo_dict['fork']
# repo.has_issues = repo_dict['has_issues']
# repo.has_projects = repo_dict['has_projects']
# repo.has_downloads = repo_dict['has_downloads']
# repo.has_wiki = repo_dict['has_wiki']
# repo.has_pages = repo_dict['has_pages']
# repo.open_issues_count = repo_dict['open_issues_count']
# repo.topics = repo_dict['topics']
# except (KeyError, TypeError) as e:
# print(e)
# print(repo_dict)
# return
#
# try:
# repo.save()
# except IntegrityError:
# pass
. Output only the next line. | large_qs = RepoInfo.objects.filter(stargazers_count__gte=10, stargazers_count__lte=290000, fork=False) |
Next line prediction: <|code_start|># coding: utf-8
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('-u', '--username', action='store', dest='username', required=True)
def handle(self, *args, **options):
active_user = options['username']
print(self.style.SUCCESS('Active user: @{0}'.format(active_user)))
<|code_end|>
. Use current file imports:
(from django.core.management.base import BaseCommand
from sklearn.metrics.pairwise import pairwise_distances
from app.utils_repo import prepare_user_item_df
import numpy as np
import pandas as pd)
and context including class names, function names, or small code snippets from other files:
# Path: app/utils_repo.py
# def prepare_user_item_df(min_stargazers_count):
# repos = RepoStarring.objects \
# .filter(stargazers_count__gte=min_stargazers_count) \
# .values_list('repo_full_name', flat=True) \
# .order_by('repo_full_name') \
# .distinct()
# repos_array = np.fromiter(repos.iterator(), np.dtype('U140'))
# repos_array.shape
# n_repos = repos_array.shape[0]
#
# users = RepoStarring.objects \
# .filter(stargazers_count__gte=min_stargazers_count) \
# .values_list('from_username', flat=True) \
# .order_by('from_username') \
# .distinct()
# users_array = np.fromiter(users.iterator(), np.dtype('U39'))
# users_array.shape
# n_users = users_array.shape[0]
#
# logger.info('Build the utility matrix')
# logger.info('The number of users: {0}'.format(n_users))
# logger.info('The number of items: {0}'.format(n_repos))
#
# filename = 'caches/df-{0}x{1}.pickle'.format(n_users, n_repos)
# try:
# user_item_df = pickle.load(open(filename, 'rb'))
# except IOError:
# shape = (n_users, n_repos)
# matrix = np.zeros(shape, dtype=np.int8)
# for i, username in enumerate(users_array):
# user_starred = RepoStarring.objects \
# .filter(from_username=username) \
# .values_list('repo_full_name', flat=True)
# user_starred_array = np.fromiter(user_starred.iterator(), np.dtype('U140'))
# row = np.in1d(repos_array, user_starred_array, assume_unique=True)
# matrix[i] = row.astype(np.float64)
#
# user_item_df = pd.DataFrame(matrix, columns=repos_array, index=users_array)
# user_item_df.to_pickle(filename)
#
# return user_item_df
. Output only the next line. | user_item_df = prepare_user_item_df(min_stargazers_count=100) |
Predict the next line after this snippet: <|code_start|># coding: utf-8
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('-i', '--repo_full_name', action='store', dest='repo_full_name', required=True)
def handle(self, *args, **options):
repo_full_name = options['repo_full_name']
self.stdout.write(self.style.SUCCESS('Active item: @{0}'.format(repo_full_name)))
min_stargazers_count = 500
user_item_df = prepare_user_item_df(min_stargazers_count=min_stargazers_count)
<|code_end|>
using the current file's imports:
import re
import pandas as pd
from django.core.management.base import BaseCommand
from django.db import connection
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from app.models import RepoStarring
from app.utils_repo import prepare_user_item_df
and any relevant context from other files:
# Path: app/models.py
# class RepoStarring(models.Model):
# user_id = models.IntegerField()
# user_username = models.CharField(max_length=39)
# repo_id = models.IntegerField()
# repo_full_name = models.CharField(max_length=140)
# starred_at = models.DateTimeField()
#
# class Meta:
# unique_together = (('user_id', 'repo_id'),)
#
# def __str__(self):
# return '@{0} starred {1}'.format(self.user_username, self.repo_full_name)
#
# @staticmethod
# def create_one(user_dict, repo_dict):
# rs = RepoStarring()
# try:
# rs.user_id = user_dict['id']
# rs.user_username = user_dict['login']
# rs.repo_id = repo_dict['id']
# rs.repo_full_name = repo_dict['full_name']
# rs.starred_at = repo_dict['starred_at']
# except (KeyError, TypeError) as e:
# print(e)
# print(user_dict)
# print(repo_dict)
# return
#
# try:
# rs.save()
# except IntegrityError:
# pass
#
# Path: app/utils_repo.py
# def prepare_user_item_df(min_stargazers_count):
# repos = RepoStarring.objects \
# .filter(stargazers_count__gte=min_stargazers_count) \
# .values_list('repo_full_name', flat=True) \
# .order_by('repo_full_name') \
# .distinct()
# repos_array = np.fromiter(repos.iterator(), np.dtype('U140'))
# repos_array.shape
# n_repos = repos_array.shape[0]
#
# users = RepoStarring.objects \
# .filter(stargazers_count__gte=min_stargazers_count) \
# .values_list('from_username', flat=True) \
# .order_by('from_username') \
# .distinct()
# users_array = np.fromiter(users.iterator(), np.dtype('U39'))
# users_array.shape
# n_users = users_array.shape[0]
#
# logger.info('Build the utility matrix')
# logger.info('The number of users: {0}'.format(n_users))
# logger.info('The number of items: {0}'.format(n_repos))
#
# filename = 'caches/df-{0}x{1}.pickle'.format(n_users, n_repos)
# try:
# user_item_df = pickle.load(open(filename, 'rb'))
# except IOError:
# shape = (n_users, n_repos)
# matrix = np.zeros(shape, dtype=np.int8)
# for i, username in enumerate(users_array):
# user_starred = RepoStarring.objects \
# .filter(from_username=username) \
# .values_list('repo_full_name', flat=True)
# user_starred_array = np.fromiter(user_starred.iterator(), np.dtype('U140'))
# row = np.in1d(repos_array, user_starred_array, assume_unique=True)
# matrix[i] = row.astype(np.float64)
#
# user_item_df = pd.DataFrame(matrix, columns=repos_array, index=users_array)
# user_item_df.to_pickle(filename)
#
# return user_item_df
. Output only the next line. | rs = RepoStarring.objects \ |
Based on the snippet: <|code_start|># coding: utf-8
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('-i', '--repo_full_name', action='store', dest='repo_full_name', required=True)
def handle(self, *args, **options):
repo_full_name = options['repo_full_name']
self.stdout.write(self.style.SUCCESS('Active item: @{0}'.format(repo_full_name)))
min_stargazers_count = 500
<|code_end|>
, predict the immediate next line with the help of imports:
import re
import pandas as pd
from django.core.management.base import BaseCommand
from django.db import connection
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from app.models import RepoStarring
from app.utils_repo import prepare_user_item_df
and context (classes, functions, sometimes code) from other files:
# Path: app/models.py
# class RepoStarring(models.Model):
# user_id = models.IntegerField()
# user_username = models.CharField(max_length=39)
# repo_id = models.IntegerField()
# repo_full_name = models.CharField(max_length=140)
# starred_at = models.DateTimeField()
#
# class Meta:
# unique_together = (('user_id', 'repo_id'),)
#
# def __str__(self):
# return '@{0} starred {1}'.format(self.user_username, self.repo_full_name)
#
# @staticmethod
# def create_one(user_dict, repo_dict):
# rs = RepoStarring()
# try:
# rs.user_id = user_dict['id']
# rs.user_username = user_dict['login']
# rs.repo_id = repo_dict['id']
# rs.repo_full_name = repo_dict['full_name']
# rs.starred_at = repo_dict['starred_at']
# except (KeyError, TypeError) as e:
# print(e)
# print(user_dict)
# print(repo_dict)
# return
#
# try:
# rs.save()
# except IntegrityError:
# pass
#
# Path: app/utils_repo.py
# def prepare_user_item_df(min_stargazers_count):
# repos = RepoStarring.objects \
# .filter(stargazers_count__gte=min_stargazers_count) \
# .values_list('repo_full_name', flat=True) \
# .order_by('repo_full_name') \
# .distinct()
# repos_array = np.fromiter(repos.iterator(), np.dtype('U140'))
# repos_array.shape
# n_repos = repos_array.shape[0]
#
# users = RepoStarring.objects \
# .filter(stargazers_count__gte=min_stargazers_count) \
# .values_list('from_username', flat=True) \
# .order_by('from_username') \
# .distinct()
# users_array = np.fromiter(users.iterator(), np.dtype('U39'))
# users_array.shape
# n_users = users_array.shape[0]
#
# logger.info('Build the utility matrix')
# logger.info('The number of users: {0}'.format(n_users))
# logger.info('The number of items: {0}'.format(n_repos))
#
# filename = 'caches/df-{0}x{1}.pickle'.format(n_users, n_repos)
# try:
# user_item_df = pickle.load(open(filename, 'rb'))
# except IOError:
# shape = (n_users, n_repos)
# matrix = np.zeros(shape, dtype=np.int8)
# for i, username in enumerate(users_array):
# user_starred = RepoStarring.objects \
# .filter(from_username=username) \
# .values_list('repo_full_name', flat=True)
# user_starred_array = np.fromiter(user_starred.iterator(), np.dtype('U140'))
# row = np.in1d(repos_array, user_starred_array, assume_unique=True)
# matrix[i] = row.astype(np.float64)
#
# user_item_df = pd.DataFrame(matrix, columns=repos_array, index=users_array)
# user_item_df.to_pickle(filename)
#
# return user_item_df
. Output only the next line. | user_item_df = prepare_user_item_df(min_stargazers_count=min_stargazers_count) |
Here is a snippet: <|code_start|>
class Pshuf(Pattern):
"""
pattern to randomly shuffle elements from a list; the randomly shuffled list
then is repeated verbatim for repats times
"""
def __init__(self, alist=None, repeats=sys.maxsize):
super().__init__()
if alist is None:
alist = []
self.alist = copy.deepcopy(alist)
self.repeats = repeats
def __iter__(self):
# following shuffles the list after repeating
# return (i for i in random_permutation(itertools.chain.from_iterable(itertools.repeat(self.alist,
# self.repeats))))
# following shuffles the non-repeated list once and repeats it every time
return (i for i in
<|code_end|>
. Write the next line using the current file imports:
import copy
import itertools
import sys
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.utils import random_permutation
and context from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/utils.py
# def random_permutation(iterable, r=None):
# """Random selection from itertools.permutations(iterable, r)"""
# pool = tuple(iterable)
# r = len(pool) if r is None else r
# return tuple(random.sample(pool, r))
, which may include functions, classes, or code. Output only the next line. | itertools.chain.from_iterable(itertools.repeat(random_permutation(self.alist), self.repeats))) |
Here is a snippet: <|code_start|>
class TestPtween(unittest.TestCase):
def test_normal(self):
n = NumberAnimation(frm=60, to=90, tween=['linear'])
<|code_end|>
. Write the next line using the current file imports:
import unittest
from vectortween.NumberAnimation import NumberAnimation
from expremigen.patterns.ptween import Ptween
and context from other files:
# Path: expremigen/patterns/ptween.py
# class Ptween(Pattern):
# """
# class to glue pyvectortween to expremigen
# """
#
# def __init__(self, animation: Animation, birthframe=0, startframe=0, stopframe=0,
# deathframe=0, noiseframe=None):
# super().__init__()
# self.animation = animation
# self.bf = birthframe
# self.staf = startframe
# self.stof = stopframe
# self.df = deathframe
# self.nf = noiseframe
# self.current_frame = 0
#
# def __iter__(self):
# for i in range(int(self.df)):
# yield self.animation.make_frame(i, self.bf, self.staf, self.stof, self.df, self.nf)
#
# def __str__(self):
# return "{0}(<anim>, {1}, {2}, {3}, {4}, {5})".format(self.__class__.__name__, self.bf,
# self.staf, self.stof, self.df, self.nf)
, which may include functions, classes, or code. Output only the next line. | a = [i for i in Ptween(n, 0, 0, 10, 10, None)] |
Based on the snippet: <|code_start|>
def myrepeat(what, fn, times=None):
"""
:param what: something to repeat
:param fn: extra function to apply
:param times: number of times to repeat
:return:
"""
if times is None:
while True:
if fn is not None:
yield fn(what)
else:
yield what
else:
for i in range(times):
if fn is not None:
yield fn(what)
else:
yield what
def flatten(l):
"""
:param l: list l
:return: iterator to flatten list l; Pchord are not flattened
"""
for el in l:
<|code_end|>
, predict the immediate next line with the help of imports:
import itertools
import random
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.pchord import Pchord
and context (classes, functions, sometimes code) from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/pchord.py
# class Pchord(Pattern):
# """
# special pattern that is never flattened; interpreted as a chord by the rest of the system
# """
#
# def __init__(self, notes=0):
# super().__init__()
# self.notes = notes
#
# def __str__(self):
# return "{0}({1})".format(self.__class__.__name__, self.notes)
#
# def __iter__(self):
# yield Pchord(self.notes)
#
# def __repr__(self):
# return self.__str__()
#
# def __eq__(self, other):
# return self.notes == other.notes
. Output only the next line. | if isinstance(el, Pattern) and not isinstance(el, Pchord): |
Given the code snippet: <|code_start|>
def myrepeat(what, fn, times=None):
"""
:param what: something to repeat
:param fn: extra function to apply
:param times: number of times to repeat
:return:
"""
if times is None:
while True:
if fn is not None:
yield fn(what)
else:
yield what
else:
for i in range(times):
if fn is not None:
yield fn(what)
else:
yield what
def flatten(l):
"""
:param l: list l
:return: iterator to flatten list l; Pchord are not flattened
"""
for el in l:
<|code_end|>
, generate the next line using the imports in this file:
import itertools
import random
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.pchord import Pchord
and context (functions, classes, or occasionally code) from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/pchord.py
# class Pchord(Pattern):
# """
# special pattern that is never flattened; interpreted as a chord by the rest of the system
# """
#
# def __init__(self, notes=0):
# super().__init__()
# self.notes = notes
#
# def __str__(self):
# return "{0}({1})".format(self.__class__.__name__, self.notes)
#
# def __iter__(self):
# yield Pchord(self.notes)
#
# def __repr__(self):
# return self.__str__()
#
# def __eq__(self, other):
# return self.notes == other.notes
. Output only the next line. | if isinstance(el, Pattern) and not isinstance(el, Pchord): |
Based on the snippet: <|code_start|>
class Pconst(Pattern):
"""
pattern that returns a given "constant" for "repeats" time
"""
def __init__(self, constant=0, repeats=sys.maxsize):
super().__init__()
self.constant = constant
self.repeats = repeats
def __str__(self):
return "{0}({1}, {2})".format(self.__class__.__name__, self.constant, self.repeats)
def __iter__(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import itertools
import sys
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.utils import flatten
and context (classes, functions, sometimes code) from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/utils.py
# def flatten(l):
# """
#
# :param l: list l
# :return: iterator to flatten list l; Pchord are not flattened
# """
# for el in l:
# if isinstance(el, Pattern) and not isinstance(el, Pchord):
# yield from flatten(el)
# else:
# yield el
. Output only the next line. | return flatten(c for c in itertools.repeat(self.constant, self.repeats)) |
Given snippet: <|code_start|> """
internal helper function
:return: list of drumnotes for inclusion in the textX grammar
"""
def mycmp(s1, s2):
if len(s1) < len(s2):
return 1
if len(s1) > len(s2):
return -1
if s1 < s2:
return 1
if s1 > s2:
return -1
return 0
strng = "|".join(sorted(["'" + d.strip() + "'" for d in self.all_drum_notes], key=cmp_to_key(mycmp)))
return strng
def lookup(self, note):
"""
:param note: lookup simple note or Pchord
:return: midi number corresponding to the note name or the notes in the Pchord
"""
try:
if isinstance(note, Pchord):
return Pchord([self.note_to_midi[n.lower()] for n in note.notes])
else:
return self.note_to_midi[note.lower()]
except KeyError:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from expremigen.musicalmappings.constants import REST
from expremigen.patterns.pchord import Pchord
from functools import cmp_to_key
and context:
# Path: expremigen/musicalmappings/constants.py
# REST = 128
#
# Path: expremigen/patterns/pchord.py
# class Pchord(Pattern):
# """
# special pattern that is never flattened; interpreted as a chord by the rest of the system
# """
#
# def __init__(self, notes=0):
# super().__init__()
# self.notes = notes
#
# def __str__(self):
# return "{0}({1})".format(self.__class__.__name__, self.notes)
#
# def __iter__(self):
# yield Pchord(self.notes)
#
# def __repr__(self):
# return self.__str__()
#
# def __eq__(self, other):
# return self.notes == other.notes
which might include code, classes, or functions. Output only the next line. | return REST |
Based on the snippet: <|code_start|> assert d[1] not in self.all_drum_notes
self.all_drum_notes.add(d[0])
self.all_drum_notes.add(d[1])
def get_drumnotes_for_grammar(self):
"""
internal helper function
:return: list of drumnotes for inclusion in the textX grammar
"""
def mycmp(s1, s2):
if len(s1) < len(s2):
return 1
if len(s1) > len(s2):
return -1
if s1 < s2:
return 1
if s1 > s2:
return -1
return 0
strng = "|".join(sorted(["'" + d.strip() + "'" for d in self.all_drum_notes], key=cmp_to_key(mycmp)))
return strng
def lookup(self, note):
"""
:param note: lookup simple note or Pchord
:return: midi number corresponding to the note name or the notes in the Pchord
"""
try:
<|code_end|>
, predict the immediate next line with the help of imports:
from expremigen.musicalmappings.constants import REST
from expremigen.patterns.pchord import Pchord
from functools import cmp_to_key
and context (classes, functions, sometimes code) from other files:
# Path: expremigen/musicalmappings/constants.py
# REST = 128
#
# Path: expremigen/patterns/pchord.py
# class Pchord(Pattern):
# """
# special pattern that is never flattened; interpreted as a chord by the rest of the system
# """
#
# def __init__(self, notes=0):
# super().__init__()
# self.notes = notes
#
# def __str__(self):
# return "{0}({1})".format(self.__class__.__name__, self.notes)
#
# def __iter__(self):
# yield Pchord(self.notes)
#
# def __repr__(self):
# return self.__str__()
#
# def __eq__(self, other):
# return self.notes == other.notes
. Output only the next line. | if isinstance(note, Pchord): |
Predict the next line for this snippet: <|code_start|>
class Pgeom(Pattern):
"""
pattern that generates numbers in geometric series, e.g. Pgeom(1, 2, 5) -> 1, 2, 4, 8, 16
"""
def __init__(self, frm=0, factor=1, length=5):
"""
:param frm: starting number
:param factor: factor by which to keep multiplying the starting number
:param length: length of generated sequence
"""
super().__init__()
self.frm = frm
self.factor = factor
self.length = length
def __iter__(self):
<|code_end|>
with the help of current file imports:
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.utils import take, geom
and context from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/utils.py
# def take(n, iterable):
# """Return first n items of the iterable as a list"""
# return itertools.islice(iterable, n)
#
# def geom(start=1, factor=2):
# """
# iterator to generate geometric series
# :param start: start number
# :param factor: constant factor to apply
# :return: geometric series, e.g. geom(16, 0.5) -> 16 8 4 2 1 0.5 ...
# """
# # geom(1) --> 1 2 4 8 ...
# # geom(16, 0.5) -> 16 8 4 2 1 0.5 ...
# n = start
# while True:
# yield n
# n *= factor
, which may contain function names, class names, or code. Output only the next line. | return take(self.length, geom(self.frm, self.factor)) |
Next line prediction: <|code_start|>
class Pgeom(Pattern):
"""
pattern that generates numbers in geometric series, e.g. Pgeom(1, 2, 5) -> 1, 2, 4, 8, 16
"""
def __init__(self, frm=0, factor=1, length=5):
"""
:param frm: starting number
:param factor: factor by which to keep multiplying the starting number
:param length: length of generated sequence
"""
super().__init__()
self.frm = frm
self.factor = factor
self.length = length
def __iter__(self):
<|code_end|>
. Use current file imports:
(from expremigen.patterns.pattern import Pattern
from expremigen.patterns.utils import take, geom)
and context including class names, function names, or small code snippets from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/utils.py
# def take(n, iterable):
# """Return first n items of the iterable as a list"""
# return itertools.islice(iterable, n)
#
# def geom(start=1, factor=2):
# """
# iterator to generate geometric series
# :param start: start number
# :param factor: constant factor to apply
# :return: geometric series, e.g. geom(16, 0.5) -> 16 8 4 2 1 0.5 ...
# """
# # geom(1) --> 1 2 4 8 ...
# # geom(16, 0.5) -> 16 8 4 2 1 0.5 ...
# n = start
# while True:
# yield n
# n *= factor
. Output only the next line. | return take(self.length, geom(self.frm, self.factor)) |
Here is a snippet: <|code_start|>
class Pseries(Pattern):
"""
pattern to generate an arithmetic series, e.g. Pseries(0, 1, 5) -> 0, 1, 2, 3, 4
"""
def __init__(self, frm=0, step=1, length=5):
super().__init__()
self.frm = frm
self.step = step
self.length = length
def __iter__(self):
<|code_end|>
. Write the next line using the current file imports:
import itertools
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.utils import take
and context from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/utils.py
# def take(n, iterable):
# """Return first n items of the iterable as a list"""
# return itertools.islice(iterable, n)
, which may include functions, classes, or code. Output only the next line. | return take(self.length, itertools.count(self.frm, self.step)) |
Based on the snippet: <|code_start|>
class TestPseq(unittest.TestCase):
def test_normal(self):
a = [i for i in Pseq([4, 5, 6], 2)]
self.assertEqual(a, [4, 5, 6] * 2)
def test_empty(self):
b = [i for i in Pseq([4, 5, 6], 0)]
self.assertEqual(b, [])
def test_adult(self):
d = [i for i in Pseq(["X", "Y", "X"], 3)]
self.assertEqual(d, ["X", "Y", "X"] * 3)
def test_repr(self):
self.assertEqual("{0}".format(Pseq([1, -1, Pconst(2, 2)], 3)), "Pseq([1, -1, Pconst(2, 2)], 3)")
def test_defaultvalue(self):
e = [i for i in Pseq(repeats=2)]
self.assertEqual(e, [])
def test_nesting(self):
f = [i for i in Pseq([Pseq([1, Pconst(2, 2)], 2), Pseq([3, 4], 2)], 2)]
self.assertEqual(f, [1, 2, 2, 1, 2, 2, 3, 4, 3, 4, 1, 2, 2, 1, 2, 2, 3, 4, 3, 4])
def test_withchord(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
from expremigen.patterns.pchord import Pchord
from expremigen.patterns.pconst import Pconst
from expremigen.patterns.pseq import Pseq
and context (classes, functions, sometimes code) from other files:
# Path: expremigen/patterns/pchord.py
# class Pchord(Pattern):
# """
# special pattern that is never flattened; interpreted as a chord by the rest of the system
# """
#
# def __init__(self, notes=0):
# super().__init__()
# self.notes = notes
#
# def __str__(self):
# return "{0}({1})".format(self.__class__.__name__, self.notes)
#
# def __iter__(self):
# yield Pchord(self.notes)
#
# def __repr__(self):
# return self.__str__()
#
# def __eq__(self, other):
# return self.notes == other.notes
#
# Path: expremigen/patterns/pconst.py
# class Pconst(Pattern):
# """
# pattern that returns a given "constant" for "repeats" time
# """
#
# def __init__(self, constant=0, repeats=sys.maxsize):
# super().__init__()
# self.constant = constant
# self.repeats = repeats
#
# def __str__(self):
# return "{0}({1}, {2})".format(self.__class__.__name__, self.constant, self.repeats)
#
# def __iter__(self):
# return flatten(c for c in itertools.repeat(self.constant, self.repeats))
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/pseq.py
# class Pseq(Pattern):
# """
# pattern that generates numbers one by one from a list
# """
#
# def __init__(self, alist=None, repeats=sys.maxsize):
# super().__init__()
# if alist is None:
# alist = []
# self.alist = copy.deepcopy(alist)
# self.repeats = repeats
#
# def __iter__(self):
# return flatten(j for i in itertools.repeat(self.alist, self.repeats) for j in i)
#
# def __str__(self):
# return "{0}({1}, {2})".format(self.__class__.__name__, self.alist, self.repeats)
. Output only the next line. | f = [i for i in Pseq([Pseq([1, Pconst(2, 2)], 2), Pseq(Pchord([3, 4]), 2)], 2)] |
Using the snippet: <|code_start|>
class TestPseq(unittest.TestCase):
def test_normal(self):
a = [i for i in Pseq([4, 5, 6], 2)]
self.assertEqual(a, [4, 5, 6] * 2)
def test_empty(self):
b = [i for i in Pseq([4, 5, 6], 0)]
self.assertEqual(b, [])
def test_adult(self):
d = [i for i in Pseq(["X", "Y", "X"], 3)]
self.assertEqual(d, ["X", "Y", "X"] * 3)
def test_repr(self):
<|code_end|>
, determine the next line of code. You have imports:
import unittest
from expremigen.patterns.pchord import Pchord
from expremigen.patterns.pconst import Pconst
from expremigen.patterns.pseq import Pseq
and context (class names, function names, or code) available:
# Path: expremigen/patterns/pchord.py
# class Pchord(Pattern):
# """
# special pattern that is never flattened; interpreted as a chord by the rest of the system
# """
#
# def __init__(self, notes=0):
# super().__init__()
# self.notes = notes
#
# def __str__(self):
# return "{0}({1})".format(self.__class__.__name__, self.notes)
#
# def __iter__(self):
# yield Pchord(self.notes)
#
# def __repr__(self):
# return self.__str__()
#
# def __eq__(self, other):
# return self.notes == other.notes
#
# Path: expremigen/patterns/pconst.py
# class Pconst(Pattern):
# """
# pattern that returns a given "constant" for "repeats" time
# """
#
# def __init__(self, constant=0, repeats=sys.maxsize):
# super().__init__()
# self.constant = constant
# self.repeats = repeats
#
# def __str__(self):
# return "{0}({1}, {2})".format(self.__class__.__name__, self.constant, self.repeats)
#
# def __iter__(self):
# return flatten(c for c in itertools.repeat(self.constant, self.repeats))
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/pseq.py
# class Pseq(Pattern):
# """
# pattern that generates numbers one by one from a list
# """
#
# def __init__(self, alist=None, repeats=sys.maxsize):
# super().__init__()
# if alist is None:
# alist = []
# self.alist = copy.deepcopy(alist)
# self.repeats = repeats
#
# def __iter__(self):
# return flatten(j for i in itertools.repeat(self.alist, self.repeats) for j in i)
#
# def __str__(self):
# return "{0}({1}, {2})".format(self.__class__.__name__, self.alist, self.repeats)
. Output only the next line. | self.assertEqual("{0}".format(Pseq([1, -1, Pconst(2, 2)], 3)), "Pseq([1, -1, Pconst(2, 2)], 3)") |
Continue the code snippet: <|code_start|>
class Pseq(Pattern):
"""
pattern that generates numbers one by one from a list
"""
def __init__(self, alist=None, repeats=sys.maxsize):
super().__init__()
if alist is None:
alist = []
self.alist = copy.deepcopy(alist)
self.repeats = repeats
def __iter__(self):
<|code_end|>
. Use current file imports:
import copy
import itertools
import sys
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.utils import flatten
and context (classes, functions, or code) from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/utils.py
# def flatten(l):
# """
#
# :param l: list l
# :return: iterator to flatten list l; Pchord are not flattened
# """
# for el in l:
# if isinstance(el, Pattern) and not isinstance(el, Pchord):
# yield from flatten(el)
# else:
# yield el
. Output only the next line. | return flatten(j for i in itertools.repeat(self.alist, self.repeats) for j in i) |
Using the snippet: <|code_start|>
class Prand(Pattern):
"""
Pattern used to draw random numbers from a list
(numbers may repeat themselves)
"""
def __init__(self, alist=None, repeats=sys.maxsize):
"""
pattern that
:param alist: possible numbers
:param repeats: how many numbers to draw
"""
super().__init__()
if alist is None:
alist = []
self.alist = copy.deepcopy(alist)
self.repeats = repeats
def __iter__(self):
# following shuffles the list after repeating
<|code_end|>
, determine the next line of code. You have imports:
import copy
import itertools
import sys
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.utils import flatten, take
from expremigen.patterns.utils import random_permutation
and context (class names, function names, or code) available:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/utils.py
# def flatten(l):
# """
#
# :param l: list l
# :return: iterator to flatten list l; Pchord are not flattened
# """
# for el in l:
# if isinstance(el, Pattern) and not isinstance(el, Pchord):
# yield from flatten(el)
# else:
# yield el
#
# def take(n, iterable):
# """Return first n items of the iterable as a list"""
# return itertools.islice(iterable, n)
#
# Path: expremigen/patterns/utils.py
# def random_permutation(iterable, r=None):
# """Random selection from itertools.permutations(iterable, r)"""
# pool = tuple(iterable)
# r = len(pool) if r is None else r
# return tuple(random.sample(pool, r))
. Output only the next line. | return flatten(take(self.repeats, (i for i in random_permutation( |
Based on the snippet: <|code_start|>
class Prand(Pattern):
"""
Pattern used to draw random numbers from a list
(numbers may repeat themselves)
"""
def __init__(self, alist=None, repeats=sys.maxsize):
"""
pattern that
:param alist: possible numbers
:param repeats: how many numbers to draw
"""
super().__init__()
if alist is None:
alist = []
self.alist = copy.deepcopy(alist)
self.repeats = repeats
def __iter__(self):
# following shuffles the list after repeating
<|code_end|>
, predict the immediate next line with the help of imports:
import copy
import itertools
import sys
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.utils import flatten, take
from expremigen.patterns.utils import random_permutation
and context (classes, functions, sometimes code) from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/utils.py
# def flatten(l):
# """
#
# :param l: list l
# :return: iterator to flatten list l; Pchord are not flattened
# """
# for el in l:
# if isinstance(el, Pattern) and not isinstance(el, Pchord):
# yield from flatten(el)
# else:
# yield el
#
# def take(n, iterable):
# """Return first n items of the iterable as a list"""
# return itertools.islice(iterable, n)
#
# Path: expremigen/patterns/utils.py
# def random_permutation(iterable, r=None):
# """Random selection from itertools.permutations(iterable, r)"""
# pool = tuple(iterable)
# r = len(pool) if r is None else r
# return tuple(random.sample(pool, r))
. Output only the next line. | return flatten(take(self.repeats, (i for i in random_permutation( |
Continue the code snippet: <|code_start|>
class Prand(Pattern):
"""
Pattern used to draw random numbers from a list
(numbers may repeat themselves)
"""
def __init__(self, alist=None, repeats=sys.maxsize):
"""
pattern that
:param alist: possible numbers
:param repeats: how many numbers to draw
"""
super().__init__()
if alist is None:
alist = []
self.alist = copy.deepcopy(alist)
self.repeats = repeats
def __iter__(self):
# following shuffles the list after repeating
<|code_end|>
. Use current file imports:
import copy
import itertools
import sys
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.utils import flatten, take
from expremigen.patterns.utils import random_permutation
and context (classes, functions, or code) from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/utils.py
# def flatten(l):
# """
#
# :param l: list l
# :return: iterator to flatten list l; Pchord are not flattened
# """
# for el in l:
# if isinstance(el, Pattern) and not isinstance(el, Pchord):
# yield from flatten(el)
# else:
# yield el
#
# def take(n, iterable):
# """Return first n items of the iterable as a list"""
# return itertools.islice(iterable, n)
#
# Path: expremigen/patterns/utils.py
# def random_permutation(iterable, r=None):
# """Random selection from itertools.permutations(iterable, r)"""
# pool = tuple(iterable)
# r = len(pool) if r is None else r
# return tuple(random.sample(pool, r))
. Output only the next line. | return flatten(take(self.repeats, (i for i in random_permutation( |
Continue the code snippet: <|code_start|>
class Padd(Pbinop):
"""
pattern that returns the sum of two other patterns
"""
def __init__(self, a: Pattern, b: Pattern):
super().__init__(a, b)
def __iter__(self):
<|code_end|>
. Use current file imports:
import itertools
import operator
from expremigen.patterns.pattern import Pattern
from expremigen.patterns.pbinop import Pbinop
from expremigen.patterns.utils import flatten
and context (classes, functions, or code) from other files:
# Path: expremigen/patterns/pattern.py
# class Pattern(metaclass=abc.ABCMeta):
# """
# abstract base class for a Pattern
# """
#
# def __str__(self):
# return "{0}".format(self.__class__.__name__)
#
# def __repr__(self):
# return self.__str__()
#
# Path: expremigen/patterns/pbinop.py
# class Pbinop(Pattern, metaclass=abc.ABCMeta):
# """
# abstract base class for patterns that rely on two patterns
# """
#
# def __init__(self, a: Pattern, b: Pattern):
# super().__init__()
# self.a = a
# self.b = b
#
# def __str__(self):
# return "{0}({1}, {2})".format(self.__class__.__name__, self.a, self.b)
#
# Path: expremigen/patterns/utils.py
# def flatten(l):
# """
#
# :param l: list l
# :return: iterator to flatten list l; Pchord are not flattened
# """
# for el in l:
# if isinstance(el, Pattern) and not isinstance(el, Pchord):
# yield from flatten(el)
# else:
# yield el
. Output only the next line. | return flatten(i for i in itertools.starmap(operator.__add__, zip(self.a, self.b))) |
Predict the next line after this snippet: <|code_start|># Copyright © 2014-16, Ugo Pozo
# 2014-16, Câmara Municipal de São Paulo
# aggregators.py - aggregators for boolean expressions.
# This file is part of Anubis.
# Anubis is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anubis is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class Aggregator:
def __init__(self):
pass
@staticmethod
def need_parenthesis(outside, inside):
<|code_end|>
using the current file's imports:
from anubis.url import Boolean
and any relevant context from other files:
# Path: anubis/url.py
# class Boolean:
#
# class Type:
# fields = ()
#
# @staticmethod
# def set_contents(expr, dictionary):
# expr.contents = dictionary
#
# class Expr(Type):
# fields = ("field", "args")
#
# @staticmethod
# def repr(expr):
# return "{}: {}".format(repr(expr["field"]), expr["args"])
#
# @staticmethod
# def set_contents(expr, dictionary):
# expr.contents = dictionary["contents"]
#
# @staticmethod
# def traverse(expr, func):
# return func(base_expression=expr)
#
# class Not(Type):
# fields = ("expr",)
#
# @staticmethod
# def repr(expr):
# return "NOT ({})".format(str(expr["expr"]))
#
# @staticmethod
# def set_contents(expr, dictionary):
# expr.contents = dict(expr=dictionary["contents"])
#
# @staticmethod
# def traverse(expr, func):
# return func(not_expression=expr["expr"].traverse(func),
# inside_type=expr["expr"].type_.__class__)
#
# class And(Type):
# fields = ("left", "right")
#
# @staticmethod
# def repr(expr):
# return "({}) AND ({})".format(
# str(expr["left"]), str(expr["right"]))
#
# @staticmethod
# def traverse(expr, func):
# return func(and_expression=(expr["left"].traverse(func),
# expr["right"].traverse(func)),
# left_type=expr["left"].type_.__class__,
# right_type=expr["right"].type_.__class__)
#
# class Or(Type):
# fields = ("left", "right")
#
# @staticmethod
# def repr(expr):
# return "({}) OR ({})".format(str(expr["left"]), str(expr["right"]))
#
# @staticmethod
# def traverse(expr, func):
# return func(or_expression=(expr["left"].traverse(func),
# expr["right"].traverse(func)),
# left_type=expr["left"].type_.__class__,
# right_type=expr["right"].type_.__class__)
#
# types = {"BooleanExpr": Expr(),
# "Not": Not(),
# "And": And(),
# "Or": Or()
# }
#
# precedence = [Expr, Not, And, Or]
#
# def __init__(self, contents, type_):
# assert isinstance(type_, self.Type)
#
# self.type_ = type_
# self.type_.set_contents(self, contents)
#
# @classmethod
# def build(cls, dictionary):
# if "tag" in dictionary.keys():
# if dictionary["tag"] in cls.types.keys():
# return Boolean(dictionary, cls.types[dictionary["tag"]])
# else:
# raise ValueError(dictionary["contents"])
#
# return dictionary
#
# def keys(self):
# return list(self.type_.fields) + ["type"]
#
# def traverse(self, func):
# return self.type_.traverse(self, func)
#
# def __getitem__(self, key):
# if key == "type":
# return self.type_.__class__.__name__
#
# return self.contents[key]
#
# # def __iter__(self):
# # # For dict() conversion
# # for key in self.keys() + ["tag"]:
# # yield (key, self.contents[key])
#
# def __eq__(self, other):
# return self.contents == other.contents
#
# def __str__(self):
# return self.type_.repr(self)
#
# def __repr__(self):
# return str(self)
. Output only the next line. | outside_precedence = Boolean.precedence.index(outside) |
Based on the snippet: <|code_start|> if not self.is_cacheable:
return super().get_full_state()
key = "api:" + self.cache_key if self.is_api else self.cache_key
cache = caches[self.cache]
cached_value = cache.get(key, None)
if cached_value is None:
state = super().get_full_state()
cache.set(key, dict(state))
else:
state = dict(cached_value)
return state
class CachedUnitMixin:
"""Caches individual units.
"""
unit_cache = None
def get_queryset_filter(self, queryset):
if self.unit_cache is None:
return super().get_queryset_filter(queryset)
cache = caches[self.unit_cache]
<|code_end|>
, predict the immediate next line with the help of imports:
from django.core.cache import caches
from anubis.aggregators import CachedQuerySetAggregator
and context (classes, functions, sometimes code) from other files:
# Path: anubis/aggregators.py
# class CachedQuerySetAggregator(QuerySetAggregator):
# def __init__(self, cache, base_queryset, allowed_filters):
# super().__init__(base_queryset, allowed_filters)
#
# self.cache = cache
#
# def make_cache_key(self, expr):
# model_name = self.base_queryset.model._meta.model_name
# keys = [model_name, expr["field"]] + expr["args"]
# keys = [k.replace(":", r"\:") for k in keys]
#
# return ":".join(keys)
#
# def handle_base_expression(self, base_expression):
# unit_key = self.make_cache_key(base_expression)
#
# cached_value = self.cache.get(unit_key, None)
#
# if cached_value is None:
# queryset = super().handle_base_expression(base_expression)
# self.cache.set(unit_key, queryset.values_list("id", flat=True))
# else:
# queryset = self.base_queryset.filter(id__in=cached_value)
#
# return queryset
. Output only the next line. | aggregator = CachedQuerySetAggregator(cache, queryset, |
Here is a snippet: <|code_start|> aggregate = self.procedure_aggregate(procname, *args, **kwargs)
return self.order_by_aggregates(aggregate, field=field,
extra_fields=extra_fields)
def order_by_aggregates(self, *aggregates, field="id", extra_fields=None):
annotation = OrderedDict([
("{}_{}".format(agg.default_alias, i), agg)
for i, agg in enumerate(aggregates)
])
fields = list(annotation.keys())
fields = [Ref(f, annotation[f]) for f in fields]
if extra_fields is not None:
fields += [F(f) for f in extra_fields]
else:
fields.append(F(field))
return self.annotate(**annotation).order_by(*fields)
def procedure_aggregate(self, procname, *args, **kwargs):
"""Helper method for generating ordering annotations.
Returns:
anubis.sql_aggregators.ProcedureOrderingAnnotation: An ordering
annotation with the correct procedure name (including table
name).
"""
procname = "{}_{}".format(self.model._meta.db_table, procname)
<|code_end|>
. Write the next line using the current file imports:
from collections import OrderedDict
from operator import itemgetter
from anubis.sql_aggregators import ProcedureOrderingAnnotation
from django.db import connection as base_connection, connections
from django.db import models
from django.db.models.query import QuerySet
from django.db.models.expressions import Ref, F
and context from other files:
# Path: anubis/sql_aggregators.py
# class ProcedureOrderingAnnotation(RawSQL):
# template = (
# "(select {function}_res.rank "
# "from {function}({args}) as {function}_res "
# "where {field} = {function}_res.id)"
# )
#
# def __init__(self, function, *args, lookup="id", **kwargs):
# self.name = function
# self.col_name = lookup
# self.col = self._parse_expressions(lookup)[0]
#
# super().__init__("", args)
#
# def resolve_expression(self, query=None, allow_joins=True, reuse=None,
# summarize=False, for_save=False):
# clone = self.copy()
# clone.is_summary = summarize
# clone.col = clone.col.resolve_expression(query, allow_joins, reuse,
# summarize, for_save)
#
# return clone
#
# @property
# def default_alias(self):
# return "{}_{}".format(self.col_name, self.name.lower())
#
# def as_sql(self, compiler, connection):
# field_name, _ = compiler.compile(self.col)
#
# args = list(self.params)
#
# arg_marks = ", ".join(["%s"] * len(args))
#
# self.sql = self.template.format(function=self.name, args=arg_marks,
# field=field_name)
#
# return super().as_sql(compiler, connection)
, which may include functions, classes, or code. Output only the next line. | return ProcedureOrderingAnnotation(procname, *args, **kwargs) |
Given snippet: <|code_start|> with pytest.raises(InvalidPublisher):
require_different_publisher(
Committed(
publisher=message.header.publisher,
batch_identifier=batch_identifier,
),
0,
message,
)
def test_stateful_validator_unhandled_starting_state(message):
validator = StatefulStreamValidator(lambda **kwargs: None, {})
with pytest.raises(InvalidEventError):
validator(None, 0, message)
def test_adds_expected_events_to_exception_for_unknown_event_for_state(message):
receivers = {None: {RollbackOperation: 3}}
validator = StatefulStreamValidator(lambda **kwargs: None, receivers)
with pytest.raises(InvalidEventError) as exception:
validator(None, 0, message)
assert exception.value.expected == {RollbackOperation}
def test_successful_transaction():
messages = list(make_batch_messages(batch_identifier, [
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import itertools
import uuid
import pytest
from collections import namedtuple
from pgshovel.interfaces.streams_pb2 import (
Message,
RollbackOperation,
)
from pgshovel.replication.validation.transactions import (
Committed,
InTransaction,
InvalidBatch,
InvalidEventError,
InvalidPublisher,
RolledBack,
StatefulStreamValidator,
get_operation,
require_batch_id_advanced_if_same_node,
require_batch_id_not_advanced_if_same_node,
require_different_publisher,
require_same_batch,
require_same_publisher,
validate_transaction_state,
)
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
copy,
make_batch_messages,
message,
mutation,
reserialize,
)
and context:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
which might include code, classes, or functions. Output only the next line. | {'begin_operation': begin}, |
Next line prediction: <|code_start|> Committed(
publisher=message.header.publisher,
batch_identifier=batch_identifier,
),
0,
message,
)
def test_stateful_validator_unhandled_starting_state(message):
validator = StatefulStreamValidator(lambda **kwargs: None, {})
with pytest.raises(InvalidEventError):
validator(None, 0, message)
def test_adds_expected_events_to_exception_for_unknown_event_for_state(message):
receivers = {None: {RollbackOperation: 3}}
validator = StatefulStreamValidator(lambda **kwargs: None, receivers)
with pytest.raises(InvalidEventError) as exception:
validator(None, 0, message)
assert exception.value.expected == {RollbackOperation}
def test_successful_transaction():
messages = list(make_batch_messages(batch_identifier, [
{'begin_operation': begin},
{'mutation_operation': mutation},
<|code_end|>
. Use current file imports:
(import itertools
import uuid
import pytest
from collections import namedtuple
from pgshovel.interfaces.streams_pb2 import (
Message,
RollbackOperation,
)
from pgshovel.replication.validation.transactions import (
Committed,
InTransaction,
InvalidBatch,
InvalidEventError,
InvalidPublisher,
RolledBack,
StatefulStreamValidator,
get_operation,
require_batch_id_advanced_if_same_node,
require_batch_id_not_advanced_if_same_node,
require_different_publisher,
require_same_batch,
require_same_publisher,
validate_transaction_state,
)
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
copy,
make_batch_messages,
message,
mutation,
reserialize,
))
and context including class names, function names, or small code snippets from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | {'commit_operation': commit}, |
Predict the next line for this snippet: <|code_start|>
def test_require_same_batch(message):
batch_identifier = get_operation(message).batch_identifier
require_same_batch(
InTransaction(
publisher=message.header.publisher,
batch_identifier=batch_identifier,
),
0,
message,
)
with pytest.raises(InvalidBatch):
require_same_batch(
InTransaction(
publisher=message.header.publisher,
<|code_end|>
with the help of current file imports:
import itertools
import uuid
import pytest
from collections import namedtuple
from pgshovel.interfaces.streams_pb2 import (
Message,
RollbackOperation,
)
from pgshovel.replication.validation.transactions import (
Committed,
InTransaction,
InvalidBatch,
InvalidEventError,
InvalidPublisher,
RolledBack,
StatefulStreamValidator,
get_operation,
require_batch_id_advanced_if_same_node,
require_batch_id_not_advanced_if_same_node,
require_different_publisher,
require_same_batch,
require_same_publisher,
validate_transaction_state,
)
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
copy,
make_batch_messages,
message,
mutation,
reserialize,
)
and context from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
, which may contain function names, class names, or code. Output only the next line. | batch_identifier=copy(batch_identifier, id=batch_identifier.id + 1), |
Predict the next line after this snippet: <|code_start|>
with pytest.raises(InvalidPublisher):
require_different_publisher(
Committed(
publisher=message.header.publisher,
batch_identifier=batch_identifier,
),
0,
message,
)
def test_stateful_validator_unhandled_starting_state(message):
validator = StatefulStreamValidator(lambda **kwargs: None, {})
with pytest.raises(InvalidEventError):
validator(None, 0, message)
def test_adds_expected_events_to_exception_for_unknown_event_for_state(message):
receivers = {None: {RollbackOperation: 3}}
validator = StatefulStreamValidator(lambda **kwargs: None, receivers)
with pytest.raises(InvalidEventError) as exception:
validator(None, 0, message)
assert exception.value.expected == {RollbackOperation}
def test_successful_transaction():
<|code_end|>
using the current file's imports:
import itertools
import uuid
import pytest
from collections import namedtuple
from pgshovel.interfaces.streams_pb2 import (
Message,
RollbackOperation,
)
from pgshovel.replication.validation.transactions import (
Committed,
InTransaction,
InvalidBatch,
InvalidEventError,
InvalidPublisher,
RolledBack,
StatefulStreamValidator,
get_operation,
require_batch_id_advanced_if_same_node,
require_batch_id_not_advanced_if_same_node,
require_different_publisher,
require_same_batch,
require_same_publisher,
validate_transaction_state,
)
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
copy,
make_batch_messages,
message,
mutation,
reserialize,
)
and any relevant context from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | messages = list(make_batch_messages(batch_identifier, [ |
Given snippet: <|code_start|> require_different_publisher(
Committed(
publisher=message.header.publisher,
batch_identifier=batch_identifier,
),
0,
message,
)
def test_stateful_validator_unhandled_starting_state(message):
validator = StatefulStreamValidator(lambda **kwargs: None, {})
with pytest.raises(InvalidEventError):
validator(None, 0, message)
def test_adds_expected_events_to_exception_for_unknown_event_for_state(message):
receivers = {None: {RollbackOperation: 3}}
validator = StatefulStreamValidator(lambda **kwargs: None, receivers)
with pytest.raises(InvalidEventError) as exception:
validator(None, 0, message)
assert exception.value.expected == {RollbackOperation}
def test_successful_transaction():
messages = list(make_batch_messages(batch_identifier, [
{'begin_operation': begin},
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import itertools
import uuid
import pytest
from collections import namedtuple
from pgshovel.interfaces.streams_pb2 import (
Message,
RollbackOperation,
)
from pgshovel.replication.validation.transactions import (
Committed,
InTransaction,
InvalidBatch,
InvalidEventError,
InvalidPublisher,
RolledBack,
StatefulStreamValidator,
get_operation,
require_batch_id_advanced_if_same_node,
require_batch_id_not_advanced_if_same_node,
require_different_publisher,
require_same_batch,
require_same_publisher,
validate_transaction_state,
)
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
copy,
make_batch_messages,
message,
mutation,
reserialize,
)
and context:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
which might include code, classes, or functions. Output only the next line. | {'mutation_operation': mutation}, |
Predict the next line for this snippet: <|code_start|> message,
)
def test_stateful_validator_unhandled_starting_state(message):
validator = StatefulStreamValidator(lambda **kwargs: None, {})
with pytest.raises(InvalidEventError):
validator(None, 0, message)
def test_adds_expected_events_to_exception_for_unknown_event_for_state(message):
receivers = {None: {RollbackOperation: 3}}
validator = StatefulStreamValidator(lambda **kwargs: None, receivers)
with pytest.raises(InvalidEventError) as exception:
validator(None, 0, message)
assert exception.value.expected == {RollbackOperation}
def test_successful_transaction():
messages = list(make_batch_messages(batch_identifier, [
{'begin_operation': begin},
{'mutation_operation': mutation},
{'commit_operation': commit},
]))
state = None
<|code_end|>
with the help of current file imports:
import itertools
import uuid
import pytest
from collections import namedtuple
from pgshovel.interfaces.streams_pb2 import (
Message,
RollbackOperation,
)
from pgshovel.replication.validation.transactions import (
Committed,
InTransaction,
InvalidBatch,
InvalidEventError,
InvalidPublisher,
RolledBack,
StatefulStreamValidator,
get_operation,
require_batch_id_advanced_if_same_node,
require_batch_id_not_advanced_if_same_node,
require_different_publisher,
require_same_batch,
require_same_publisher,
validate_transaction_state,
)
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
copy,
make_batch_messages,
message,
mutation,
reserialize,
)
and context from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
, which may contain function names, class names, or code. Output only the next line. | state = reserialize(validate_transaction_state(state, 0, messages[0])) |
Based on the snippet: <|code_start|> stream = KafkaStream.configure(configuration, cluster, 'default')
client.ensure_topic_exists(stream.topic)
yield stream
@pytest.yield_fixture
def client(configuration):
yield KafkaClient(configuration['hosts'])
@pytest.yield_fixture
def writer(client, stream):
producer = SimpleProducer(client)
yield KafkaWriter(producer, stream.topic)
@pytest.yield_fixture
def state():
bootstrap_state = BootstrapState(
node='1234',
snapshot=Snapshot(min=1, max=2),
)
yield State(bootstrap_state=bootstrap_state)
@pytest.yield_fixture
def sliced_transaction():
two_transactions = list(islice(transactions(), 6))
head, remainder = two_transactions[0], two_transactions[1:]
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from itertools import islice
from kafka import (
KafkaClient,
SimpleProducer,
)
from tests.pgshovel.fixtures import (
cluster,
create_temporary_database,
)
from tests.pgshovel.streams.fixtures import (
DEFAULT_PUBLISHER,
begin,
transaction,
transactions,
)
from pgshovel.interfaces.common_pb2 import Snapshot
from pgshovel.interfaces.configurations_pb2 import ReplicationSetConfiguration
from pgshovel.interfaces.replication_pb2 import (
ConsumerState,
State,
BootstrapState,
TransactionState,
)
from pgshovel.interfaces.streams_pb2 import (
Header,
Message,
)
from pgshovel.replication.streams.kafka import KafkaStream
from pgshovel.replication.validation.consumers import SequencingError
from pgshovel.replication.validation.transactions import InvalidEventError
from pgshovel.relay.streams.kafka import KafkaWriter
from pgshovel.streams.utilities import UnableToPrimeError
and context (classes, functions, sometimes code) from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | assert head.batch_operation.begin_operation == begin |
Continue the code snippet: <|code_start|>def client(configuration):
yield KafkaClient(configuration['hosts'])
@pytest.yield_fixture
def writer(client, stream):
producer = SimpleProducer(client)
yield KafkaWriter(producer, stream.topic)
@pytest.yield_fixture
def state():
bootstrap_state = BootstrapState(
node='1234',
snapshot=Snapshot(min=1, max=2),
)
yield State(bootstrap_state=bootstrap_state)
@pytest.yield_fixture
def sliced_transaction():
two_transactions = list(islice(transactions(), 6))
head, remainder = two_transactions[0], two_transactions[1:]
assert head.batch_operation.begin_operation == begin
yield remainder
def test_starts_at_beginning_of_stream_for_bootstrapped_state(writer, stream, state):
<|code_end|>
. Use current file imports:
import pytest
from itertools import islice
from kafka import (
KafkaClient,
SimpleProducer,
)
from tests.pgshovel.fixtures import (
cluster,
create_temporary_database,
)
from tests.pgshovel.streams.fixtures import (
DEFAULT_PUBLISHER,
begin,
transaction,
transactions,
)
from pgshovel.interfaces.common_pb2 import Snapshot
from pgshovel.interfaces.configurations_pb2 import ReplicationSetConfiguration
from pgshovel.interfaces.replication_pb2 import (
ConsumerState,
State,
BootstrapState,
TransactionState,
)
from pgshovel.interfaces.streams_pb2 import (
Header,
Message,
)
from pgshovel.replication.streams.kafka import KafkaStream
from pgshovel.replication.validation.consumers import SequencingError
from pgshovel.replication.validation.transactions import InvalidEventError
from pgshovel.relay.streams.kafka import KafkaWriter
from pgshovel.streams.utilities import UnableToPrimeError
and context (classes, functions, or code) from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | writer.push(transaction) |
Predict the next line after this snippet: <|code_start|>
@pytest.yield_fixture
def stream(configuration, cluster, client):
stream = KafkaStream.configure(configuration, cluster, 'default')
client.ensure_topic_exists(stream.topic)
yield stream
@pytest.yield_fixture
def client(configuration):
yield KafkaClient(configuration['hosts'])
@pytest.yield_fixture
def writer(client, stream):
producer = SimpleProducer(client)
yield KafkaWriter(producer, stream.topic)
@pytest.yield_fixture
def state():
bootstrap_state = BootstrapState(
node='1234',
snapshot=Snapshot(min=1, max=2),
)
yield State(bootstrap_state=bootstrap_state)
@pytest.yield_fixture
def sliced_transaction():
<|code_end|>
using the current file's imports:
import pytest
from itertools import islice
from kafka import (
KafkaClient,
SimpleProducer,
)
from tests.pgshovel.fixtures import (
cluster,
create_temporary_database,
)
from tests.pgshovel.streams.fixtures import (
DEFAULT_PUBLISHER,
begin,
transaction,
transactions,
)
from pgshovel.interfaces.common_pb2 import Snapshot
from pgshovel.interfaces.configurations_pb2 import ReplicationSetConfiguration
from pgshovel.interfaces.replication_pb2 import (
ConsumerState,
State,
BootstrapState,
TransactionState,
)
from pgshovel.interfaces.streams_pb2 import (
Header,
Message,
)
from pgshovel.replication.streams.kafka import KafkaStream
from pgshovel.replication.validation.consumers import SequencingError
from pgshovel.replication.validation.transactions import InvalidEventError
from pgshovel.relay.streams.kafka import KafkaWriter
from pgshovel.streams.utilities import UnableToPrimeError
and any relevant context from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | two_transactions = list(islice(transactions(), 6)) |
Based on the snippet: <|code_start|>from __future__ import absolute_import
with import_extras('kafka'):
def test_writer():
topic = '%s-mutations' % (uuid.uuid1().hex,)
client = KafkaClient('kafka')
producer = SimpleProducer(client)
writer = KafkaWriter(producer, topic)
<|code_end|>
, predict the immediate next line with the help of imports:
import operator
import uuid
from pgshovel.relay.streams.kafka import KafkaWriter
from pgshovel.utilities import import_extras
from tests.pgshovel.streams.fixtures import transaction
from kafka.client import KafkaClient
from kafka.consumer.simple import SimpleConsumer
from kafka.producer.simple import SimpleProducer
and context (classes, functions, sometimes code) from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | inputs = list(transaction) |
Using the snippet: <|code_start|>
@pytest.yield_fixture
def source_transaction_snapshot(source_connection):
with source_connection as conn, conn.cursor() as cursor:
cursor.execute('SELECT txid_current_snapshot();')
row = cursor.fetchone()
yield to_snapshot(row[0])
@pytest.yield_fixture
def invisible_transaction(source_transaction_snapshot):
# TODO: Figure out if we need to set the min, max here.
begin = BeginOperation(
start=Tick(
id=1,
snapshot=Snapshot(min=100, max=200),
timestamp=Timestamp(seconds=0, nanos=0),
),
end=Tick(
id=2,
snapshot=Snapshot(min=150, max=250),
timestamp=Timestamp(seconds=10, nanos=0),
),
)
mutation = get_mutation(source_transaction_snapshot.max + 1000)
commit = CommitOperation()
generator = make_batch_messages(
<|code_end|>
, determine the next line of code. You have imports:
import pytest
import psycopg2
import uuid
from copy import deepcopy
from itertools import islice
from kafka import (
KafkaClient,
SimpleProducer,
)
from kazoo.client import KazooClient
from kafka import KafkaClient
from tests.pgshovel.fixtures import (
cluster,
create_temporary_database
)
from tests.pgshovel.streams.fixtures import (
batch_identifier,
make_batch_messages,
transaction,
)
from pgshovel.administration import (
create_set,
get_managed_databases,
initialize_cluster,
)
from pgshovel.cluster import Cluster
from pgshovel.interfaces.common_pb2 import (
Column,
Row,
Snapshot,
Tick,
Timestamp,
)
from pgshovel.interfaces.configurations_pb2 import ReplicationSetConfiguration
from pgshovel.interfaces.streams_pb2 import (
BeginOperation,
CommitOperation,
MutationOperation,
RollbackOperation,
)
from pgshovel.relay.streams.kafka import KafkaWriter
from pgshovel.replication.loaders.simple import SimpleLoader
from pgshovel.replication.streams.kafka import KafkaStream
from pgshovel.replication.targets.postgresql import PostgreSQLTarget
from pgshovel.utilities.conversions import to_snapshot
and context (class names, function names, or code) available:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | batch_identifier, |
Continue the code snippet: <|code_start|>
@pytest.yield_fixture
def source_transaction_snapshot(source_connection):
with source_connection as conn, conn.cursor() as cursor:
cursor.execute('SELECT txid_current_snapshot();')
row = cursor.fetchone()
yield to_snapshot(row[0])
@pytest.yield_fixture
def invisible_transaction(source_transaction_snapshot):
# TODO: Figure out if we need to set the min, max here.
begin = BeginOperation(
start=Tick(
id=1,
snapshot=Snapshot(min=100, max=200),
timestamp=Timestamp(seconds=0, nanos=0),
),
end=Tick(
id=2,
snapshot=Snapshot(min=150, max=250),
timestamp=Timestamp(seconds=10, nanos=0),
),
)
mutation = get_mutation(source_transaction_snapshot.max + 1000)
commit = CommitOperation()
<|code_end|>
. Use current file imports:
import pytest
import psycopg2
import uuid
from copy import deepcopy
from itertools import islice
from kafka import (
KafkaClient,
SimpleProducer,
)
from kazoo.client import KazooClient
from kafka import KafkaClient
from tests.pgshovel.fixtures import (
cluster,
create_temporary_database
)
from tests.pgshovel.streams.fixtures import (
batch_identifier,
make_batch_messages,
transaction,
)
from pgshovel.administration import (
create_set,
get_managed_databases,
initialize_cluster,
)
from pgshovel.cluster import Cluster
from pgshovel.interfaces.common_pb2 import (
Column,
Row,
Snapshot,
Tick,
Timestamp,
)
from pgshovel.interfaces.configurations_pb2 import ReplicationSetConfiguration
from pgshovel.interfaces.streams_pb2 import (
BeginOperation,
CommitOperation,
MutationOperation,
RollbackOperation,
)
from pgshovel.relay.streams.kafka import KafkaWriter
from pgshovel.replication.loaders.simple import SimpleLoader
from pgshovel.replication.streams.kafka import KafkaStream
from pgshovel.replication.targets.postgresql import PostgreSQLTarget
from pgshovel.utilities.conversions import to_snapshot
and context (classes, functions, or code) from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | generator = make_batch_messages( |
Here is a snippet: <|code_start|>
def limited_to(self, limit):
self.limit = limit
return self
def consume(self, *args, **kwargs):
iterator = super(LimitedKafkaStream, self).consume(*args, **kwargs)
return islice(iterator, self.limit)
def get_mutation(transaction_id=1, user_1_name='kevin bacon'):
return MutationOperation(
id=1,
schema='public',
table='auth_user',
operation=MutationOperation.UPDATE,
identity_columns=['id'],
new=Row(
columns=[
Column(name='id', integer64=1),
Column(name='username', string=user_1_name),
],
),
old=Row(
columns=[
Column(name='id', integer64=1),
Column(name='username', string='example2'),
],
),
timestamp=Timestamp(seconds=0, nanos=0),
<|code_end|>
. Write the next line using the current file imports:
import pytest
import psycopg2
import uuid
from copy import deepcopy
from itertools import islice
from kafka import (
KafkaClient,
SimpleProducer,
)
from kazoo.client import KazooClient
from kafka import KafkaClient
from tests.pgshovel.fixtures import (
cluster,
create_temporary_database
)
from tests.pgshovel.streams.fixtures import (
batch_identifier,
make_batch_messages,
transaction,
)
from pgshovel.administration import (
create_set,
get_managed_databases,
initialize_cluster,
)
from pgshovel.cluster import Cluster
from pgshovel.interfaces.common_pb2 import (
Column,
Row,
Snapshot,
Tick,
Timestamp,
)
from pgshovel.interfaces.configurations_pb2 import ReplicationSetConfiguration
from pgshovel.interfaces.streams_pb2 import (
BeginOperation,
CommitOperation,
MutationOperation,
RollbackOperation,
)
from pgshovel.relay.streams.kafka import KafkaWriter
from pgshovel.replication.loaders.simple import SimpleLoader
from pgshovel.replication.streams.kafka import KafkaStream
from pgshovel.replication.targets.postgresql import PostgreSQLTarget
from pgshovel.utilities.conversions import to_snapshot
and context from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
, which may include functions, classes, or code. Output only the next line. | transaction=transaction_id, |
Using the snippet: <|code_start|>
def test_publisher():
messages = []
publisher = Publisher(messages.extend)
<|code_end|>
, determine the next line of code. You have imports:
import pytest
from pgshovel.interfaces.common_pb2 import Snapshot
from pgshovel.interfaces.replication_pb2 import (
BootstrapState,
ConsumerState,
State,
StreamState,
TransactionState,
)
from pgshovel.replication.validation import validate_state
from pgshovel.streams.publisher import Publisher
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
mutation,
reserialize,
rollback,
)
and context (class names, function names, or code) available:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | with publisher.batch(batch_identifier, begin) as publish: |
Next line prediction: <|code_start|>
def test_publisher():
messages = []
publisher = Publisher(messages.extend)
<|code_end|>
. Use current file imports:
(import pytest
from pgshovel.interfaces.common_pb2 import Snapshot
from pgshovel.interfaces.replication_pb2 import (
BootstrapState,
ConsumerState,
State,
StreamState,
TransactionState,
)
from pgshovel.replication.validation import validate_state
from pgshovel.streams.publisher import Publisher
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
mutation,
reserialize,
rollback,
))
and context including class names, function names, or small code snippets from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | with publisher.batch(batch_identifier, begin) as publish: |
Given the code snippet: <|code_start|>
def test_publisher():
messages = []
publisher = Publisher(messages.extend)
with publisher.batch(batch_identifier, begin) as publish:
publish(mutation)
published_messages = map(reserialize, messages)
assert get_oneof_value(
get_oneof_value(published_messages[0], 'operation'),
'operation'
) == begin
assert get_oneof_value(
get_oneof_value(published_messages[1], 'operation'),
'operation'
) == mutation
assert get_oneof_value(
get_oneof_value(published_messages[2], 'operation'),
'operation'
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from pgshovel.interfaces.common_pb2 import Snapshot
from pgshovel.interfaces.replication_pb2 import (
BootstrapState,
ConsumerState,
State,
StreamState,
TransactionState,
)
from pgshovel.replication.validation import validate_state
from pgshovel.streams.publisher import Publisher
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
mutation,
reserialize,
rollback,
)
and context (functions, classes, or occasionally code) from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | ) == commit |
Given the code snippet: <|code_start|>
def test_publisher():
messages = []
publisher = Publisher(messages.extend)
with publisher.batch(batch_identifier, begin) as publish:
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from pgshovel.interfaces.common_pb2 import Snapshot
from pgshovel.interfaces.replication_pb2 import (
BootstrapState,
ConsumerState,
State,
StreamState,
TransactionState,
)
from pgshovel.replication.validation import validate_state
from pgshovel.streams.publisher import Publisher
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
mutation,
reserialize,
rollback,
)
and context (functions, classes, or occasionally code) from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | publish(mutation) |
Given the following code snippet before the placeholder: <|code_start|>
def test_publisher():
messages = []
publisher = Publisher(messages.extend)
with publisher.batch(batch_identifier, begin) as publish:
publish(mutation)
<|code_end|>
, predict the next line using imports from the current file:
import pytest
from pgshovel.interfaces.common_pb2 import Snapshot
from pgshovel.interfaces.replication_pb2 import (
BootstrapState,
ConsumerState,
State,
StreamState,
TransactionState,
)
from pgshovel.replication.validation import validate_state
from pgshovel.streams.publisher import Publisher
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
mutation,
reserialize,
rollback,
)
and context including class names, function names, and sometimes code from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | published_messages = map(reserialize, messages) |
Continue the code snippet: <|code_start|> 'operation'
) == commit
for i, message in enumerate(published_messages):
assert message.header.publisher == publisher.id
assert message.header.sequence == i
# Ensure it actually generates valid data.
state = None
for offset, message in enumerate(published_messages):
state = reserialize(validate_state(state, offset, message))
def test_publisher_failure():
messages = []
publisher = Publisher(messages.extend)
with pytest.raises(NotImplementedError):
with publisher.batch(batch_identifier, begin):
raise NotImplementedError
published_messages = map(reserialize, messages)
assert get_oneof_value(
get_oneof_value(published_messages[0], 'operation'),
'operation'
) == begin
assert get_oneof_value(
get_oneof_value(published_messages[1], 'operation'),
'operation'
<|code_end|>
. Use current file imports:
import pytest
from pgshovel.interfaces.common_pb2 import Snapshot
from pgshovel.interfaces.replication_pb2 import (
BootstrapState,
ConsumerState,
State,
StreamState,
TransactionState,
)
from pgshovel.replication.validation import validate_state
from pgshovel.streams.publisher import Publisher
from pgshovel.utilities.protobuf import get_oneof_value
from tests.pgshovel.streams.fixtures import (
batch_identifier,
begin,
commit,
mutation,
reserialize,
rollback,
)
and context (classes, functions, or code) from other files:
# Path: tests/pgshovel/streams/fixtures.py
# DEFAULT_PUBLISHER = uuid.uuid1().bytes
# def reserialize(message):
# def copy(message, **replacements):
# def make_message(payload, sequence=1, publisher=DEFAULT_PUBLISHER):
# def make_messages(payloads, publisher=DEFAULT_PUBLISHER):
# def make_batch_messages(batch_identifier, payloads, **kwargs):
# def transactions():
# def message():
. Output only the next line. | ) == rollback |
Here is a snippet: <|code_start|> class Meta:
abstract = True
@python_2_unicode_compatible
class Settings(ActionsMixin, TimeStampedMixin):
"""Settings for imported and exported files"""
name = models.CharField(
_('name'), blank=True, max_length=100,
help_text=_('Small description of operation'))
start_row = models.PositiveIntegerField(
_('start row'), null=True, blank=True,
help_text=_("Reading or writing start index (including itself)"))
end_row = models.PositiveIntegerField(
_('end row'), null=True, blank=True,
help_text=_("Reading or writing end index (including itself)"))
model = models.CharField(
_('model'), max_length=255, blank=True,
help_text=_("Choose database model if action need it"))
show_in_quick_menu = models.BooleanField(
_('show in quick menu list'), default=False)
processor = models.CharField(
_('format'), max_length=255,
<|code_end|>
. Write the next line using the current file imports:
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings as django_settings
from .settings import SETTINGS, strip_media_root
from .translation import gettext_lazy as _
from .lib.helpers import model_attributes
from .lib.signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .lib.exceptions import ErrorChoicesMixin
from mtr.utils.models.mixins import PositionRelatedMixin, \
TimeStampedMixin
from .lib.manager import manager
from .lib.manager import manager
from .lib.manager import manager
from .tasks import export_data, import_data
from .tasks import export_data, import_data
and context from other files:
# Path: mtr/sync/settings.py
# def get_buffer_file_path(instance, filename, absolute=False):
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
#
# Path: mtr/sync/translation.py
#
# Path: mtr/sync/lib/helpers.py
# def model_attributes(settings, prefix=None, model=None, parent=None, level=0):
# """Return iterator of fields names by given model in settings"""
#
# model = model or make_model_class(settings)
# include_related = True and settings.include_related
#
# for name, field in model_fields(model, settings).items():
# label = name
# child_attrs = None
# m_prefix = None
#
# if level > 1:
# continue
#
# if isinstance(field, models.ForeignKey):
# if not include_related:
# continue
# m_prefix = '{}|_fk_|'
# elif isinstance(field, models.ManyToManyField):
# if not include_related:
# continue
# m_prefix = '{}|_m_|'
# elif isinstance(field, property):
# field = field.fget
#
# label = getattr(
# field, 'short_description', getattr(
# field, 'verbose_name', getattr(
# field, '__name__', repr(field))))
#
# if m_prefix:
# child_attrs = []
# child_attrs = model_attributes(
# settings, m_prefix.format(name),
# model=field.rel.to, parent=model, level=level + 1)
# if prefix:
# name = ''.join((prefix, name))
#
# if child_attrs:
# for name, label in child_attrs:
# yield (name, smart_text('{} | {}').format(
# field.rel.to._meta.verbose_name, label).capitalize())
# else:
# yield (name, label.capitalize())
#
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
, which may include functions, classes, or code. Output only the next line. | default=SETTINGS['default_processor'], |
Here is a snippet: <|code_start|>
def __str__(self):
return '{} - {}'.format(self.started_at, self.get_status_display())
def get_absolute_url(self):
if self.buffer_file:
return self.buffer_file.url
@receiver(export_started)
def create_export_report(sender, **kwargs):
return Report.export_objects.create(
action=Report.EXPORT, settings=sender.settings)
@receiver(export_completed)
def save_export_report(sender, **kwargs):
report = sender.report
report.completed_at = kwargs['date']
report.buffer_file = kwargs['path']
report.status = report.SUCCESS
report.save()
return report
@receiver(import_started)
def create_import_report(sender, **kwargs):
return Report.import_objects.create(
<|code_end|>
. Write the next line using the current file imports:
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings as django_settings
from .settings import SETTINGS, strip_media_root
from .translation import gettext_lazy as _
from .lib.helpers import model_attributes
from .lib.signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .lib.exceptions import ErrorChoicesMixin
from mtr.utils.models.mixins import PositionRelatedMixin, \
TimeStampedMixin
from .lib.manager import manager
from .lib.manager import manager
from .lib.manager import manager
from .tasks import export_data, import_data
from .tasks import export_data, import_data
and context from other files:
# Path: mtr/sync/settings.py
# def get_buffer_file_path(instance, filename, absolute=False):
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
#
# Path: mtr/sync/translation.py
#
# Path: mtr/sync/lib/helpers.py
# def model_attributes(settings, prefix=None, model=None, parent=None, level=0):
# """Return iterator of fields names by given model in settings"""
#
# model = model or make_model_class(settings)
# include_related = True and settings.include_related
#
# for name, field in model_fields(model, settings).items():
# label = name
# child_attrs = None
# m_prefix = None
#
# if level > 1:
# continue
#
# if isinstance(field, models.ForeignKey):
# if not include_related:
# continue
# m_prefix = '{}|_fk_|'
# elif isinstance(field, models.ManyToManyField):
# if not include_related:
# continue
# m_prefix = '{}|_m_|'
# elif isinstance(field, property):
# field = field.fget
#
# label = getattr(
# field, 'short_description', getattr(
# field, 'verbose_name', getattr(
# field, '__name__', repr(field))))
#
# if m_prefix:
# child_attrs = []
# child_attrs = model_attributes(
# settings, m_prefix.format(name),
# model=field.rel.to, parent=model, level=level + 1)
# if prefix:
# name = ''.join((prefix, name))
#
# if child_attrs:
# for name, label in child_attrs:
# yield (name, smart_text('{} | {}').format(
# field.rel.to._meta.verbose_name, label).capitalize())
# else:
# yield (name, label.capitalize())
#
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
, which may include functions, classes, or code. Output only the next line. | buffer_file=strip_media_root(kwargs['path']), |
Using the snippet: <|code_start|> return super(ExportManager, self).get_queryset() \
.filter(action=self.model.EXPORT)
class ImportManager(models.Manager):
"""Shortcut for import queryset"""
def get_queryset(self):
return super(ImportManager, self).get_queryset() \
.filter(action=self.model.IMPORT)
class RunningManager(models.Manager):
"""Shortcut for running Report entry queryset"""
def get_queryset(self):
return super(RunningManager, self).get_queryset() \
.filter(status=self.model.RUNNING)
class ActionsMixin(models.Model):
"""Action choices mixin"""
EXPORT = 0
IMPORT = 1
ACTION_CHOICES = (
<|code_end|>
, determine the next line of code. You have imports:
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings as django_settings
from .settings import SETTINGS, strip_media_root
from .translation import gettext_lazy as _
from .lib.helpers import model_attributes
from .lib.signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .lib.exceptions import ErrorChoicesMixin
from mtr.utils.models.mixins import PositionRelatedMixin, \
TimeStampedMixin
from .lib.manager import manager
from .lib.manager import manager
from .lib.manager import manager
from .tasks import export_data, import_data
from .tasks import export_data, import_data
and context (class names, function names, or code) available:
# Path: mtr/sync/settings.py
# def get_buffer_file_path(instance, filename, absolute=False):
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
#
# Path: mtr/sync/translation.py
#
# Path: mtr/sync/lib/helpers.py
# def model_attributes(settings, prefix=None, model=None, parent=None, level=0):
# """Return iterator of fields names by given model in settings"""
#
# model = model or make_model_class(settings)
# include_related = True and settings.include_related
#
# for name, field in model_fields(model, settings).items():
# label = name
# child_attrs = None
# m_prefix = None
#
# if level > 1:
# continue
#
# if isinstance(field, models.ForeignKey):
# if not include_related:
# continue
# m_prefix = '{}|_fk_|'
# elif isinstance(field, models.ManyToManyField):
# if not include_related:
# continue
# m_prefix = '{}|_m_|'
# elif isinstance(field, property):
# field = field.fget
#
# label = getattr(
# field, 'short_description', getattr(
# field, 'verbose_name', getattr(
# field, '__name__', repr(field))))
#
# if m_prefix:
# child_attrs = []
# child_attrs = model_attributes(
# settings, m_prefix.format(name),
# model=field.rel.to, parent=model, level=level + 1)
# if prefix:
# name = ''.join((prefix, name))
#
# if child_attrs:
# for name, label in child_attrs:
# yield (name, smart_text('{} | {}').format(
# field.rel.to._meta.verbose_name, label).capitalize())
# else:
# yield (name, label.capitalize())
#
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
. Output only the next line. | (EXPORT, _('Export')), |
Predict the next line for this snippet: <|code_start|> max_row, max_col = processor.open(self.buffer_file.path)
processor.set_dimensions(0, 0, max_row, max_col)
start_row = 0
index = 1
while index < max_row:
row = processor.read(index - 1)
start_row = index
for col_index, col in enumerate(row):
if col:
index = max_row
break
index += 1
self.start_row = start_row
self.end_row = max_row
def create_default_fields(self, exclude=None, add_label=True):
"""Create all fields for selected model"""
fields = []
if not exclude:
exclude = []
if not self.model:
return []
<|code_end|>
with the help of current file imports:
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings as django_settings
from .settings import SETTINGS, strip_media_root
from .translation import gettext_lazy as _
from .lib.helpers import model_attributes
from .lib.signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .lib.exceptions import ErrorChoicesMixin
from mtr.utils.models.mixins import PositionRelatedMixin, \
TimeStampedMixin
from .lib.manager import manager
from .lib.manager import manager
from .lib.manager import manager
from .tasks import export_data, import_data
from .tasks import export_data, import_data
and context from other files:
# Path: mtr/sync/settings.py
# def get_buffer_file_path(instance, filename, absolute=False):
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
#
# Path: mtr/sync/translation.py
#
# Path: mtr/sync/lib/helpers.py
# def model_attributes(settings, prefix=None, model=None, parent=None, level=0):
# """Return iterator of fields names by given model in settings"""
#
# model = model or make_model_class(settings)
# include_related = True and settings.include_related
#
# for name, field in model_fields(model, settings).items():
# label = name
# child_attrs = None
# m_prefix = None
#
# if level > 1:
# continue
#
# if isinstance(field, models.ForeignKey):
# if not include_related:
# continue
# m_prefix = '{}|_fk_|'
# elif isinstance(field, models.ManyToManyField):
# if not include_related:
# continue
# m_prefix = '{}|_m_|'
# elif isinstance(field, property):
# field = field.fget
#
# label = getattr(
# field, 'short_description', getattr(
# field, 'verbose_name', getattr(
# field, '__name__', repr(field))))
#
# if m_prefix:
# child_attrs = []
# child_attrs = model_attributes(
# settings, m_prefix.format(name),
# model=field.rel.to, parent=model, level=level + 1)
# if prefix:
# name = ''.join((prefix, name))
#
# if child_attrs:
# for name, label in child_attrs:
# yield (name, smart_text('{} | {}').format(
# field.rel.to._meta.verbose_name, label).capitalize())
# else:
# yield (name, label.capitalize())
#
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
, which may contain function names, class names, or code. Output only the next line. | for name, label in model_attributes(self): |
Predict the next line after this snippet: <|code_start|> _('started at'), auto_now_add=True)
completed_at = models.DateTimeField(
_('completed at'), null=True, blank=True)
updated_at = models.DateTimeField(
_('updated at'), auto_now=True)
settings = models.ForeignKey(
Settings, verbose_name=_('settings'),
related_name='reports', null=True, blank=True
)
objects = models.Manager()
export_objects = ExportManager()
import_objects = ImportManager()
running_objects = RunningManager()
class Meta:
verbose_name = _('report')
verbose_name_plural = _('reports')
ordering = ('-id',)
def __str__(self):
return '{} - {}'.format(self.started_at, self.get_status_display())
def get_absolute_url(self):
if self.buffer_file:
return self.buffer_file.url
<|code_end|>
using the current file's imports:
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings as django_settings
from .settings import SETTINGS, strip_media_root
from .translation import gettext_lazy as _
from .lib.helpers import model_attributes
from .lib.signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .lib.exceptions import ErrorChoicesMixin
from mtr.utils.models.mixins import PositionRelatedMixin, \
TimeStampedMixin
from .lib.manager import manager
from .lib.manager import manager
from .lib.manager import manager
from .tasks import export_data, import_data
from .tasks import export_data, import_data
and any relevant context from other files:
# Path: mtr/sync/settings.py
# def get_buffer_file_path(instance, filename, absolute=False):
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
#
# Path: mtr/sync/translation.py
#
# Path: mtr/sync/lib/helpers.py
# def model_attributes(settings, prefix=None, model=None, parent=None, level=0):
# """Return iterator of fields names by given model in settings"""
#
# model = model or make_model_class(settings)
# include_related = True and settings.include_related
#
# for name, field in model_fields(model, settings).items():
# label = name
# child_attrs = None
# m_prefix = None
#
# if level > 1:
# continue
#
# if isinstance(field, models.ForeignKey):
# if not include_related:
# continue
# m_prefix = '{}|_fk_|'
# elif isinstance(field, models.ManyToManyField):
# if not include_related:
# continue
# m_prefix = '{}|_m_|'
# elif isinstance(field, property):
# field = field.fget
#
# label = getattr(
# field, 'short_description', getattr(
# field, 'verbose_name', getattr(
# field, '__name__', repr(field))))
#
# if m_prefix:
# child_attrs = []
# child_attrs = model_attributes(
# settings, m_prefix.format(name),
# model=field.rel.to, parent=model, level=level + 1)
# if prefix:
# name = ''.join((prefix, name))
#
# if child_attrs:
# for name, label in child_attrs:
# yield (name, smart_text('{} | {}').format(
# field.rel.to._meta.verbose_name, label).capitalize())
# else:
# yield (name, label.capitalize())
#
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
. Output only the next line. | @receiver(export_started) |
Based on the snippet: <|code_start|> settings = models.ForeignKey(
Settings, verbose_name=_('settings'),
related_name='reports', null=True, blank=True
)
objects = models.Manager()
export_objects = ExportManager()
import_objects = ImportManager()
running_objects = RunningManager()
class Meta:
verbose_name = _('report')
verbose_name_plural = _('reports')
ordering = ('-id',)
def __str__(self):
return '{} - {}'.format(self.started_at, self.get_status_display())
def get_absolute_url(self):
if self.buffer_file:
return self.buffer_file.url
@receiver(export_started)
def create_export_report(sender, **kwargs):
return Report.export_objects.create(
action=Report.EXPORT, settings=sender.settings)
<|code_end|>
, predict the immediate next line with the help of imports:
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings as django_settings
from .settings import SETTINGS, strip_media_root
from .translation import gettext_lazy as _
from .lib.helpers import model_attributes
from .lib.signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .lib.exceptions import ErrorChoicesMixin
from mtr.utils.models.mixins import PositionRelatedMixin, \
TimeStampedMixin
from .lib.manager import manager
from .lib.manager import manager
from .lib.manager import manager
from .tasks import export_data, import_data
from .tasks import export_data, import_data
and context (classes, functions, sometimes code) from other files:
# Path: mtr/sync/settings.py
# def get_buffer_file_path(instance, filename, absolute=False):
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
#
# Path: mtr/sync/translation.py
#
# Path: mtr/sync/lib/helpers.py
# def model_attributes(settings, prefix=None, model=None, parent=None, level=0):
# """Return iterator of fields names by given model in settings"""
#
# model = model or make_model_class(settings)
# include_related = True and settings.include_related
#
# for name, field in model_fields(model, settings).items():
# label = name
# child_attrs = None
# m_prefix = None
#
# if level > 1:
# continue
#
# if isinstance(field, models.ForeignKey):
# if not include_related:
# continue
# m_prefix = '{}|_fk_|'
# elif isinstance(field, models.ManyToManyField):
# if not include_related:
# continue
# m_prefix = '{}|_m_|'
# elif isinstance(field, property):
# field = field.fget
#
# label = getattr(
# field, 'short_description', getattr(
# field, 'verbose_name', getattr(
# field, '__name__', repr(field))))
#
# if m_prefix:
# child_attrs = []
# child_attrs = model_attributes(
# settings, m_prefix.format(name),
# model=field.rel.to, parent=model, level=level + 1)
# if prefix:
# name = ''.join((prefix, name))
#
# if child_attrs:
# for name, label in child_attrs:
# yield (name, smart_text('{} | {}').format(
# field.rel.to._meta.verbose_name, label).capitalize())
# else:
# yield (name, label.capitalize())
#
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
. Output only the next line. | @receiver(export_completed) |
Predict the next line for this snippet: <|code_start|> verbose_name_plural = _('reports')
ordering = ('-id',)
def __str__(self):
return '{} - {}'.format(self.started_at, self.get_status_display())
def get_absolute_url(self):
if self.buffer_file:
return self.buffer_file.url
@receiver(export_started)
def create_export_report(sender, **kwargs):
return Report.export_objects.create(
action=Report.EXPORT, settings=sender.settings)
@receiver(export_completed)
def save_export_report(sender, **kwargs):
report = sender.report
report.completed_at = kwargs['date']
report.buffer_file = kwargs['path']
report.status = report.SUCCESS
report.save()
return report
<|code_end|>
with the help of current file imports:
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings as django_settings
from .settings import SETTINGS, strip_media_root
from .translation import gettext_lazy as _
from .lib.helpers import model_attributes
from .lib.signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .lib.exceptions import ErrorChoicesMixin
from mtr.utils.models.mixins import PositionRelatedMixin, \
TimeStampedMixin
from .lib.manager import manager
from .lib.manager import manager
from .lib.manager import manager
from .tasks import export_data, import_data
from .tasks import export_data, import_data
and context from other files:
# Path: mtr/sync/settings.py
# def get_buffer_file_path(instance, filename, absolute=False):
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
#
# Path: mtr/sync/translation.py
#
# Path: mtr/sync/lib/helpers.py
# def model_attributes(settings, prefix=None, model=None, parent=None, level=0):
# """Return iterator of fields names by given model in settings"""
#
# model = model or make_model_class(settings)
# include_related = True and settings.include_related
#
# for name, field in model_fields(model, settings).items():
# label = name
# child_attrs = None
# m_prefix = None
#
# if level > 1:
# continue
#
# if isinstance(field, models.ForeignKey):
# if not include_related:
# continue
# m_prefix = '{}|_fk_|'
# elif isinstance(field, models.ManyToManyField):
# if not include_related:
# continue
# m_prefix = '{}|_m_|'
# elif isinstance(field, property):
# field = field.fget
#
# label = getattr(
# field, 'short_description', getattr(
# field, 'verbose_name', getattr(
# field, '__name__', repr(field))))
#
# if m_prefix:
# child_attrs = []
# child_attrs = model_attributes(
# settings, m_prefix.format(name),
# model=field.rel.to, parent=model, level=level + 1)
# if prefix:
# name = ''.join((prefix, name))
#
# if child_attrs:
# for name, label in child_attrs:
# yield (name, smart_text('{} | {}').format(
# field.rel.to._meta.verbose_name, label).capitalize())
# else:
# yield (name, label.capitalize())
#
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
, which may contain function names, class names, or code. Output only the next line. | @receiver(import_started) |
Given the following code snippet before the placeholder: <|code_start|> if self.buffer_file:
return self.buffer_file.url
@receiver(export_started)
def create_export_report(sender, **kwargs):
return Report.export_objects.create(
action=Report.EXPORT, settings=sender.settings)
@receiver(export_completed)
def save_export_report(sender, **kwargs):
report = sender.report
report.completed_at = kwargs['date']
report.buffer_file = kwargs['path']
report.status = report.SUCCESS
report.save()
return report
@receiver(import_started)
def create_import_report(sender, **kwargs):
return Report.import_objects.create(
buffer_file=strip_media_root(kwargs['path']),
settings=sender.settings,
action=Report.IMPORT)
<|code_end|>
, predict the next line using imports from the current file:
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings as django_settings
from .settings import SETTINGS, strip_media_root
from .translation import gettext_lazy as _
from .lib.helpers import model_attributes
from .lib.signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .lib.exceptions import ErrorChoicesMixin
from mtr.utils.models.mixins import PositionRelatedMixin, \
TimeStampedMixin
from .lib.manager import manager
from .lib.manager import manager
from .lib.manager import manager
from .tasks import export_data, import_data
from .tasks import export_data, import_data
and context including class names, function names, and sometimes code from other files:
# Path: mtr/sync/settings.py
# def get_buffer_file_path(instance, filename, absolute=False):
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
#
# Path: mtr/sync/translation.py
#
# Path: mtr/sync/lib/helpers.py
# def model_attributes(settings, prefix=None, model=None, parent=None, level=0):
# """Return iterator of fields names by given model in settings"""
#
# model = model or make_model_class(settings)
# include_related = True and settings.include_related
#
# for name, field in model_fields(model, settings).items():
# label = name
# child_attrs = None
# m_prefix = None
#
# if level > 1:
# continue
#
# if isinstance(field, models.ForeignKey):
# if not include_related:
# continue
# m_prefix = '{}|_fk_|'
# elif isinstance(field, models.ManyToManyField):
# if not include_related:
# continue
# m_prefix = '{}|_m_|'
# elif isinstance(field, property):
# field = field.fget
#
# label = getattr(
# field, 'short_description', getattr(
# field, 'verbose_name', getattr(
# field, '__name__', repr(field))))
#
# if m_prefix:
# child_attrs = []
# child_attrs = model_attributes(
# settings, m_prefix.format(name),
# model=field.rel.to, parent=model, level=level + 1)
# if prefix:
# name = ''.join((prefix, name))
#
# if child_attrs:
# for name, label in child_attrs:
# yield (name, smart_text('{} | {}').format(
# field.rel.to._meta.verbose_name, label).capitalize())
# else:
# yield (name, label.capitalize())
#
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
. Output only the next line. | @receiver(import_completed) |
Here is a snippet: <|code_start|> ERROR = 0
INFO = 1
MESSAGE_CHOICES = (
(ERROR, _('Error')),
(INFO, _('Info'))
)
report = models.ForeignKey(Report, related_name='messages')
message = models.TextField(_('message'), max_length=10000)
step = models.PositiveSmallIntegerField(
_('step'), choices=ErrorChoicesMixin.STEP_CHOICES,
default=ErrorChoicesMixin.UNDEFINED)
input_position = models.CharField(
_('input position'), max_length=10, blank=True)
input_value = models.TextField(
_('input value'), max_length=60000, null=True, blank=True)
type = models.PositiveSmallIntegerField(
_('type'), choices=MESSAGE_CHOICES, default=ERROR)
class Meta(PositionRelatedMixin.Meta):
verbose_name = _('message')
verbose_name_plural = _('messages')
def __str__(self):
return self.message
<|code_end|>
. Write the next line using the current file imports:
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings as django_settings
from .settings import SETTINGS, strip_media_root
from .translation import gettext_lazy as _
from .lib.helpers import model_attributes
from .lib.signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .lib.exceptions import ErrorChoicesMixin
from mtr.utils.models.mixins import PositionRelatedMixin, \
TimeStampedMixin
from .lib.manager import manager
from .lib.manager import manager
from .lib.manager import manager
from .tasks import export_data, import_data
from .tasks import export_data, import_data
and context from other files:
# Path: mtr/sync/settings.py
# def get_buffer_file_path(instance, filename, absolute=False):
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
#
# Path: mtr/sync/translation.py
#
# Path: mtr/sync/lib/helpers.py
# def model_attributes(settings, prefix=None, model=None, parent=None, level=0):
# """Return iterator of fields names by given model in settings"""
#
# model = model or make_model_class(settings)
# include_related = True and settings.include_related
#
# for name, field in model_fields(model, settings).items():
# label = name
# child_attrs = None
# m_prefix = None
#
# if level > 1:
# continue
#
# if isinstance(field, models.ForeignKey):
# if not include_related:
# continue
# m_prefix = '{}|_fk_|'
# elif isinstance(field, models.ManyToManyField):
# if not include_related:
# continue
# m_prefix = '{}|_m_|'
# elif isinstance(field, property):
# field = field.fget
#
# label = getattr(
# field, 'short_description', getattr(
# field, 'verbose_name', getattr(
# field, '__name__', repr(field))))
#
# if m_prefix:
# child_attrs = []
# child_attrs = model_attributes(
# settings, m_prefix.format(name),
# model=field.rel.to, parent=model, level=level + 1)
# if prefix:
# name = ''.join((prefix, name))
#
# if child_attrs:
# for name, label in child_attrs:
# yield (name, smart_text('{} | {}').format(
# field.rel.to._meta.verbose_name, label).capitalize())
# else:
# yield (name, label.capitalize())
#
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
, which may include functions, classes, or code. Output only the next line. | @receiver(error_raised) |
Given the code snippet: <|code_start|>def save_export_report(sender, **kwargs):
report = sender.report
report.completed_at = kwargs['date']
report.buffer_file = kwargs['path']
report.status = report.SUCCESS
report.save()
return report
@receiver(import_started)
def create_import_report(sender, **kwargs):
return Report.import_objects.create(
buffer_file=strip_media_root(kwargs['path']),
settings=sender.settings,
action=Report.IMPORT)
@receiver(import_completed)
def save_import_report(sender, **kwargs):
report = sender.report
report.completed_at = kwargs['date']
report.save()
return report
@python_2_unicode_compatible
<|code_end|>
, generate the next line using the imports in this file:
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings as django_settings
from .settings import SETTINGS, strip_media_root
from .translation import gettext_lazy as _
from .lib.helpers import model_attributes
from .lib.signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .lib.exceptions import ErrorChoicesMixin
from mtr.utils.models.mixins import PositionRelatedMixin, \
TimeStampedMixin
from .lib.manager import manager
from .lib.manager import manager
from .lib.manager import manager
from .tasks import export_data, import_data
from .tasks import export_data, import_data
and context (functions, classes, or occasionally code) from other files:
# Path: mtr/sync/settings.py
# def get_buffer_file_path(instance, filename, absolute=False):
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
#
# Path: mtr/sync/translation.py
#
# Path: mtr/sync/lib/helpers.py
# def model_attributes(settings, prefix=None, model=None, parent=None, level=0):
# """Return iterator of fields names by given model in settings"""
#
# model = model or make_model_class(settings)
# include_related = True and settings.include_related
#
# for name, field in model_fields(model, settings).items():
# label = name
# child_attrs = None
# m_prefix = None
#
# if level > 1:
# continue
#
# if isinstance(field, models.ForeignKey):
# if not include_related:
# continue
# m_prefix = '{}|_fk_|'
# elif isinstance(field, models.ManyToManyField):
# if not include_related:
# continue
# m_prefix = '{}|_m_|'
# elif isinstance(field, property):
# field = field.fget
#
# label = getattr(
# field, 'short_description', getattr(
# field, 'verbose_name', getattr(
# field, '__name__', repr(field))))
#
# if m_prefix:
# child_attrs = []
# child_attrs = model_attributes(
# settings, m_prefix.format(name),
# model=field.rel.to, parent=model, level=level + 1)
# if prefix:
# name = ''.join((prefix, name))
#
# if child_attrs:
# for name, label in child_attrs:
# yield (name, smart_text('{} | {}').format(
# field.rel.to._meta.verbose_name, label).capitalize())
# else:
# yield (name, label.capitalize())
#
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
. Output only the next line. | class Message(PositionRelatedMixin, ErrorChoicesMixin): |
Predict the next line for this snippet: <|code_start|>
return filter_params, can_create
def filter_attrs(raw_attrs, fields, mfields, name=None):
update_fields = [f.update for f in fields] or fields
update_values = {}
for field in update_fields:
if ('_|' not in field.attribute and name is None) or \
(name and name in field.attribute):
update_values[field.attribute] = field.set_value or \
raw_attrs[field.attribute]
return update_values
def find_instances(model, model_attrs, params, fields):
filter_params = params
instances = model._default_manager.all()
filter_params, can_create = filter_fields(
model_attrs, fields, can_create=False, params=filter_params)
if filter_params.keys():
instances = instances.filter(**filter_params)
return instances
<|code_end|>
with the help of current file imports:
from django.db import models
from .manager import manager
from ..translation import gettext_lazy as _
from mtr.utils.helpers import update_instance
and context from other files:
# Path: mtr/sync/lib/manager.py
# class Manager(BaseManager):
# def convert_value(self, value, field, export=False, action=None):
# def processor_choices(self):
# def dataset_choices(self):
# def action_choices(self):
# def converter_choices(self):
# def make_processor(self, settings, from_extension=False):
# def export_data(self, settings, data=None):
# def export_data_inline(self, **settings):
# def import_data_inline(self, **settings):
# def filter_dataset(self, settings, dataset=None):
# def prepare_export_dataset(self, settings):
# def prepare_export_data(self, processor, queryset):
# def prepare_handlers(self, key_name, processor, model, context=None):
# def prepare_context(self, settings, path):
# def prepare_import_data(self, processor, model):
# def import_data(self, settings, path=None):
# def prepare_import_dataset(self, settings, processor, model, fields):
# def model_data(self, settings, processor, model, fields):
#
# Path: mtr/sync/translation.py
, which may contain function names, class names, or code. Output only the next line. | @manager.register('action', _('Create only'), use_transaction=True) |
Given the following code snippet before the placeholder: <|code_start|>
return filter_params, can_create
def filter_attrs(raw_attrs, fields, mfields, name=None):
update_fields = [f.update for f in fields] or fields
update_values = {}
for field in update_fields:
if ('_|' not in field.attribute and name is None) or \
(name and name in field.attribute):
update_values[field.attribute] = field.set_value or \
raw_attrs[field.attribute]
return update_values
def find_instances(model, model_attrs, params, fields):
filter_params = params
instances = model._default_manager.all()
filter_params, can_create = filter_fields(
model_attrs, fields, can_create=False, params=filter_params)
if filter_params.keys():
instances = instances.filter(**filter_params)
return instances
<|code_end|>
, predict the next line using imports from the current file:
from django.db import models
from .manager import manager
from ..translation import gettext_lazy as _
from mtr.utils.helpers import update_instance
and context including class names, function names, and sometimes code from other files:
# Path: mtr/sync/lib/manager.py
# class Manager(BaseManager):
# def convert_value(self, value, field, export=False, action=None):
# def processor_choices(self):
# def dataset_choices(self):
# def action_choices(self):
# def converter_choices(self):
# def make_processor(self, settings, from_extension=False):
# def export_data(self, settings, data=None):
# def export_data_inline(self, **settings):
# def import_data_inline(self, **settings):
# def filter_dataset(self, settings, dataset=None):
# def prepare_export_dataset(self, settings):
# def prepare_export_data(self, processor, queryset):
# def prepare_handlers(self, key_name, processor, model, context=None):
# def prepare_context(self, settings, path):
# def prepare_import_data(self, processor, model):
# def import_data(self, settings, path=None):
# def prepare_import_dataset(self, settings, processor, model, fields):
# def model_data(self, settings, processor, model, fields):
#
# Path: mtr/sync/translation.py
. Output only the next line. | @manager.register('action', _('Create only'), use_transaction=True) |
Given the code snippet: <|code_start|>
def save(self):
"""Save result file"""
raise NotImplementedError
def create_export_path(self):
# TODO: refactor filepath
filename = '{}{}'.format(
self.settings.filename or str(self.report.id), self.file_format)
path = SETTINGS['path'](self.report, '', absolute=True)
if not os.path.exists(path):
os.makedirs(path)
return filename, os.path.join(path, filename)
def write_header(self, data):
if self.settings.include_header and data['fields']:
header_data = [
field.name or field.attribute
for field in data['fields']
]
self.write(self.start['row'], header_data)
def export_data(self, data):
"""Export data from queryset to file and return path"""
# send signal to create report
<|code_end|>
, generate the next line using the imports in this file:
import os
import traceback
from django.utils.six.moves import range
from django.utils import timezone
from django.db import transaction, Error
from django.core.exceptions import ValidationError
from .signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .exceptions import ErrorChoicesMixin
from ..settings import SETTINGS
and context (functions, classes, or occasionally code) from other files:
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
#
# Path: mtr/sync/settings.py
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
. Output only the next line. | for response in export_started.send(self): |
Predict the next line for this snippet: <|code_start|> def export_data(self, data):
"""Export data from queryset to file and return path"""
# send signal to create report
for response in export_started.send(self):
self.report = response[1]
self.set_dimensions(0, data['rows'], data['cols'])
filename, path = self.create_export_path()
self.create(path)
# write header
self.write_header(data)
# write data
data = data['items']
for row in self.rows:
row_data = []
for col in self.cells:
row_data.append(next(data))
self.write(row, row_data)
self.save()
# BUGFIX: created_at timezone +hours
# send signal to save report
<|code_end|>
with the help of current file imports:
import os
import traceback
from django.utils.six.moves import range
from django.utils import timezone
from django.db import transaction, Error
from django.core.exceptions import ValidationError
from .signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .exceptions import ErrorChoicesMixin
from ..settings import SETTINGS
and context from other files:
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
#
# Path: mtr/sync/settings.py
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
, which may contain function names, class names, or code. Output only the next line. | for response in export_completed.send( |
Given the following code snippet before the placeholder: <|code_start|>
attrs = related_attrs.get(key_model, {})
attrs[key_attr] = value
related_attrs[key_model] = attrs
return related_attrs
def prepare_attrs(self, _model):
model_attrs = {}
related_attrs = {}
for key in _model.keys():
if '|_fk_|' in key:
related_attrs = self._prepare_fk_attrs(
related_attrs, key, _model)
elif '|_m_|' in key:
related_attrs = self._prepare_mtm_attrs(
related_attrs, key, _model)
else:
model_attrs[key] = _model[key]
return model_attrs, related_attrs
def import_data(self, model, path=None):
"""Import data to model and return errors if exists"""
if self.settings.buffer_file:
path = self.settings.buffer_file.path
# send signal to create report
<|code_end|>
, predict the next line using imports from the current file:
import os
import traceback
from django.utils.six.moves import range
from django.utils import timezone
from django.db import transaction, Error
from django.core.exceptions import ValidationError
from .signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .exceptions import ErrorChoicesMixin
from ..settings import SETTINGS
and context including class names, function names, and sometimes code from other files:
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
#
# Path: mtr/sync/settings.py
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
. Output only the next line. | for response in import_started.send(self, path=path): |
Next line prediction: <|code_start|> model, model_attrs, related_attrs, context, **kwargs)
except (Error, ValueError, ValidationError,
AttributeError, TypeError, IndexError):
if use_transaction:
transaction.savepoint_rollback(sid)
error_message = traceback.format_exc()
value = {
'model_attrs': model_attrs,
'related_attrs': related_attrs
}
error_raised.send(
self,
error=error_message,
position=row,
value=value,
step=ErrorChoicesMixin.IMPORT_DATA)
self.report.status = self.report.ERROR
context.update(kwargs)
context['error_message'] = error_message
self.manager.prepare_handlers(
'error', self, model, context)
self.manager.prepare_handlers('after', self, model, context)
# send signal to save report
<|code_end|>
. Use current file imports:
(import os
import traceback
from django.utils.six.moves import range
from django.utils import timezone
from django.db import transaction, Error
from django.core.exceptions import ValidationError
from .signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .exceptions import ErrorChoicesMixin
from ..settings import SETTINGS)
and context including class names, function names, or small code snippets from other files:
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
#
# Path: mtr/sync/settings.py
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
. Output only the next line. | for response in import_completed.send( |
Predict the next line after this snippet: <|code_start|> action = transaction.atomic(action)
for row, _model in data['items']:
model_attrs, related_attrs = self.prepare_attrs(_model)
model_attrs.update(**params)
kwargs = dict(
processor=self, path=path, fields=data['fields'],
params=params, raw_attrs=_model,
mfields=data['mfields'],
)
if use_transaction:
sid = transaction.savepoint()
try:
context = action(
model, model_attrs, related_attrs, context, **kwargs)
except (Error, ValueError, ValidationError,
AttributeError, TypeError, IndexError):
if use_transaction:
transaction.savepoint_rollback(sid)
error_message = traceback.format_exc()
value = {
'model_attrs': model_attrs,
'related_attrs': related_attrs
}
<|code_end|>
using the current file's imports:
import os
import traceback
from django.utils.six.moves import range
from django.utils import timezone
from django.db import transaction, Error
from django.core.exceptions import ValidationError
from .signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .exceptions import ErrorChoicesMixin
from ..settings import SETTINGS
and any relevant context from other files:
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
#
# Path: mtr/sync/settings.py
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
. Output only the next line. | error_raised.send( |
Here is a snippet: <|code_start|>
kwargs = dict(
processor=self, path=path, fields=data['fields'],
params=params, raw_attrs=_model,
mfields=data['mfields'],
)
if use_transaction:
sid = transaction.savepoint()
try:
context = action(
model, model_attrs, related_attrs, context, **kwargs)
except (Error, ValueError, ValidationError,
AttributeError, TypeError, IndexError):
if use_transaction:
transaction.savepoint_rollback(sid)
error_message = traceback.format_exc()
value = {
'model_attrs': model_attrs,
'related_attrs': related_attrs
}
error_raised.send(
self,
error=error_message,
position=row,
value=value,
<|code_end|>
. Write the next line using the current file imports:
import os
import traceback
from django.utils.six.moves import range
from django.utils import timezone
from django.db import transaction, Error
from django.core.exceptions import ValidationError
from .signals import export_started, export_completed, \
import_started, import_completed, error_raised
from .exceptions import ErrorChoicesMixin
from ..settings import SETTINGS
and context from other files:
# Path: mtr/sync/lib/signals.py
#
# Path: mtr/sync/lib/exceptions.py
# class ErrorChoicesMixin(object):
# IMPORT_DATA = 0
# UNDEFINED = 1
#
# STEP_CHOICES = (
# (IMPORT_DATA, _('import data')),
# (UNDEFINED, _('unexpected error'))
# )
#
# Path: mtr/sync/settings.py
# SETTINGS = getattr_with_prefix('SYNC', 'SETTINGS', {
# 'path': get_buffer_file_path,
# 'default_processor': 'XlsxProcessor',
# 'actions': ['mtr.sync.lib.actions'],
# 'converters': ['mtr.sync.lib.converters'],
# 'processors': [
# 'mtr.sync.lib.processors.xlsx',
# 'mtr.sync.lib.processors.xls',
# 'mtr.sync.lib.processors.ods',
# 'mtr.sync.lib.processors.csv',
# ],
# 'broker': 'rq',
# 'include': {
# 'api': False,
# 'admin': True,
# }
# })
, which may include functions, classes, or code. Output only the next line. | step=ErrorChoicesMixin.IMPORT_DATA) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.