Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given the code snippet: <|code_start|> req_transaction_uuid = NullCharField(max_length=40)
request_token = NullCharField(max_length=200)
transaction_id = NullCharField(max_length=64)
# Timestamps
date_modified = models.DateTimeField(_("Date Modified"), auto_now=True)
date_created = models.DateTimeField(_("Date Received"), auto_now_add=True)
class Meta:
verbose_name = _("CyberSource Reply")
verbose_name_plural = _("CyberSource Replies")
ordering = ("date_created",)
def __str__(self):
return _("CyberSource Reply %(created)s") % dict(created=self.date_created)
@property
def signed_date_time(self):
try:
return dateutil.parser.parse(self.data["signed_date_time"])
except (AttributeError, ValueError, KeyError):
return self.date_created
def get_decision(self):
# Accept
if self.reason_code in (100,):
return DECISION_ACCEPT
# Review
if self.reason_code in (201, 480):
<|code_end|>
, generate the next line using the imports in this file:
from cryptography.fernet import InvalidToken
from django.conf import settings
from django.db import models
from django.contrib.postgres.fields import HStoreField
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from oscar.core.compat import AUTH_USER_MODEL
from oscar.models.fields import NullCharField
from fernet_fields import EncryptedTextField
from .constants import (
DECISION_ACCEPT,
DECISION_REVIEW,
DECISION_DECLINE,
DECISION_ERROR,
)
import dateutil.parser
import logging
and context (functions, classes, or occasionally code) from other files:
# Path: src/cybersource/constants.py
# DECISION_ACCEPT = "ACCEPT"
#
# DECISION_REVIEW = "REVIEW"
#
# DECISION_DECLINE = "DECLINE"
#
# DECISION_ERROR = "ERROR"
. Output only the next line. | return DECISION_REVIEW |
Using the snippet: <|code_start|> if self.reason_code in (100,):
return DECISION_ACCEPT
# Review
if self.reason_code in (201, 480):
return DECISION_REVIEW
# Rejections
if self.reason_code in (
110,
200,
202,
203,
204,
205,
207,
208,
210,
211,
221,
222,
230,
231,
232,
233,
234,
400,
481,
520,
):
<|code_end|>
, determine the next line of code. You have imports:
from cryptography.fernet import InvalidToken
from django.conf import settings
from django.db import models
from django.contrib.postgres.fields import HStoreField
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from oscar.core.compat import AUTH_USER_MODEL
from oscar.models.fields import NullCharField
from fernet_fields import EncryptedTextField
from .constants import (
DECISION_ACCEPT,
DECISION_REVIEW,
DECISION_DECLINE,
DECISION_ERROR,
)
import dateutil.parser
import logging
and context (class names, function names, or code) available:
# Path: src/cybersource/constants.py
# DECISION_ACCEPT = "ACCEPT"
#
# DECISION_REVIEW = "REVIEW"
#
# DECISION_DECLINE = "DECLINE"
#
# DECISION_ERROR = "ERROR"
. Output only the next line. | return DECISION_DECLINE |
Continue the code snippet: <|code_start|> if self.reason_code in (201, 480):
return DECISION_REVIEW
# Rejections
if self.reason_code in (
110,
200,
202,
203,
204,
205,
207,
208,
210,
211,
221,
222,
230,
231,
232,
233,
234,
400,
481,
520,
):
return DECISION_DECLINE
# Errors
if self.reason_code in (101, 102, 104, 150, 151, 152, 236, 240):
<|code_end|>
. Use current file imports:
from cryptography.fernet import InvalidToken
from django.conf import settings
from django.db import models
from django.contrib.postgres.fields import HStoreField
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from oscar.core.compat import AUTH_USER_MODEL
from oscar.models.fields import NullCharField
from fernet_fields import EncryptedTextField
from .constants import (
DECISION_ACCEPT,
DECISION_REVIEW,
DECISION_DECLINE,
DECISION_ERROR,
)
import dateutil.parser
import logging
and context (classes, functions, or code) from other files:
# Path: src/cybersource/constants.py
# DECISION_ACCEPT = "ACCEPT"
#
# DECISION_REVIEW = "REVIEW"
#
# DECISION_DECLINE = "DECLINE"
#
# DECISION_ERROR = "ERROR"
. Output only the next line. | return DECISION_ERROR |
Continue the code snippet: <|code_start|> return None # '!netboy key [' + name + '] does not exist'
except Exception:
raise NetBoy.Exception('netboy exception: ' + name)
def __setattr__(self, name, value):
# type: (str, Any) -> None
self[name] = value
def __init__(self, payload=None, share=None):
self.payload = payload
if share:
s = pycurl.CurlShare()
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_SSL_SESSION)
self.share = s
else:
self.share = None
def run(self, payload=None, loop=None):
real_payload = payload
if self.payload is None:
real_payload = payload
elif payload is None:
real_payload = self.payload
else:
real_payload = self.payload + payload
ress = run(net_boy(real_payload, self.share), loop=loop)
obj_ress = []
for v in ress:
<|code_end|>
. Use current file imports:
import typing
import pycurl
from falsy.netboy.curl_loop import CurlLoop
from falsy.netboy.fetch import net_boy
from falsy.netboy.run import run
and context (classes, functions, or code) from other files:
# Path: falsy/netboy/curl_loop.py
# class CurlLoop:
# class CurlException(Exception):
# def __init__(self, code, desc, data):
# self.code = code
# self.desc = desc
# self.data = data
#
# _multi = pycurl.CurlMulti()
# _multi.setopt(pycurl.M_PIPELINING, 1)
# atexit.register(_multi.close)
# _futures = {}
#
# @classmethod
# async def handler_ready(cls, c):
# cls._futures[c] = aio.Future()
# cls._multi.add_handle(c)
# try:
# try:
# curl_ret = await cls._futures[c]
# except CurlLoop.CurlException as e:
# return {
# 'url': c._raw_url,
# 'id': c._raw_id,
# 'payload': c._raw_payload,
# 'spider': 'pycurl',
# 'state': 'error',
# 'error_code': e.code,
# 'error_desc': e.desc,
# }
# except Exception as e:
# return {
# 'url': c._raw_url,
# 'id': c._raw_id,
# 'payload': c._raw_payload,
# 'spider': 'pycurl',
# 'state': 'critical',
# 'error_code': -1,
# 'error_desc': "{} - {}".format(type(e), str(e)),
# }
# return curl_ret
# finally:
# cls._multi.remove_handle(c)
#
# @classmethod
# def perform(cls):
# if cls._futures:
# while True:
# status, num_active = cls._multi.perform()
# if status != pycurl.E_CALL_MULTI_PERFORM:
# break
# while True:
# num_ready, success, fail = cls._multi.info_read()
# for c in success:
# cc = cls._futures.pop(c)
# result = curl_result(c)
# result['url'] = c._raw_url
# result['id'] = c._raw_id
# result['state'] = 'normal'
# result['spider'] = 'pycurl'
# result['payload'] = payload = c._raw_payload
#
# # post_func = payload.get('post_func')
# # if type(post_func) == str:
# # post_func = load(post_func)
# # if post_func:
# # result = post_func(payload, result)
#
# cc.set_result(result)
# for c, err_num, err_msg in fail:
# print('error:', err_num, err_msg, c.getinfo(pycurl.EFFECTIVE_URL))
# result = curl_result(c)
#
# result['url'] = c._raw_url
# result['id'] = c._raw_id
# result['state'] = 'error'
# result['spider'] = 'pycurl'
# result['error_code'] = err_num
# result['error_desc'] = err_msg
#
# result['payload'] = payload = c._raw_payload
#
# # post_func = payload.get('post_func')
# # if type(post_func) == str:
# # post_func = load(post_func)
# # if post_func:
# # result2 = post_func(payload, result)
# # if type(result2) is dict and len(result2) >= len(result):
# # result = result2
# cls._futures.pop(c).set_exception(CurlLoop.CurlException(code=err_num, desc=err_msg, data=result))
# if num_ready == 0:
# break
#
# Path: falsy/netboy/fetch.py
# async def net_boy(payload, share=None):
# targets = []
# for p in payload:
# if p.get('postfields'):
# targets.append(post_request(p, share))
# else:
# targets.append(get_request(p, share))
# res = await aio.gather(
# *targets, return_exceptions=True
# )
# return res
#
# Path: falsy/netboy/run.py
# def run(coro, loop=None):
# async def main_task():
# pycurl_task = aio.ensure_future(curl_loop())
# try:
# r = await coro
# finally:
# pycurl_task.cancel()
# with suppress(aio.CancelledError):
# await pycurl_task
# return r, pycurl_task
#
# if loop is None:
# loop = uvloop.new_event_loop()
# # loop = aio.get_event_loop()
# aio.set_event_loop(loop)
# loop.set_exception_handler(exception_handler)
# r, _ = loop.run_until_complete(main_task())
# return r
. Output only the next line. | if type(v) == CurlLoop.CurlException: |
Given the code snippet: <|code_start|> return self[name]
except KeyError:
# raise NetBoy.Exception('netboy key error: ' + name)
return None # '!netboy key [' + name + '] does not exist'
except Exception:
raise NetBoy.Exception('netboy exception: ' + name)
def __setattr__(self, name, value):
# type: (str, Any) -> None
self[name] = value
def __init__(self, payload=None, share=None):
self.payload = payload
if share:
s = pycurl.CurlShare()
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_SSL_SESSION)
self.share = s
else:
self.share = None
def run(self, payload=None, loop=None):
real_payload = payload
if self.payload is None:
real_payload = payload
elif payload is None:
real_payload = self.payload
else:
real_payload = self.payload + payload
<|code_end|>
, generate the next line using the imports in this file:
import typing
import pycurl
from falsy.netboy.curl_loop import CurlLoop
from falsy.netboy.fetch import net_boy
from falsy.netboy.run import run
and context (functions, classes, or occasionally code) from other files:
# Path: falsy/netboy/curl_loop.py
# class CurlLoop:
# class CurlException(Exception):
# def __init__(self, code, desc, data):
# self.code = code
# self.desc = desc
# self.data = data
#
# _multi = pycurl.CurlMulti()
# _multi.setopt(pycurl.M_PIPELINING, 1)
# atexit.register(_multi.close)
# _futures = {}
#
# @classmethod
# async def handler_ready(cls, c):
# cls._futures[c] = aio.Future()
# cls._multi.add_handle(c)
# try:
# try:
# curl_ret = await cls._futures[c]
# except CurlLoop.CurlException as e:
# return {
# 'url': c._raw_url,
# 'id': c._raw_id,
# 'payload': c._raw_payload,
# 'spider': 'pycurl',
# 'state': 'error',
# 'error_code': e.code,
# 'error_desc': e.desc,
# }
# except Exception as e:
# return {
# 'url': c._raw_url,
# 'id': c._raw_id,
# 'payload': c._raw_payload,
# 'spider': 'pycurl',
# 'state': 'critical',
# 'error_code': -1,
# 'error_desc': "{} - {}".format(type(e), str(e)),
# }
# return curl_ret
# finally:
# cls._multi.remove_handle(c)
#
# @classmethod
# def perform(cls):
# if cls._futures:
# while True:
# status, num_active = cls._multi.perform()
# if status != pycurl.E_CALL_MULTI_PERFORM:
# break
# while True:
# num_ready, success, fail = cls._multi.info_read()
# for c in success:
# cc = cls._futures.pop(c)
# result = curl_result(c)
# result['url'] = c._raw_url
# result['id'] = c._raw_id
# result['state'] = 'normal'
# result['spider'] = 'pycurl'
# result['payload'] = payload = c._raw_payload
#
# # post_func = payload.get('post_func')
# # if type(post_func) == str:
# # post_func = load(post_func)
# # if post_func:
# # result = post_func(payload, result)
#
# cc.set_result(result)
# for c, err_num, err_msg in fail:
# print('error:', err_num, err_msg, c.getinfo(pycurl.EFFECTIVE_URL))
# result = curl_result(c)
#
# result['url'] = c._raw_url
# result['id'] = c._raw_id
# result['state'] = 'error'
# result['spider'] = 'pycurl'
# result['error_code'] = err_num
# result['error_desc'] = err_msg
#
# result['payload'] = payload = c._raw_payload
#
# # post_func = payload.get('post_func')
# # if type(post_func) == str:
# # post_func = load(post_func)
# # if post_func:
# # result2 = post_func(payload, result)
# # if type(result2) is dict and len(result2) >= len(result):
# # result = result2
# cls._futures.pop(c).set_exception(CurlLoop.CurlException(code=err_num, desc=err_msg, data=result))
# if num_ready == 0:
# break
#
# Path: falsy/netboy/fetch.py
# async def net_boy(payload, share=None):
# targets = []
# for p in payload:
# if p.get('postfields'):
# targets.append(post_request(p, share))
# else:
# targets.append(get_request(p, share))
# res = await aio.gather(
# *targets, return_exceptions=True
# )
# return res
#
# Path: falsy/netboy/run.py
# def run(coro, loop=None):
# async def main_task():
# pycurl_task = aio.ensure_future(curl_loop())
# try:
# r = await coro
# finally:
# pycurl_task.cancel()
# with suppress(aio.CancelledError):
# await pycurl_task
# return r, pycurl_task
#
# if loop is None:
# loop = uvloop.new_event_loop()
# # loop = aio.get_event_loop()
# aio.set_event_loop(loop)
# loop.set_exception_handler(exception_handler)
# r, _ = loop.run_until_complete(main_task())
# return r
. Output only the next line. | ress = run(net_boy(real_payload, self.share), loop=loop) |
Given the code snippet: <|code_start|>class NetBoy:
class Exception(Exception):
pass
class Dict(typing.Dict[str, typing.Any]):
def __getattr__(self, name):
# type: (str) -> Any
try:
return self[name]
except KeyError:
# raise NetBoy.Exception('netboy key error: ' + name)
return None # '!netboy key [' + name + '] does not exist'
except Exception:
raise NetBoy.Exception('netboy exception: ' + name)
def __setattr__(self, name, value):
# type: (str, Any) -> None
self[name] = value
def __init__(self, payload=None, share=None):
self.payload = payload
if share:
s = pycurl.CurlShare()
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)
s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_SSL_SESSION)
self.share = s
else:
self.share = None
<|code_end|>
, generate the next line using the imports in this file:
import typing
import pycurl
from falsy.netboy.curl_loop import CurlLoop
from falsy.netboy.fetch import net_boy
from falsy.netboy.run import run
and context (functions, classes, or occasionally code) from other files:
# Path: falsy/netboy/curl_loop.py
# class CurlLoop:
# class CurlException(Exception):
# def __init__(self, code, desc, data):
# self.code = code
# self.desc = desc
# self.data = data
#
# _multi = pycurl.CurlMulti()
# _multi.setopt(pycurl.M_PIPELINING, 1)
# atexit.register(_multi.close)
# _futures = {}
#
# @classmethod
# async def handler_ready(cls, c):
# cls._futures[c] = aio.Future()
# cls._multi.add_handle(c)
# try:
# try:
# curl_ret = await cls._futures[c]
# except CurlLoop.CurlException as e:
# return {
# 'url': c._raw_url,
# 'id': c._raw_id,
# 'payload': c._raw_payload,
# 'spider': 'pycurl',
# 'state': 'error',
# 'error_code': e.code,
# 'error_desc': e.desc,
# }
# except Exception as e:
# return {
# 'url': c._raw_url,
# 'id': c._raw_id,
# 'payload': c._raw_payload,
# 'spider': 'pycurl',
# 'state': 'critical',
# 'error_code': -1,
# 'error_desc': "{} - {}".format(type(e), str(e)),
# }
# return curl_ret
# finally:
# cls._multi.remove_handle(c)
#
# @classmethod
# def perform(cls):
# if cls._futures:
# while True:
# status, num_active = cls._multi.perform()
# if status != pycurl.E_CALL_MULTI_PERFORM:
# break
# while True:
# num_ready, success, fail = cls._multi.info_read()
# for c in success:
# cc = cls._futures.pop(c)
# result = curl_result(c)
# result['url'] = c._raw_url
# result['id'] = c._raw_id
# result['state'] = 'normal'
# result['spider'] = 'pycurl'
# result['payload'] = payload = c._raw_payload
#
# # post_func = payload.get('post_func')
# # if type(post_func) == str:
# # post_func = load(post_func)
# # if post_func:
# # result = post_func(payload, result)
#
# cc.set_result(result)
# for c, err_num, err_msg in fail:
# print('error:', err_num, err_msg, c.getinfo(pycurl.EFFECTIVE_URL))
# result = curl_result(c)
#
# result['url'] = c._raw_url
# result['id'] = c._raw_id
# result['state'] = 'error'
# result['spider'] = 'pycurl'
# result['error_code'] = err_num
# result['error_desc'] = err_msg
#
# result['payload'] = payload = c._raw_payload
#
# # post_func = payload.get('post_func')
# # if type(post_func) == str:
# # post_func = load(post_func)
# # if post_func:
# # result2 = post_func(payload, result)
# # if type(result2) is dict and len(result2) >= len(result):
# # result = result2
# cls._futures.pop(c).set_exception(CurlLoop.CurlException(code=err_num, desc=err_msg, data=result))
# if num_ready == 0:
# break
#
# Path: falsy/netboy/fetch.py
# async def net_boy(payload, share=None):
# targets = []
# for p in payload:
# if p.get('postfields'):
# targets.append(post_request(p, share))
# else:
# targets.append(get_request(p, share))
# res = await aio.gather(
# *targets, return_exceptions=True
# )
# return res
#
# Path: falsy/netboy/run.py
# def run(coro, loop=None):
# async def main_task():
# pycurl_task = aio.ensure_future(curl_loop())
# try:
# r = await coro
# finally:
# pycurl_task.cancel()
# with suppress(aio.CancelledError):
# await pycurl_task
# return r, pycurl_task
#
# if loop is None:
# loop = uvloop.new_event_loop()
# # loop = aio.get_event_loop()
# aio.set_event_loop(loop)
# loop.set_exception_handler(exception_handler)
# r, _ = loop.run_until_complete(main_task())
# return r
. Output only the next line. | def run(self, payload=None, loop=None): |
Given the code snippet: <|code_start|>
f = FALSY(static_dir='demo/with_wsgi/static') \
.swagger('demo/with_wsgi/spec.yml', ui=True, ui_language='zh-cn', theme='normal') \
.wsgi(flask_app, PRE_FLASK) \
<|code_end|>
, generate the next line using the imports in this file:
from demo.with_wsgi.ops.flask import flask_app, PRE_FLASK
from demo.with_wsgi.ops.tornado import tornado_app, PRE_TORNADO
from falsy.falsy import FALSY
and context (functions, classes, or occasionally code) from other files:
# Path: demo/with_wsgi/ops/flask.py
# PRE_FLASK = 'flask'
# def pre_flask(route):
# def hello_flask():
#
# Path: demo/with_wsgi/ops/tornado.py
# class TornadoHandler(tornado.web.RequestHandler):
# def get(self):
# def pre_tornado(route):
# PRE_TORNADO = 'tornado'
#
# Path: falsy/falsy.py
# class FALSY:
# def __init__(self, falcon_api=None,
# static_path='static', static_dir='static', log_config=None):
# if log_config is None:
# self.log = JLog().setup().bind()
# else:
# self.log = JLog().setup(config=log_config).bind()
# self.log.info(cc('falsy init', fore=77, styles=['italic', 'underlined', 'reverse']))
#
# self.api = self.falcon_api = falcon_api or falcon.API()
# self.static_path = static_path.strip('/')
# self.static_dir = static_dir if os.path.isdir(static_dir) else '.'
#
# self.api = CommonStaticMiddleware(self.falcon_api, static_dir=self.static_dir,
# url_prefix=self.static_path)
# self.log.info('common static middleware loaded\n\t{}'.format(
# 'url_prefix(static_path):' + reverse() + self.static_path + rreverse() +
# ', static_dir:' + reverse() + self.static_dir + rreverse()))
#
# def wsgi(self, app, url_prefix='/wsgi'):
# self.api = CommonWSGIMiddleware(self.api, app, url_prefix=url_prefix)
# self.log.info('common wsgi middleware loaded\n\t{}'.format('url_prefix:' + self.static_path))
# return self
#
# def swagger(self, filename, ui=True, new_file=None, ui_language='en', theme='normal', errors=None, cors_origin=None, api_url=None):
# server = SwaggerServer(errors=errors, cors_origin=cors_origin)
# self.log.info('swagger server init')
#
# swagger_file = filename.replace('/', '_')
# if swagger_file.endswith('yml') or swagger_file.endswith('yaml'):
# new_file = new_file or swagger_file
# new_file = new_file.replace('.yaml', '.json')
# new_file = new_file.replace('.yml', '.json')
# new_path = self.static_dir + '/' + new_file
# with open(filename, 'r') as f:
# config = yaml.load(f, Loader)
# server.load_specs(config)
# with open(new_path, 'w') as fw:
# config = self.remove_error_info(config)
# json.dump(config, fw, sort_keys=True, indent=4)
# self.log.info('swagger file generated(from yaml file)\n\t{}'.format(
# 'new_path:' + reverse() + new_path + rreverse()))
# else:
# new_file = new_file or swagger_file
# new_path = self.static_dir + '/' + new_file
# with open(filename, 'r') as fr:
# config = fr.read()
# server.load_specs(config)
# with open(new_path, 'w') as fw:
# config = json.loads(self.remove_error_info(config))
# json.dump(config, fw, sort_keys=True, indent=4)
# self.log.info('swagger file generated(from json file)\n\t{}'.format(
# 'new_path:' + reverse() + new_path + rreverse()))
# path = server.basePath
# path = path.lstrip('/') if path else 'v0'
# self.falcon_api.add_sink(server, '/' + path)
# self.log.info('swagger server sinked\n\t{}'.format('path:' + reverse() + path + rreverse()))
# if ui:
# self.api = SwaggerUIStaticMiddleware(self.api, swagger_file=self.static_path + '/' + new_file,
# url_prefix=path, language=ui_language, theme=theme, api_url=api_url)
# self.log.info('swagger ui static middleware loaded\n\t{}'.format(
# 'url_prefix(static_path):' + reverse() + self.static_path) + rreverse())
# return self
#
# # deprecated
# def begin_api(self, api_prefix=None, errors=None):
# pass
#
# # deprecated
# def end_api(self):
# pass
#
# def remove_error_info(self, d):
# if not isinstance(d, (dict, list)):
# return d
# if isinstance(d, list):
# return [self.remove_error_info(v) for v in d]
# return {k: self.remove_error_info(v) for k, v in d.items()
# if k not in {'validationId', 'beforeId', 'afterId', 'exceptionId', 'operationId', 'finalId', 'operationMode'}}
. Output only the next line. | .wsgi(tornado_app, PRE_TORNADO) |
Predict the next line after this snippet: <|code_start|>
f = FALSY(static_dir='demo/with_wsgi/static') \
.swagger('demo/with_wsgi/spec.yml', ui=True, ui_language='zh-cn', theme='normal') \
.wsgi(flask_app, PRE_FLASK) \
<|code_end|>
using the current file's imports:
from demo.with_wsgi.ops.flask import flask_app, PRE_FLASK
from demo.with_wsgi.ops.tornado import tornado_app, PRE_TORNADO
from falsy.falsy import FALSY
and any relevant context from other files:
# Path: demo/with_wsgi/ops/flask.py
# PRE_FLASK = 'flask'
# def pre_flask(route):
# def hello_flask():
#
# Path: demo/with_wsgi/ops/tornado.py
# class TornadoHandler(tornado.web.RequestHandler):
# def get(self):
# def pre_tornado(route):
# PRE_TORNADO = 'tornado'
#
# Path: falsy/falsy.py
# class FALSY:
# def __init__(self, falcon_api=None,
# static_path='static', static_dir='static', log_config=None):
# if log_config is None:
# self.log = JLog().setup().bind()
# else:
# self.log = JLog().setup(config=log_config).bind()
# self.log.info(cc('falsy init', fore=77, styles=['italic', 'underlined', 'reverse']))
#
# self.api = self.falcon_api = falcon_api or falcon.API()
# self.static_path = static_path.strip('/')
# self.static_dir = static_dir if os.path.isdir(static_dir) else '.'
#
# self.api = CommonStaticMiddleware(self.falcon_api, static_dir=self.static_dir,
# url_prefix=self.static_path)
# self.log.info('common static middleware loaded\n\t{}'.format(
# 'url_prefix(static_path):' + reverse() + self.static_path + rreverse() +
# ', static_dir:' + reverse() + self.static_dir + rreverse()))
#
# def wsgi(self, app, url_prefix='/wsgi'):
# self.api = CommonWSGIMiddleware(self.api, app, url_prefix=url_prefix)
# self.log.info('common wsgi middleware loaded\n\t{}'.format('url_prefix:' + self.static_path))
# return self
#
# def swagger(self, filename, ui=True, new_file=None, ui_language='en', theme='normal', errors=None, cors_origin=None, api_url=None):
# server = SwaggerServer(errors=errors, cors_origin=cors_origin)
# self.log.info('swagger server init')
#
# swagger_file = filename.replace('/', '_')
# if swagger_file.endswith('yml') or swagger_file.endswith('yaml'):
# new_file = new_file or swagger_file
# new_file = new_file.replace('.yaml', '.json')
# new_file = new_file.replace('.yml', '.json')
# new_path = self.static_dir + '/' + new_file
# with open(filename, 'r') as f:
# config = yaml.load(f, Loader)
# server.load_specs(config)
# with open(new_path, 'w') as fw:
# config = self.remove_error_info(config)
# json.dump(config, fw, sort_keys=True, indent=4)
# self.log.info('swagger file generated(from yaml file)\n\t{}'.format(
# 'new_path:' + reverse() + new_path + rreverse()))
# else:
# new_file = new_file or swagger_file
# new_path = self.static_dir + '/' + new_file
# with open(filename, 'r') as fr:
# config = fr.read()
# server.load_specs(config)
# with open(new_path, 'w') as fw:
# config = json.loads(self.remove_error_info(config))
# json.dump(config, fw, sort_keys=True, indent=4)
# self.log.info('swagger file generated(from json file)\n\t{}'.format(
# 'new_path:' + reverse() + new_path + rreverse()))
# path = server.basePath
# path = path.lstrip('/') if path else 'v0'
# self.falcon_api.add_sink(server, '/' + path)
# self.log.info('swagger server sinked\n\t{}'.format('path:' + reverse() + path + rreverse()))
# if ui:
# self.api = SwaggerUIStaticMiddleware(self.api, swagger_file=self.static_path + '/' + new_file,
# url_prefix=path, language=ui_language, theme=theme, api_url=api_url)
# self.log.info('swagger ui static middleware loaded\n\t{}'.format(
# 'url_prefix(static_path):' + reverse() + self.static_path) + rreverse())
# return self
#
# # deprecated
# def begin_api(self, api_prefix=None, errors=None):
# pass
#
# # deprecated
# def end_api(self):
# pass
#
# def remove_error_info(self, d):
# if not isinstance(d, (dict, list)):
# return d
# if isinstance(d, list):
# return [self.remove_error_info(v) for v in d]
# return {k: self.remove_error_info(v) for k, v in d.items()
# if k not in {'validationId', 'beforeId', 'afterId', 'exceptionId', 'operationId', 'finalId', 'operationMode'}}
. Output only the next line. | .wsgi(tornado_app, PRE_TORNADO) |
Continue the code snippet: <|code_start|> 'spider': 'pycurl',
'state': 'error',
'error_code': e.code,
'error_desc': e.desc,
}
except Exception as e:
return {
'url': c._raw_url,
'id': c._raw_id,
'payload': c._raw_payload,
'spider': 'pycurl',
'state': 'critical',
'error_code': -1,
'error_desc': "{} - {}".format(type(e), str(e)),
}
return curl_ret
finally:
cls._multi.remove_handle(c)
@classmethod
def perform(cls):
if cls._futures:
while True:
status, num_active = cls._multi.perform()
if status != pycurl.E_CALL_MULTI_PERFORM:
break
while True:
num_ready, success, fail = cls._multi.info_read()
for c in success:
cc = cls._futures.pop(c)
<|code_end|>
. Use current file imports:
import asyncio as aio
import atexit
import pycurl
from falsy.loader.func import load
from falsy.netboy.curl_result import curl_result
and context (classes, functions, or code) from other files:
# Path: falsy/loader/func.py
# def load(function_name):
# if not function_name:
# return None
#
# if function_name in func_map.keys():
# return func_map[function_name]
#
# module_name, attr_path = function_name.rsplit('.', 1)
# module = None
# last_import_error = None
#
# while not module:
#
# try:
# module = importlib.import_module(module_name)
# except ImportError as import_error:
# last_import_error = import_error
# if '.' in module_name:
# module_name, attr_path1 = module_name.rsplit('.', 1)
# attr_path = '{0}.{1}'.format(attr_path1, attr_path)
# else:
# raise
# try:
# function = deep_getattr(module, attr_path)
# except AttributeError:
# if last_import_error:
# raise last_import_error
# else:
# raise
# func_map[str(function_name)] = function
# return function
#
# Path: falsy/netboy/curl_result.py
# def curl_result(c):
# effective_url = c.getinfo(pycurl.EFFECTIVE_URL)
# primary_ip = c.getinfo(pycurl.PRIMARY_IP)
# primary_port = c.getinfo(pycurl.PRIMARY_PORT)
# local_ip = c.getinfo(pycurl.LOCAL_IP)
# local_port = c.getinfo(pycurl.LOCAL_PORT)
# speed_download = c.getinfo(pycurl.SPEED_DOWNLOAD)
# size_download = c.getinfo(pycurl.SIZE_DOWNLOAD)
# redirect_time = c.getinfo(pycurl.REDIRECT_TIME)
# redirect_count = c.getinfo(pycurl.REDIRECT_COUNT)
# redirect_url = c.getinfo(pycurl.REDIRECT_URL)
# http_code = c.getinfo(pycurl.HTTP_CODE)
# response_code = c.getinfo(pycurl.RESPONSE_CODE)
# total_time = c.getinfo(pycurl.TOTAL_TIME)
# content_type = c.getinfo(pycurl.CONTENT_TYPE)
# namelookup_time = c.getinfo(pycurl.NAMELOOKUP_TIME)
# info_filetime = c.getinfo(pycurl.INFO_FILETIME)
# http_connectcode = c.getinfo(pycurl.HTTP_CONNECTCODE)
# starttransfer_time = c.getinfo(pycurl.STARTTRANSFER_TIME)
# pretransfer_time = c.getinfo(pycurl.PRETRANSFER_TIME)
# header_size = c.getinfo(pycurl.HEADER_SIZE)
# request_size = c.getinfo(pycurl.REQUEST_SIZE)
# ssl_verifyresult = c.getinfo(pycurl.SSL_VERIFYRESULT)
# num_connects = c.getinfo(pycurl.NUM_CONNECTS)
#
# return {
# 'effective_url': effective_url,
# 'primary_ip': primary_ip,
# 'primary_port': primary_port,
# 'local_ip': local_ip,
# 'local_port': local_port,
# 'speed_download': speed_download,
# 'size_download': size_download,
# 'redirect_time': redirect_time,
# 'redirect_count': redirect_count,
# 'redirect_url': redirect_url,
# 'http_code': http_code,
# 'response_code': response_code,
# 'total_time': total_time,
# 'content_type': content_type,
# 'namelookup_time': namelookup_time,
# 'info_filetime': info_filetime,
# 'http_connectcode': http_connectcode,
# 'starttransfer_time': starttransfer_time,
# 'pretransfer_time': pretransfer_time,
# 'header_size': header_size,
# 'request_size': request_size,
# 'ssl_verifyresult': ssl_verifyresult,
# 'num_connects': num_connects,
# # 'proxy_ssl_verifyresult': proxy_ssl_verifyresult,
# # 'app_connecttime': app_connecttime,
#
# }
. Output only the next line. | result = curl_result(c) |
Given snippet: <|code_start|>
class ColoredRecord(object):
class __dict(collections.defaultdict):
def __missing__(self, name):
try:
return parse_colors(name)
except Exception:
raise KeyError("{} is not a valid record attribute "
"or color sequence".format(name))
def __init__(self, record):
self.__dict__ = self.__dict()
self.__dict__.update(record.__dict__)
self.__record = record
def __getattr__(self, name):
return getattr(self.__record, name)
codes = {
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
import collections
from falsy.termcc.termcc import fore, back, style, reset, rastyle, rred, ritalic, reverse, rafore, raback
and context:
# Path: falsy/termcc/termcc.py
# def fore(fore):
# if type(fore) == str:
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE[fore]))
# if 0 < fore <= 256:
# return wrap(TERMCC_FORE + str(fore))
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE['white']))
#
# def back(color):
# return wrap(TERMCC_BACK + str(TERMCC_FORE_TABLE[color]))
#
# def style(style):
# return wrap(TERMCC_STYLE + str(TERMCC_STYLE_TABLE[style]))
#
# def reset():
# return wrap(TERMCC_RESET_ALL)
#
# def rastyle():
# return wrap(TERMCC_RESET_ALL_STYLE)
#
# def rred():
# return rfore('red')
#
# def ritalic():
# return rstyle('italic')
#
# def reverse():
# return style('reverse')
#
# def rafore():
# return wrap(TERMCC_RESET_ALL_FORE)
#
# def raback():
# return wrap(TERMCC_RESET_ALL_BACK)
which might include code, classes, or functions. Output only the next line. | 'black': fore('black'), |
Predict the next line after this snippet: <|code_start|> raise KeyError("{} is not a valid record attribute "
"or color sequence".format(name))
def __init__(self, record):
self.__dict__ = self.__dict()
self.__dict__.update(record.__dict__)
self.__record = record
def __getattr__(self, name):
return getattr(self.__record, name)
codes = {
'black': fore('black'),
'red': fore('red'),
'green': fore('green'),
'yellow': fore('yellow'),
'blue': fore('blue'),
'magenta': fore('magenta'),
'cyan': fore('cyan'),
'lgray': fore('lightgray'),
'gray': fore('darkgray'),
'lred': fore('lightred'),
'lgreen': fore('lightgreen'),
'lyellow': fore('lightyellow'),
'lblue': fore('lightblue'),
'lmagenta': fore('lightmagenta'),
'lcyan': fore('lightcyan'),
'white': fore('white'),
<|code_end|>
using the current file's imports:
import logging
import collections
from falsy.termcc.termcc import fore, back, style, reset, rastyle, rred, ritalic, reverse, rafore, raback
and any relevant context from other files:
# Path: falsy/termcc/termcc.py
# def fore(fore):
# if type(fore) == str:
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE[fore]))
# if 0 < fore <= 256:
# return wrap(TERMCC_FORE + str(fore))
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE['white']))
#
# def back(color):
# return wrap(TERMCC_BACK + str(TERMCC_FORE_TABLE[color]))
#
# def style(style):
# return wrap(TERMCC_STYLE + str(TERMCC_STYLE_TABLE[style]))
#
# def reset():
# return wrap(TERMCC_RESET_ALL)
#
# def rastyle():
# return wrap(TERMCC_RESET_ALL_STYLE)
#
# def rred():
# return rfore('red')
#
# def ritalic():
# return rstyle('italic')
#
# def reverse():
# return style('reverse')
#
# def rafore():
# return wrap(TERMCC_RESET_ALL_FORE)
#
# def raback():
# return wrap(TERMCC_RESET_ALL_BACK)
. Output only the next line. | 'black_': back('black'), |
Using the snippet: <|code_start|> 'blue': fore('blue'),
'magenta': fore('magenta'),
'cyan': fore('cyan'),
'lgray': fore('lightgray'),
'gray': fore('darkgray'),
'lred': fore('lightred'),
'lgreen': fore('lightgreen'),
'lyellow': fore('lightyellow'),
'lblue': fore('lightblue'),
'lmagenta': fore('lightmagenta'),
'lcyan': fore('lightcyan'),
'white': fore('white'),
'black_': back('black'),
'red_': back('red'),
'green_': back('green'),
'yellow_': back('yellow'),
'blue_': back('blue'),
'magenta_': back('magenta'),
'cyan_': back('cyan'),
'lgray_': back('lightgray'),
'gray_': back('darkgray'),
'lred_': back('lightred'),
'lgreen_': back('lightgreen'),
'lyellow_': back('lightyellow'),
'lblue_': back('lightblue'),
'lmagenta_': back('lightmagenta'),
'lcyan_': back('lightcyan'),
'white_': back('white'),
<|code_end|>
, determine the next line of code. You have imports:
import logging
import collections
from falsy.termcc.termcc import fore, back, style, reset, rastyle, rred, ritalic, reverse, rafore, raback
and context (class names, function names, or code) available:
# Path: falsy/termcc/termcc.py
# def fore(fore):
# if type(fore) == str:
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE[fore]))
# if 0 < fore <= 256:
# return wrap(TERMCC_FORE + str(fore))
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE['white']))
#
# def back(color):
# return wrap(TERMCC_BACK + str(TERMCC_FORE_TABLE[color]))
#
# def style(style):
# return wrap(TERMCC_STYLE + str(TERMCC_STYLE_TABLE[style]))
#
# def reset():
# return wrap(TERMCC_RESET_ALL)
#
# def rastyle():
# return wrap(TERMCC_RESET_ALL_STYLE)
#
# def rred():
# return rfore('red')
#
# def ritalic():
# return rstyle('italic')
#
# def reverse():
# return style('reverse')
#
# def rafore():
# return wrap(TERMCC_RESET_ALL_FORE)
#
# def raback():
# return wrap(TERMCC_RESET_ALL_BACK)
. Output only the next line. | 'bold': style('bold'), |
Here is a snippet: <|code_start|> 'lblue': fore('lightblue'),
'lmagenta': fore('lightmagenta'),
'lcyan': fore('lightcyan'),
'white': fore('white'),
'black_': back('black'),
'red_': back('red'),
'green_': back('green'),
'yellow_': back('yellow'),
'blue_': back('blue'),
'magenta_': back('magenta'),
'cyan_': back('cyan'),
'lgray_': back('lightgray'),
'gray_': back('darkgray'),
'lred_': back('lightred'),
'lgreen_': back('lightgreen'),
'lyellow_': back('lightyellow'),
'lblue_': back('lightblue'),
'lmagenta_': back('lightmagenta'),
'lcyan_': back('lightcyan'),
'white_': back('white'),
'bold': style('bold'),
'dim': style('dim'),
'italic': style('italic'),
'underlined': style('underlined'),
'blink': style('blink'),
'reverse': style('reverse'),
'hidden': style('hidden'),
<|code_end|>
. Write the next line using the current file imports:
import logging
import collections
from falsy.termcc.termcc import fore, back, style, reset, rastyle, rred, ritalic, reverse, rafore, raback
and context from other files:
# Path: falsy/termcc/termcc.py
# def fore(fore):
# if type(fore) == str:
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE[fore]))
# if 0 < fore <= 256:
# return wrap(TERMCC_FORE + str(fore))
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE['white']))
#
# def back(color):
# return wrap(TERMCC_BACK + str(TERMCC_FORE_TABLE[color]))
#
# def style(style):
# return wrap(TERMCC_STYLE + str(TERMCC_STYLE_TABLE[style]))
#
# def reset():
# return wrap(TERMCC_RESET_ALL)
#
# def rastyle():
# return wrap(TERMCC_RESET_ALL_STYLE)
#
# def rred():
# return rfore('red')
#
# def ritalic():
# return rstyle('italic')
#
# def reverse():
# return style('reverse')
#
# def rafore():
# return wrap(TERMCC_RESET_ALL_FORE)
#
# def raback():
# return wrap(TERMCC_RESET_ALL_BACK)
, which may include functions, classes, or code. Output only the next line. | 'reset': reset(), |
Continue the code snippet: <|code_start|> 'lmagenta': fore('lightmagenta'),
'lcyan': fore('lightcyan'),
'white': fore('white'),
'black_': back('black'),
'red_': back('red'),
'green_': back('green'),
'yellow_': back('yellow'),
'blue_': back('blue'),
'magenta_': back('magenta'),
'cyan_': back('cyan'),
'lgray_': back('lightgray'),
'gray_': back('darkgray'),
'lred_': back('lightred'),
'lgreen_': back('lightgreen'),
'lyellow_': back('lightyellow'),
'lblue_': back('lightblue'),
'lmagenta_': back('lightmagenta'),
'lcyan_': back('lightcyan'),
'white_': back('white'),
'bold': style('bold'),
'dim': style('dim'),
'italic': style('italic'),
'underlined': style('underlined'),
'blink': style('blink'),
'reverse': style('reverse'),
'hidden': style('hidden'),
'reset': reset(),
<|code_end|>
. Use current file imports:
import logging
import collections
from falsy.termcc.termcc import fore, back, style, reset, rastyle, rred, ritalic, reverse, rafore, raback
and context (classes, functions, or code) from other files:
# Path: falsy/termcc/termcc.py
# def fore(fore):
# if type(fore) == str:
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE[fore]))
# if 0 < fore <= 256:
# return wrap(TERMCC_FORE + str(fore))
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE['white']))
#
# def back(color):
# return wrap(TERMCC_BACK + str(TERMCC_FORE_TABLE[color]))
#
# def style(style):
# return wrap(TERMCC_STYLE + str(TERMCC_STYLE_TABLE[style]))
#
# def reset():
# return wrap(TERMCC_RESET_ALL)
#
# def rastyle():
# return wrap(TERMCC_RESET_ALL_STYLE)
#
# def rred():
# return rfore('red')
#
# def ritalic():
# return rstyle('italic')
#
# def reverse():
# return style('reverse')
#
# def rafore():
# return wrap(TERMCC_RESET_ALL_FORE)
#
# def raback():
# return wrap(TERMCC_RESET_ALL_BACK)
. Output only the next line. | 'rstyle': rastyle(), |
Given the code snippet: <|code_start|> 'lcyan': fore('lightcyan'),
'white': fore('white'),
'black_': back('black'),
'red_': back('red'),
'green_': back('green'),
'yellow_': back('yellow'),
'blue_': back('blue'),
'magenta_': back('magenta'),
'cyan_': back('cyan'),
'lgray_': back('lightgray'),
'gray_': back('darkgray'),
'lred_': back('lightred'),
'lgreen_': back('lightgreen'),
'lyellow_': back('lightyellow'),
'lblue_': back('lightblue'),
'lmagenta_': back('lightmagenta'),
'lcyan_': back('lightcyan'),
'white_': back('white'),
'bold': style('bold'),
'dim': style('dim'),
'italic': style('italic'),
'underlined': style('underlined'),
'blink': style('blink'),
'reverse': style('reverse'),
'hidden': style('hidden'),
'reset': reset(),
'rstyle': rastyle(),
<|code_end|>
, generate the next line using the imports in this file:
import logging
import collections
from falsy.termcc.termcc import fore, back, style, reset, rastyle, rred, ritalic, reverse, rafore, raback
and context (functions, classes, or occasionally code) from other files:
# Path: falsy/termcc/termcc.py
# def fore(fore):
# if type(fore) == str:
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE[fore]))
# if 0 < fore <= 256:
# return wrap(TERMCC_FORE + str(fore))
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE['white']))
#
# def back(color):
# return wrap(TERMCC_BACK + str(TERMCC_FORE_TABLE[color]))
#
# def style(style):
# return wrap(TERMCC_STYLE + str(TERMCC_STYLE_TABLE[style]))
#
# def reset():
# return wrap(TERMCC_RESET_ALL)
#
# def rastyle():
# return wrap(TERMCC_RESET_ALL_STYLE)
#
# def rred():
# return rfore('red')
#
# def ritalic():
# return rstyle('italic')
#
# def reverse():
# return style('reverse')
#
# def rafore():
# return wrap(TERMCC_RESET_ALL_FORE)
#
# def raback():
# return wrap(TERMCC_RESET_ALL_BACK)
. Output only the next line. | 'rafore': rafore(), |
Given the code snippet: <|code_start|> 'white': fore('white'),
'black_': back('black'),
'red_': back('red'),
'green_': back('green'),
'yellow_': back('yellow'),
'blue_': back('blue'),
'magenta_': back('magenta'),
'cyan_': back('cyan'),
'lgray_': back('lightgray'),
'gray_': back('darkgray'),
'lred_': back('lightred'),
'lgreen_': back('lightgreen'),
'lyellow_': back('lightyellow'),
'lblue_': back('lightblue'),
'lmagenta_': back('lightmagenta'),
'lcyan_': back('lightcyan'),
'white_': back('white'),
'bold': style('bold'),
'dim': style('dim'),
'italic': style('italic'),
'underlined': style('underlined'),
'blink': style('blink'),
'reverse': style('reverse'),
'hidden': style('hidden'),
'reset': reset(),
'rstyle': rastyle(),
'rafore': rafore(),
<|code_end|>
, generate the next line using the imports in this file:
import logging
import collections
from falsy.termcc.termcc import fore, back, style, reset, rastyle, rred, ritalic, reverse, rafore, raback
and context (functions, classes, or occasionally code) from other files:
# Path: falsy/termcc/termcc.py
# def fore(fore):
# if type(fore) == str:
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE[fore]))
# if 0 < fore <= 256:
# return wrap(TERMCC_FORE + str(fore))
# return wrap(TERMCC_FORE + str(TERMCC_FORE_TABLE['white']))
#
# def back(color):
# return wrap(TERMCC_BACK + str(TERMCC_FORE_TABLE[color]))
#
# def style(style):
# return wrap(TERMCC_STYLE + str(TERMCC_STYLE_TABLE[style]))
#
# def reset():
# return wrap(TERMCC_RESET_ALL)
#
# def rastyle():
# return wrap(TERMCC_RESET_ALL_STYLE)
#
# def rred():
# return rfore('red')
#
# def ritalic():
# return rstyle('italic')
#
# def reverse():
# return style('reverse')
#
# def rafore():
# return wrap(TERMCC_RESET_ALL_FORE)
#
# def raback():
# return wrap(TERMCC_RESET_ALL_BACK)
. Output only the next line. | 'raback': raback(), |
Predict the next line after this snippet: <|code_start|> self.load_methods(method, method_content, path, swagger_spec)
def load_methods(self, method, method_content, path, swagger_spec):
uri_fields, uri_regex = compile_uri_template(
'/' + method.lower() + swagger_spec['basePath'] + path)
self.specs[uri_regex] = {'uri_fields': uri_fields}
for attribute, attribute_content in method_content.items():
if attribute in ['beforeId', 'afterId', 'operationId', 'validationId', 'exceptionId', 'finalId']:
attribute_content = self.load_handler(attribute_content)
self.load_attributes(attribute, attribute_content, swagger_spec, uri_regex)
self.specs[uri_regex]['path'] = path
def load_attributes(self, attribute, attribute_content, swagger_spec, uri_regex):
self.specs[uri_regex][attribute] = attribute_content
if attribute == 'parameters':
for i, param in enumerate(attribute_content):
if param.get('in') == 'body':
schema = param.get('schema')
ref = schema.get('$ref')
if ref:
self.specs[uri_regex]['schema'] = swagger_spec['definitions'][
ref[ref.rfind('/') + 1:]]
else:
self.specs[uri_regex]['schema'] = schema
self.specs[uri_regex][attribute][i]['validationId'] = self.load_handler(param.get('validationId'))
def load_handler(self, name):
if name is None:
return None
<|code_end|>
using the current file's imports:
import pprint
import falcon
import json
import logging
from falcon.routing import compile_uri_template
from falsy.loader.func import load
and any relevant context from other files:
# Path: falsy/loader/func.py
# def load(function_name):
# if not function_name:
# return None
#
# if function_name in func_map.keys():
# return func_map[function_name]
#
# module_name, attr_path = function_name.rsplit('.', 1)
# module = None
# last_import_error = None
#
# while not module:
#
# try:
# module = importlib.import_module(module_name)
# except ImportError as import_error:
# last_import_error = import_error
# if '.' in module_name:
# module_name, attr_path1 = module_name.rsplit('.', 1)
# attr_path = '{0}.{1}'.format(attr_path1, attr_path)
# else:
# raise
# try:
# function = deep_getattr(module, attr_path)
# except AttributeError:
# if last_import_error:
# raise last_import_error
# else:
# raise
# func_map[str(function_name)] = function
# return function
. Output only the next line. | return load(name) |
Predict the next line for this snippet: <|code_start|>
def json_check(value):
try:
if type(value) == str:
try:
value = json.loads(value)
except json.decoder.JSONDecodeError as e:
value = ast.literal_eval(value)
return value
except Exception as e:
raise falcon.HTTPInvalidParam('json check error:', str(e)+' '+str(type(e)))
class OperatorLoader:
def __init__(self):
<|code_end|>
with the help of current file imports:
import ast
import base64
import falcon
import json
from falsy.jlog.jlog import JLog
and context from other files:
# Path: falsy/jlog/jlog.py
# class JLog:
# def __init__(self, name='falsy'):
# self.logger = None
# self.logname = name
#
# def setup(self, config=None):
# if config is not None:
# highlights = config.get('highlights')
# logfile = config.get('logfile', '/tmp/falsy.log')
# file_level = config.get('file_level', 'DEBUG')
# console_level = config.get('console_level', 'DEBUG')
# handlers = config.get('handlers', ['file', 'console'])
# extra_loggers = config.get('extra_loggers')
# else:
# highlights = None
# logfile = '/tmp/falsy.log'
# file_level = console_level = 'DEBUG'
# handlers = ['file', 'console']
# extra_loggers = None
# config = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'formatters': {
# 'file': {
# 'fmt': '%(asctime)s.%(msecs)03d %(levelname)-8s %(name)-8s %(message)s',
# 'datefmt': '%Y-%m-%d %H:%M:%S %Z%z'
# },
# 'console': {
# '()': JLogColoredFormatter,
# 'fmt': '%(99)s%(process)s-%(thread)s%(reset)s %(yellow)s%(asctime)s.%(msecs)03d%(reset)s %(cyan)s%(name)-8s%(reset)s'
# '%(log_color)s%(message)s%(reset)s%(trace)s%(high)s',
# 'datefmt': '%m%d %H:%M:%S',
# 'log_colors': {
# 'DEBUG': 'blue',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
# },
#
# },
# 'filters': {
# 'trace_filter': {
# '()': TraceFilter,
# },
# 'highlight_filter': {
# '()': HighlightFilter,
# 'highlights': highlights
# }
# },
# 'handlers': {
# 'file': {
# 'level': file_level,
# 'filters': None,
# 'class': 'logging.handlers.TimedRotatingFileHandler',
# 'filename': logfile,
# 'formatter': 'file'
# },
# 'console': {
# 'level': console_level,
# 'filters': ['trace_filter', 'highlight_filter'],
# 'class': 'logging.StreamHandler',
# 'stream': 'ext://sys.stdout',
# 'formatter': 'console'
# },
# },
# 'loggers': {
# self.logname: {
# 'handlers': handlers,
# 'level': 'DEBUG',
# 'propagate': False,
# },
# }
# }
# if extra_loggers:
# config['loggers'].update(extra_loggers)
# logging.config.dictConfig(config)
# return self
#
# def bind(self):
# self.logger = logging.getLogger(self.logname)
# return self
#
# def bind2(self, logname):
# self.logger = logging.getLogger(logname)
# return self
#
# def debug(self, msg, *args, **kwargs):
# return self.logger.debug(msg, *args, **kwargs)
#
# def info(self, msg, *args, **kwargs):
# return self.logger.info(msg, *args, **kwargs)
#
# def warning(self, msg, *args, **kwargs):
# return self.logger.warning(msg, *args, **kwargs)
#
# def error(self, msg, *args, **kwargs):
# return self.logger.error(msg, *args, **kwargs)
#
# def critical(self, msg, *args, **kwargs):
# return self.logger.critical(msg, *args, **kwargs)
#
# def warning_trace(self, msg, *args, **kwargs):
# self.trace(kwargs)
# return self.logger.critical(msg, *args, **kwargs)
#
# def critical_trace(self, msg, *args, **kwargs):
# self.trace(kwargs)
# return self.logger.critical(msg, *args, **kwargs)
#
# def error_trace(self, msg, *args, **kwargs):
# self.trace(kwargs)
# return self.logger.error(msg, *args, **kwargs)
#
# def trace(self, kwargs):
# exc_type, exc_value, exc_traceback = sys.exc_info()
# stack = traceback.extract_tb(exc_traceback)
# lines = []
# for i, s in enumerate(stack):
# filename = s.filename
# l = len(filename)
# shortfile = kwargs.get('shortfile', 40)
# if l > shortfile:
# filename = filename[filename.find('/', l - shortfile):]
# line = '%-40s:%-4s %s' % (
# blue() + filename, yellow() + str(s.lineno),
# '|' + '-' * (i * 4) + cyan() + s.name + ':' + red() + s.line)
# lines.append(line)
# lines = '\n\t'.join(lines)
# kwargs['extra'] = {
# 'trace': magenta() + str(exc_type) + ' ' + bold() + magenta() + str(exc_value) + '\n\t' + lines}
, which may contain function names, class names, or code. Output only the next line. | self.log = JLog().bind() |
Next line prediction: <|code_start|>
argmap = {
'name': fields.Str(required=False),
}
<|code_end|>
. Use current file imports:
(import json
from marshmallow import fields
from falsy.utils.marshmallow import validate)
and context including class names, function names, or small code snippets from other files:
# Path: falsy/utils/marshmallow.py
# def validate(argmap):
# schema = argmap2schema(argmap)
#
# def decorator(func):
# @functools.wraps(func)
# def decorated(*args, **kwargs):
# e = schema.validate(kwargs)
# if e:
# raise MMException(str(e))
# return func(*args, **kwargs)
#
# return decorated
#
# return decorator
. Output only the next line. | @validate(argmap) |
Given snippet: <|code_start|>
def exception_handler(context):
print('context:', context)
def run(coro, loop=None):
async def main_task():
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import asyncio as aio
import json
import uvloop
from contextlib import suppress
from falsy.netboy.curl_loop import curl_loop
and context:
# Path: falsy/netboy/curl_loop.py
# async def curl_loop():
# while True:
# await aio.sleep(0)
# CurlLoop.perform()
which might include code, classes, or functions. Output only the next line. | pycurl_task = aio.ensure_future(curl_loop()) |
Using the snippet: <|code_start|> highlights = None
logfile = '/tmp/falsy.log'
file_level = console_level = 'DEBUG'
handlers = ['file', 'console']
extra_loggers = None
config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'file': {
'fmt': '%(asctime)s.%(msecs)03d %(levelname)-8s %(name)-8s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S %Z%z'
},
'console': {
'()': JLogColoredFormatter,
'fmt': '%(99)s%(process)s-%(thread)s%(reset)s %(yellow)s%(asctime)s.%(msecs)03d%(reset)s %(cyan)s%(name)-8s%(reset)s'
'%(log_color)s%(message)s%(reset)s%(trace)s%(high)s',
'datefmt': '%m%d %H:%M:%S',
'log_colors': {
'DEBUG': 'blue',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
},
},
'filters': {
'trace_filter': {
<|code_end|>
, determine the next line of code. You have imports:
import logging
import logging.config
import traceback
import collections
import sys
from falsy.jlog.filters import TraceFilter, HighlightFilter
from falsy.jlog.formatters import JLogColoredFormatter
from falsy.termcc.termcc import blue, yellow, cyan, red, bold, magenta
and context (class names, function names, or code) available:
# Path: falsy/jlog/filters.py
# class TraceFilter(logging.Filter):
# def filter(self, record):
# if 'trace' not in dir(record):
# record.trace = ''
# else:
# record.trace = '\n\t' + record.trace
# return True
#
# class HighlightFilter(logging.Filter):
# def __init__(self, highlights=None):
# self.highlights = highlights
#
# def filter(self, record):
# record.high = ''
# if self.highlights is None:
# return True
# for e in self.highlights:
# if e in record.msg:
# record.high = '\n\t' + \
# magenta() + 'highlight' + rmagenta() + ': ' + \
# record.msg.replace(e, cc(e, fore='yellow', back='red'))
# return True
#
# Path: falsy/jlog/formatters.py
# class JLogColoredFormatter(logging.Formatter):
# def __init__(self, fmt=None, datefmt=None, style='%',
# log_colors=None, reset=True):
# if fmt is None:
# default_formats = {
# '%': '%(log_color)s%(levelname)s:%(name)s:%(message)s',
# '{': '{log_color}{levelname}:{name}:{message}',
# '$': '${log_color}${levelname}:${name}:${message}'
# }
# fmt = default_formats[style]
#
# super().__init__(fmt, datefmt, style)
# default_log_colors = {
# 'DEBUG': 'white',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
#
# self.log_colors = (
# log_colors if log_colors is not None else default_log_colors)
# self.reset = reset
#
# def color(self, log_colors, level_name):
# return parse_colors(log_colors.get(level_name, ""))
#
# def format(self, record):
# record = ColoredRecord(record)
# record.log_color = self.color(self.log_colors, record.levelname)
#
# message = super().format(record)
#
# if self.reset and not message.endswith(codes['reset']):
# message += codes['reset']
#
# return message
#
# Path: falsy/termcc/termcc.py
# def blue():
# return fore('blue')
#
# def yellow():
# return fore('yellow')
#
# def cyan():
# return fore('cyan')
#
# def red():
# return fore('red')
#
# def bold():
# return style('bold') #
#
# def magenta():
# return fore('magenta')
. Output only the next line. | '()': TraceFilter, |
Next line prediction: <|code_start|> handlers = ['file', 'console']
extra_loggers = None
config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'file': {
'fmt': '%(asctime)s.%(msecs)03d %(levelname)-8s %(name)-8s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S %Z%z'
},
'console': {
'()': JLogColoredFormatter,
'fmt': '%(99)s%(process)s-%(thread)s%(reset)s %(yellow)s%(asctime)s.%(msecs)03d%(reset)s %(cyan)s%(name)-8s%(reset)s'
'%(log_color)s%(message)s%(reset)s%(trace)s%(high)s',
'datefmt': '%m%d %H:%M:%S',
'log_colors': {
'DEBUG': 'blue',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
},
},
'filters': {
'trace_filter': {
'()': TraceFilter,
},
'highlight_filter': {
<|code_end|>
. Use current file imports:
(import logging
import logging.config
import traceback
import collections
import sys
from falsy.jlog.filters import TraceFilter, HighlightFilter
from falsy.jlog.formatters import JLogColoredFormatter
from falsy.termcc.termcc import blue, yellow, cyan, red, bold, magenta)
and context including class names, function names, or small code snippets from other files:
# Path: falsy/jlog/filters.py
# class TraceFilter(logging.Filter):
# def filter(self, record):
# if 'trace' not in dir(record):
# record.trace = ''
# else:
# record.trace = '\n\t' + record.trace
# return True
#
# class HighlightFilter(logging.Filter):
# def __init__(self, highlights=None):
# self.highlights = highlights
#
# def filter(self, record):
# record.high = ''
# if self.highlights is None:
# return True
# for e in self.highlights:
# if e in record.msg:
# record.high = '\n\t' + \
# magenta() + 'highlight' + rmagenta() + ': ' + \
# record.msg.replace(e, cc(e, fore='yellow', back='red'))
# return True
#
# Path: falsy/jlog/formatters.py
# class JLogColoredFormatter(logging.Formatter):
# def __init__(self, fmt=None, datefmt=None, style='%',
# log_colors=None, reset=True):
# if fmt is None:
# default_formats = {
# '%': '%(log_color)s%(levelname)s:%(name)s:%(message)s',
# '{': '{log_color}{levelname}:{name}:{message}',
# '$': '${log_color}${levelname}:${name}:${message}'
# }
# fmt = default_formats[style]
#
# super().__init__(fmt, datefmt, style)
# default_log_colors = {
# 'DEBUG': 'white',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
#
# self.log_colors = (
# log_colors if log_colors is not None else default_log_colors)
# self.reset = reset
#
# def color(self, log_colors, level_name):
# return parse_colors(log_colors.get(level_name, ""))
#
# def format(self, record):
# record = ColoredRecord(record)
# record.log_color = self.color(self.log_colors, record.levelname)
#
# message = super().format(record)
#
# if self.reset and not message.endswith(codes['reset']):
# message += codes['reset']
#
# return message
#
# Path: falsy/termcc/termcc.py
# def blue():
# return fore('blue')
#
# def yellow():
# return fore('yellow')
#
# def cyan():
# return fore('cyan')
#
# def red():
# return fore('red')
#
# def bold():
# return style('bold') #
#
# def magenta():
# return fore('magenta')
. Output only the next line. | '()': HighlightFilter, |
Here is a snippet: <|code_start|>
class JLog:
def __init__(self, name='falsy'):
self.logger = None
self.logname = name
def setup(self, config=None):
if config is not None:
highlights = config.get('highlights')
logfile = config.get('logfile', '/tmp/falsy.log')
file_level = config.get('file_level', 'DEBUG')
console_level = config.get('console_level', 'DEBUG')
handlers = config.get('handlers', ['file', 'console'])
extra_loggers = config.get('extra_loggers')
else:
highlights = None
logfile = '/tmp/falsy.log'
file_level = console_level = 'DEBUG'
handlers = ['file', 'console']
extra_loggers = None
config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'file': {
'fmt': '%(asctime)s.%(msecs)03d %(levelname)-8s %(name)-8s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S %Z%z'
},
'console': {
<|code_end|>
. Write the next line using the current file imports:
import logging
import logging.config
import traceback
import collections
import sys
from falsy.jlog.filters import TraceFilter, HighlightFilter
from falsy.jlog.formatters import JLogColoredFormatter
from falsy.termcc.termcc import blue, yellow, cyan, red, bold, magenta
and context from other files:
# Path: falsy/jlog/filters.py
# class TraceFilter(logging.Filter):
# def filter(self, record):
# if 'trace' not in dir(record):
# record.trace = ''
# else:
# record.trace = '\n\t' + record.trace
# return True
#
# class HighlightFilter(logging.Filter):
# def __init__(self, highlights=None):
# self.highlights = highlights
#
# def filter(self, record):
# record.high = ''
# if self.highlights is None:
# return True
# for e in self.highlights:
# if e in record.msg:
# record.high = '\n\t' + \
# magenta() + 'highlight' + rmagenta() + ': ' + \
# record.msg.replace(e, cc(e, fore='yellow', back='red'))
# return True
#
# Path: falsy/jlog/formatters.py
# class JLogColoredFormatter(logging.Formatter):
# def __init__(self, fmt=None, datefmt=None, style='%',
# log_colors=None, reset=True):
# if fmt is None:
# default_formats = {
# '%': '%(log_color)s%(levelname)s:%(name)s:%(message)s',
# '{': '{log_color}{levelname}:{name}:{message}',
# '$': '${log_color}${levelname}:${name}:${message}'
# }
# fmt = default_formats[style]
#
# super().__init__(fmt, datefmt, style)
# default_log_colors = {
# 'DEBUG': 'white',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
#
# self.log_colors = (
# log_colors if log_colors is not None else default_log_colors)
# self.reset = reset
#
# def color(self, log_colors, level_name):
# return parse_colors(log_colors.get(level_name, ""))
#
# def format(self, record):
# record = ColoredRecord(record)
# record.log_color = self.color(self.log_colors, record.levelname)
#
# message = super().format(record)
#
# if self.reset and not message.endswith(codes['reset']):
# message += codes['reset']
#
# return message
#
# Path: falsy/termcc/termcc.py
# def blue():
# return fore('blue')
#
# def yellow():
# return fore('yellow')
#
# def cyan():
# return fore('cyan')
#
# def red():
# return fore('red')
#
# def bold():
# return style('bold') #
#
# def magenta():
# return fore('magenta')
, which may include functions, classes, or code. Output only the next line. | '()': JLogColoredFormatter, |
Predict the next line after this snippet: <|code_start|>
def error(self, msg, *args, **kwargs):
return self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
return self.logger.critical(msg, *args, **kwargs)
def warning_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def critical_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def error_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.error(msg, *args, **kwargs)
def trace(self, kwargs):
exc_type, exc_value, exc_traceback = sys.exc_info()
stack = traceback.extract_tb(exc_traceback)
lines = []
for i, s in enumerate(stack):
filename = s.filename
l = len(filename)
shortfile = kwargs.get('shortfile', 40)
if l > shortfile:
filename = filename[filename.find('/', l - shortfile):]
line = '%-40s:%-4s %s' % (
<|code_end|>
using the current file's imports:
import logging
import logging.config
import traceback
import collections
import sys
from falsy.jlog.filters import TraceFilter, HighlightFilter
from falsy.jlog.formatters import JLogColoredFormatter
from falsy.termcc.termcc import blue, yellow, cyan, red, bold, magenta
and any relevant context from other files:
# Path: falsy/jlog/filters.py
# class TraceFilter(logging.Filter):
# def filter(self, record):
# if 'trace' not in dir(record):
# record.trace = ''
# else:
# record.trace = '\n\t' + record.trace
# return True
#
# class HighlightFilter(logging.Filter):
# def __init__(self, highlights=None):
# self.highlights = highlights
#
# def filter(self, record):
# record.high = ''
# if self.highlights is None:
# return True
# for e in self.highlights:
# if e in record.msg:
# record.high = '\n\t' + \
# magenta() + 'highlight' + rmagenta() + ': ' + \
# record.msg.replace(e, cc(e, fore='yellow', back='red'))
# return True
#
# Path: falsy/jlog/formatters.py
# class JLogColoredFormatter(logging.Formatter):
# def __init__(self, fmt=None, datefmt=None, style='%',
# log_colors=None, reset=True):
# if fmt is None:
# default_formats = {
# '%': '%(log_color)s%(levelname)s:%(name)s:%(message)s',
# '{': '{log_color}{levelname}:{name}:{message}',
# '$': '${log_color}${levelname}:${name}:${message}'
# }
# fmt = default_formats[style]
#
# super().__init__(fmt, datefmt, style)
# default_log_colors = {
# 'DEBUG': 'white',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
#
# self.log_colors = (
# log_colors if log_colors is not None else default_log_colors)
# self.reset = reset
#
# def color(self, log_colors, level_name):
# return parse_colors(log_colors.get(level_name, ""))
#
# def format(self, record):
# record = ColoredRecord(record)
# record.log_color = self.color(self.log_colors, record.levelname)
#
# message = super().format(record)
#
# if self.reset and not message.endswith(codes['reset']):
# message += codes['reset']
#
# return message
#
# Path: falsy/termcc/termcc.py
# def blue():
# return fore('blue')
#
# def yellow():
# return fore('yellow')
#
# def cyan():
# return fore('cyan')
#
# def red():
# return fore('red')
#
# def bold():
# return style('bold') #
#
# def magenta():
# return fore('magenta')
. Output only the next line. | blue() + filename, yellow() + str(s.lineno), |
Based on the snippet: <|code_start|>
def error(self, msg, *args, **kwargs):
return self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
return self.logger.critical(msg, *args, **kwargs)
def warning_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def critical_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def error_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.error(msg, *args, **kwargs)
def trace(self, kwargs):
exc_type, exc_value, exc_traceback = sys.exc_info()
stack = traceback.extract_tb(exc_traceback)
lines = []
for i, s in enumerate(stack):
filename = s.filename
l = len(filename)
shortfile = kwargs.get('shortfile', 40)
if l > shortfile:
filename = filename[filename.find('/', l - shortfile):]
line = '%-40s:%-4s %s' % (
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
import logging.config
import traceback
import collections
import sys
from falsy.jlog.filters import TraceFilter, HighlightFilter
from falsy.jlog.formatters import JLogColoredFormatter
from falsy.termcc.termcc import blue, yellow, cyan, red, bold, magenta
and context (classes, functions, sometimes code) from other files:
# Path: falsy/jlog/filters.py
# class TraceFilter(logging.Filter):
# def filter(self, record):
# if 'trace' not in dir(record):
# record.trace = ''
# else:
# record.trace = '\n\t' + record.trace
# return True
#
# class HighlightFilter(logging.Filter):
# def __init__(self, highlights=None):
# self.highlights = highlights
#
# def filter(self, record):
# record.high = ''
# if self.highlights is None:
# return True
# for e in self.highlights:
# if e in record.msg:
# record.high = '\n\t' + \
# magenta() + 'highlight' + rmagenta() + ': ' + \
# record.msg.replace(e, cc(e, fore='yellow', back='red'))
# return True
#
# Path: falsy/jlog/formatters.py
# class JLogColoredFormatter(logging.Formatter):
# def __init__(self, fmt=None, datefmt=None, style='%',
# log_colors=None, reset=True):
# if fmt is None:
# default_formats = {
# '%': '%(log_color)s%(levelname)s:%(name)s:%(message)s',
# '{': '{log_color}{levelname}:{name}:{message}',
# '$': '${log_color}${levelname}:${name}:${message}'
# }
# fmt = default_formats[style]
#
# super().__init__(fmt, datefmt, style)
# default_log_colors = {
# 'DEBUG': 'white',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
#
# self.log_colors = (
# log_colors if log_colors is not None else default_log_colors)
# self.reset = reset
#
# def color(self, log_colors, level_name):
# return parse_colors(log_colors.get(level_name, ""))
#
# def format(self, record):
# record = ColoredRecord(record)
# record.log_color = self.color(self.log_colors, record.levelname)
#
# message = super().format(record)
#
# if self.reset and not message.endswith(codes['reset']):
# message += codes['reset']
#
# return message
#
# Path: falsy/termcc/termcc.py
# def blue():
# return fore('blue')
#
# def yellow():
# return fore('yellow')
#
# def cyan():
# return fore('cyan')
#
# def red():
# return fore('red')
#
# def bold():
# return style('bold') #
#
# def magenta():
# return fore('magenta')
. Output only the next line. | blue() + filename, yellow() + str(s.lineno), |
Next line prediction: <|code_start|> def error(self, msg, *args, **kwargs):
return self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
return self.logger.critical(msg, *args, **kwargs)
def warning_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def critical_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def error_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.error(msg, *args, **kwargs)
def trace(self, kwargs):
exc_type, exc_value, exc_traceback = sys.exc_info()
stack = traceback.extract_tb(exc_traceback)
lines = []
for i, s in enumerate(stack):
filename = s.filename
l = len(filename)
shortfile = kwargs.get('shortfile', 40)
if l > shortfile:
filename = filename[filename.find('/', l - shortfile):]
line = '%-40s:%-4s %s' % (
blue() + filename, yellow() + str(s.lineno),
<|code_end|>
. Use current file imports:
(import logging
import logging.config
import traceback
import collections
import sys
from falsy.jlog.filters import TraceFilter, HighlightFilter
from falsy.jlog.formatters import JLogColoredFormatter
from falsy.termcc.termcc import blue, yellow, cyan, red, bold, magenta)
and context including class names, function names, or small code snippets from other files:
# Path: falsy/jlog/filters.py
# class TraceFilter(logging.Filter):
# def filter(self, record):
# if 'trace' not in dir(record):
# record.trace = ''
# else:
# record.trace = '\n\t' + record.trace
# return True
#
# class HighlightFilter(logging.Filter):
# def __init__(self, highlights=None):
# self.highlights = highlights
#
# def filter(self, record):
# record.high = ''
# if self.highlights is None:
# return True
# for e in self.highlights:
# if e in record.msg:
# record.high = '\n\t' + \
# magenta() + 'highlight' + rmagenta() + ': ' + \
# record.msg.replace(e, cc(e, fore='yellow', back='red'))
# return True
#
# Path: falsy/jlog/formatters.py
# class JLogColoredFormatter(logging.Formatter):
# def __init__(self, fmt=None, datefmt=None, style='%',
# log_colors=None, reset=True):
# if fmt is None:
# default_formats = {
# '%': '%(log_color)s%(levelname)s:%(name)s:%(message)s',
# '{': '{log_color}{levelname}:{name}:{message}',
# '$': '${log_color}${levelname}:${name}:${message}'
# }
# fmt = default_formats[style]
#
# super().__init__(fmt, datefmt, style)
# default_log_colors = {
# 'DEBUG': 'white',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
#
# self.log_colors = (
# log_colors if log_colors is not None else default_log_colors)
# self.reset = reset
#
# def color(self, log_colors, level_name):
# return parse_colors(log_colors.get(level_name, ""))
#
# def format(self, record):
# record = ColoredRecord(record)
# record.log_color = self.color(self.log_colors, record.levelname)
#
# message = super().format(record)
#
# if self.reset and not message.endswith(codes['reset']):
# message += codes['reset']
#
# return message
#
# Path: falsy/termcc/termcc.py
# def blue():
# return fore('blue')
#
# def yellow():
# return fore('yellow')
#
# def cyan():
# return fore('cyan')
#
# def red():
# return fore('red')
#
# def bold():
# return style('bold') #
#
# def magenta():
# return fore('magenta')
. Output only the next line. | '|' + '-' * (i * 4) + cyan() + s.name + ':' + red() + s.line) |
Predict the next line after this snippet: <|code_start|> def error(self, msg, *args, **kwargs):
return self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
return self.logger.critical(msg, *args, **kwargs)
def warning_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def critical_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def error_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.error(msg, *args, **kwargs)
def trace(self, kwargs):
exc_type, exc_value, exc_traceback = sys.exc_info()
stack = traceback.extract_tb(exc_traceback)
lines = []
for i, s in enumerate(stack):
filename = s.filename
l = len(filename)
shortfile = kwargs.get('shortfile', 40)
if l > shortfile:
filename = filename[filename.find('/', l - shortfile):]
line = '%-40s:%-4s %s' % (
blue() + filename, yellow() + str(s.lineno),
<|code_end|>
using the current file's imports:
import logging
import logging.config
import traceback
import collections
import sys
from falsy.jlog.filters import TraceFilter, HighlightFilter
from falsy.jlog.formatters import JLogColoredFormatter
from falsy.termcc.termcc import blue, yellow, cyan, red, bold, magenta
and any relevant context from other files:
# Path: falsy/jlog/filters.py
# class TraceFilter(logging.Filter):
# def filter(self, record):
# if 'trace' not in dir(record):
# record.trace = ''
# else:
# record.trace = '\n\t' + record.trace
# return True
#
# class HighlightFilter(logging.Filter):
# def __init__(self, highlights=None):
# self.highlights = highlights
#
# def filter(self, record):
# record.high = ''
# if self.highlights is None:
# return True
# for e in self.highlights:
# if e in record.msg:
# record.high = '\n\t' + \
# magenta() + 'highlight' + rmagenta() + ': ' + \
# record.msg.replace(e, cc(e, fore='yellow', back='red'))
# return True
#
# Path: falsy/jlog/formatters.py
# class JLogColoredFormatter(logging.Formatter):
# def __init__(self, fmt=None, datefmt=None, style='%',
# log_colors=None, reset=True):
# if fmt is None:
# default_formats = {
# '%': '%(log_color)s%(levelname)s:%(name)s:%(message)s',
# '{': '{log_color}{levelname}:{name}:{message}',
# '$': '${log_color}${levelname}:${name}:${message}'
# }
# fmt = default_formats[style]
#
# super().__init__(fmt, datefmt, style)
# default_log_colors = {
# 'DEBUG': 'white',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
#
# self.log_colors = (
# log_colors if log_colors is not None else default_log_colors)
# self.reset = reset
#
# def color(self, log_colors, level_name):
# return parse_colors(log_colors.get(level_name, ""))
#
# def format(self, record):
# record = ColoredRecord(record)
# record.log_color = self.color(self.log_colors, record.levelname)
#
# message = super().format(record)
#
# if self.reset and not message.endswith(codes['reset']):
# message += codes['reset']
#
# return message
#
# Path: falsy/termcc/termcc.py
# def blue():
# return fore('blue')
#
# def yellow():
# return fore('yellow')
#
# def cyan():
# return fore('cyan')
#
# def red():
# return fore('red')
#
# def bold():
# return style('bold') #
#
# def magenta():
# return fore('magenta')
. Output only the next line. | '|' + '-' * (i * 4) + cyan() + s.name + ':' + red() + s.line) |
Predict the next line for this snippet: <|code_start|> return self.logger.critical(msg, *args, **kwargs)
def warning_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def critical_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def error_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.error(msg, *args, **kwargs)
def trace(self, kwargs):
exc_type, exc_value, exc_traceback = sys.exc_info()
stack = traceback.extract_tb(exc_traceback)
lines = []
for i, s in enumerate(stack):
filename = s.filename
l = len(filename)
shortfile = kwargs.get('shortfile', 40)
if l > shortfile:
filename = filename[filename.find('/', l - shortfile):]
line = '%-40s:%-4s %s' % (
blue() + filename, yellow() + str(s.lineno),
'|' + '-' * (i * 4) + cyan() + s.name + ':' + red() + s.line)
lines.append(line)
lines = '\n\t'.join(lines)
kwargs['extra'] = {
<|code_end|>
with the help of current file imports:
import logging
import logging.config
import traceback
import collections
import sys
from falsy.jlog.filters import TraceFilter, HighlightFilter
from falsy.jlog.formatters import JLogColoredFormatter
from falsy.termcc.termcc import blue, yellow, cyan, red, bold, magenta
and context from other files:
# Path: falsy/jlog/filters.py
# class TraceFilter(logging.Filter):
# def filter(self, record):
# if 'trace' not in dir(record):
# record.trace = ''
# else:
# record.trace = '\n\t' + record.trace
# return True
#
# class HighlightFilter(logging.Filter):
# def __init__(self, highlights=None):
# self.highlights = highlights
#
# def filter(self, record):
# record.high = ''
# if self.highlights is None:
# return True
# for e in self.highlights:
# if e in record.msg:
# record.high = '\n\t' + \
# magenta() + 'highlight' + rmagenta() + ': ' + \
# record.msg.replace(e, cc(e, fore='yellow', back='red'))
# return True
#
# Path: falsy/jlog/formatters.py
# class JLogColoredFormatter(logging.Formatter):
# def __init__(self, fmt=None, datefmt=None, style='%',
# log_colors=None, reset=True):
# if fmt is None:
# default_formats = {
# '%': '%(log_color)s%(levelname)s:%(name)s:%(message)s',
# '{': '{log_color}{levelname}:{name}:{message}',
# '$': '${log_color}${levelname}:${name}:${message}'
# }
# fmt = default_formats[style]
#
# super().__init__(fmt, datefmt, style)
# default_log_colors = {
# 'DEBUG': 'white',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
#
# self.log_colors = (
# log_colors if log_colors is not None else default_log_colors)
# self.reset = reset
#
# def color(self, log_colors, level_name):
# return parse_colors(log_colors.get(level_name, ""))
#
# def format(self, record):
# record = ColoredRecord(record)
# record.log_color = self.color(self.log_colors, record.levelname)
#
# message = super().format(record)
#
# if self.reset and not message.endswith(codes['reset']):
# message += codes['reset']
#
# return message
#
# Path: falsy/termcc/termcc.py
# def blue():
# return fore('blue')
#
# def yellow():
# return fore('yellow')
#
# def cyan():
# return fore('cyan')
#
# def red():
# return fore('red')
#
# def bold():
# return style('bold') #
#
# def magenta():
# return fore('magenta')
, which may contain function names, class names, or code. Output only the next line. | 'trace': magenta() + str(exc_type) + ' ' + bold() + magenta() + str(exc_value) + '\n\t' + lines} |
Predict the next line after this snippet: <|code_start|> return self.logger.critical(msg, *args, **kwargs)
def warning_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def critical_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.critical(msg, *args, **kwargs)
def error_trace(self, msg, *args, **kwargs):
self.trace(kwargs)
return self.logger.error(msg, *args, **kwargs)
def trace(self, kwargs):
exc_type, exc_value, exc_traceback = sys.exc_info()
stack = traceback.extract_tb(exc_traceback)
lines = []
for i, s in enumerate(stack):
filename = s.filename
l = len(filename)
shortfile = kwargs.get('shortfile', 40)
if l > shortfile:
filename = filename[filename.find('/', l - shortfile):]
line = '%-40s:%-4s %s' % (
blue() + filename, yellow() + str(s.lineno),
'|' + '-' * (i * 4) + cyan() + s.name + ':' + red() + s.line)
lines.append(line)
lines = '\n\t'.join(lines)
kwargs['extra'] = {
<|code_end|>
using the current file's imports:
import logging
import logging.config
import traceback
import collections
import sys
from falsy.jlog.filters import TraceFilter, HighlightFilter
from falsy.jlog.formatters import JLogColoredFormatter
from falsy.termcc.termcc import blue, yellow, cyan, red, bold, magenta
and any relevant context from other files:
# Path: falsy/jlog/filters.py
# class TraceFilter(logging.Filter):
# def filter(self, record):
# if 'trace' not in dir(record):
# record.trace = ''
# else:
# record.trace = '\n\t' + record.trace
# return True
#
# class HighlightFilter(logging.Filter):
# def __init__(self, highlights=None):
# self.highlights = highlights
#
# def filter(self, record):
# record.high = ''
# if self.highlights is None:
# return True
# for e in self.highlights:
# if e in record.msg:
# record.high = '\n\t' + \
# magenta() + 'highlight' + rmagenta() + ': ' + \
# record.msg.replace(e, cc(e, fore='yellow', back='red'))
# return True
#
# Path: falsy/jlog/formatters.py
# class JLogColoredFormatter(logging.Formatter):
# def __init__(self, fmt=None, datefmt=None, style='%',
# log_colors=None, reset=True):
# if fmt is None:
# default_formats = {
# '%': '%(log_color)s%(levelname)s:%(name)s:%(message)s',
# '{': '{log_color}{levelname}:{name}:{message}',
# '$': '${log_color}${levelname}:${name}:${message}'
# }
# fmt = default_formats[style]
#
# super().__init__(fmt, datefmt, style)
# default_log_colors = {
# 'DEBUG': 'white',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
#
# self.log_colors = (
# log_colors if log_colors is not None else default_log_colors)
# self.reset = reset
#
# def color(self, log_colors, level_name):
# return parse_colors(log_colors.get(level_name, ""))
#
# def format(self, record):
# record = ColoredRecord(record)
# record.log_color = self.color(self.log_colors, record.levelname)
#
# message = super().format(record)
#
# if self.reset and not message.endswith(codes['reset']):
# message += codes['reset']
#
# return message
#
# Path: falsy/termcc/termcc.py
# def blue():
# return fore('blue')
#
# def yellow():
# return fore('yellow')
#
# def cyan():
# return fore('cyan')
#
# def red():
# return fore('red')
#
# def bold():
# return style('bold') #
#
# def magenta():
# return fore('magenta')
. Output only the next line. | 'trace': magenta() + str(exc_type) + ' ' + bold() + magenta() + str(exc_value) + '\n\t' + lines} |
Based on the snippet: <|code_start|> # Lowercase name here.
name = name.lower()
# Now we can actually record the header name and value.
if name in headers['content'][count]:
headers['content'][count][name].append(value)
else:
headers['content'][count][name] = [value]
def write_function(buf):
size = data_buf.getbuffer().nbytes
if size < 4096000:
data_buf.write(buf)
return len(buf)
return 0
url = p.get('url')
c._raw_url = url
c._raw_id = p.get('id', str(uuid.uuid1()))
c._raw_post_func = p.get('post_func')
c._raw_payload = p
c.setopt(pycurl.URL, url.encode('utf-8'))
c.setopt(pycurl.FOLLOWLOCATION, p.get('followlocation', 1))
c.setopt(pycurl.MAXREDIRS, p.get('maxredirs', 5))
# c.setopt(pycurl.WRITEHEADER, header_buf)
headerfunction = p.get('headerfunction')
if headerfunction is None:
c.setopt(pycurl.HEADERFUNCTION, header_function)
else:
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import re
import pycurl
import uuid
from falsy.loader.func import load
and context (classes, functions, sometimes code) from other files:
# Path: falsy/loader/func.py
# def load(function_name):
# if not function_name:
# return None
#
# if function_name in func_map.keys():
# return func_map[function_name]
#
# module_name, attr_path = function_name.rsplit('.', 1)
# module = None
# last_import_error = None
#
# while not module:
#
# try:
# module = importlib.import_module(module_name)
# except ImportError as import_error:
# last_import_error = import_error
# if '.' in module_name:
# module_name, attr_path1 = module_name.rsplit('.', 1)
# attr_path = '{0}.{1}'.format(attr_path1, attr_path)
# else:
# raise
# try:
# function = deep_getattr(module, attr_path)
# except AttributeError:
# if last_import_error:
# raise last_import_error
# else:
# raise
# func_map[str(function_name)] = function
# return function
. Output only the next line. | c.setopt(pycurl.HEADERFUNCTION, load(headerfunction)) |
Continue the code snippet: <|code_start|>
class TraceFilter(logging.Filter):
def filter(self, record):
if 'trace' not in dir(record):
record.trace = ''
else:
record.trace = '\n\t' + record.trace
return True
class HighlightFilter(logging.Filter):
def __init__(self, highlights=None):
self.highlights = highlights
def filter(self, record):
record.high = ''
if self.highlights is None:
return True
for e in self.highlights:
if e in record.msg:
record.high = '\n\t' + \
<|code_end|>
. Use current file imports:
import logging
from falsy.termcc.termcc import magenta, rmagenta, cc
and context (classes, functions, or code) from other files:
# Path: falsy/termcc/termcc.py
# def magenta():
# return fore('magenta')
#
# def rmagenta():
# return rfore('magenta')
#
# def cc(text, **kwargs):
# c = kwargs.get('fore')
# b = kwargs.get('back')
# ss = kwargs.get('styles')
# p = ''
# if c:
# p += fore(c)
# print(p)
# if b:
# p += back(b)
# if ss:
# for s in ss:
# p += style(s)
# return p + text + reset()
. Output only the next line. | magenta() + 'highlight' + rmagenta() + ': ' + \ |
Next line prediction: <|code_start|>
class TraceFilter(logging.Filter):
def filter(self, record):
if 'trace' not in dir(record):
record.trace = ''
else:
record.trace = '\n\t' + record.trace
return True
class HighlightFilter(logging.Filter):
def __init__(self, highlights=None):
self.highlights = highlights
def filter(self, record):
record.high = ''
if self.highlights is None:
return True
for e in self.highlights:
if e in record.msg:
record.high = '\n\t' + \
<|code_end|>
. Use current file imports:
(import logging
from falsy.termcc.termcc import magenta, rmagenta, cc)
and context including class names, function names, or small code snippets from other files:
# Path: falsy/termcc/termcc.py
# def magenta():
# return fore('magenta')
#
# def rmagenta():
# return rfore('magenta')
#
# def cc(text, **kwargs):
# c = kwargs.get('fore')
# b = kwargs.get('back')
# ss = kwargs.get('styles')
# p = ''
# if c:
# p += fore(c)
# print(p)
# if b:
# p += back(b)
# if ss:
# for s in ss:
# p += style(s)
# return p + text + reset()
. Output only the next line. | magenta() + 'highlight' + rmagenta() + ': ' + \ |
Using the snippet: <|code_start|>
class TraceFilter(logging.Filter):
def filter(self, record):
if 'trace' not in dir(record):
record.trace = ''
else:
record.trace = '\n\t' + record.trace
return True
class HighlightFilter(logging.Filter):
def __init__(self, highlights=None):
self.highlights = highlights
def filter(self, record):
record.high = ''
if self.highlights is None:
return True
for e in self.highlights:
if e in record.msg:
record.high = '\n\t' + \
magenta() + 'highlight' + rmagenta() + ': ' + \
<|code_end|>
, determine the next line of code. You have imports:
import logging
from falsy.termcc.termcc import magenta, rmagenta, cc
and context (class names, function names, or code) available:
# Path: falsy/termcc/termcc.py
# def magenta():
# return fore('magenta')
#
# def rmagenta():
# return rfore('magenta')
#
# def cc(text, **kwargs):
# c = kwargs.get('fore')
# b = kwargs.get('back')
# ss = kwargs.get('styles')
# p = ''
# if c:
# p += fore(c)
# print(p)
# if b:
# p += back(b)
# if ss:
# for s in ss:
# p += style(s)
# return p + text + reset()
. Output only the next line. | record.msg.replace(e, cc(e, fore='yellow', back='red')) |
Here is a snippet: <|code_start|>
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
class CommonWSGIMiddleware(object):
def __init__(self, falcon_api, app, url_prefix='wsgi'):
self.falcon_api = falcon_api
self.app = app
self.url_prefix = url_prefix.lstrip('/')
<|code_end|>
. Write the next line using the current file imports:
import logging
import mimetypes
import os
import falcon
from jinja2 import Template
from falsy.jlog.jlog import JLog
and context from other files:
# Path: falsy/jlog/jlog.py
# class JLog:
# def __init__(self, name='falsy'):
# self.logger = None
# self.logname = name
#
# def setup(self, config=None):
# if config is not None:
# highlights = config.get('highlights')
# logfile = config.get('logfile', '/tmp/falsy.log')
# file_level = config.get('file_level', 'DEBUG')
# console_level = config.get('console_level', 'DEBUG')
# handlers = config.get('handlers', ['file', 'console'])
# extra_loggers = config.get('extra_loggers')
# else:
# highlights = None
# logfile = '/tmp/falsy.log'
# file_level = console_level = 'DEBUG'
# handlers = ['file', 'console']
# extra_loggers = None
# config = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'formatters': {
# 'file': {
# 'fmt': '%(asctime)s.%(msecs)03d %(levelname)-8s %(name)-8s %(message)s',
# 'datefmt': '%Y-%m-%d %H:%M:%S %Z%z'
# },
# 'console': {
# '()': JLogColoredFormatter,
# 'fmt': '%(99)s%(process)s-%(thread)s%(reset)s %(yellow)s%(asctime)s.%(msecs)03d%(reset)s %(cyan)s%(name)-8s%(reset)s'
# '%(log_color)s%(message)s%(reset)s%(trace)s%(high)s',
# 'datefmt': '%m%d %H:%M:%S',
# 'log_colors': {
# 'DEBUG': 'blue',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
# },
#
# },
# 'filters': {
# 'trace_filter': {
# '()': TraceFilter,
# },
# 'highlight_filter': {
# '()': HighlightFilter,
# 'highlights': highlights
# }
# },
# 'handlers': {
# 'file': {
# 'level': file_level,
# 'filters': None,
# 'class': 'logging.handlers.TimedRotatingFileHandler',
# 'filename': logfile,
# 'formatter': 'file'
# },
# 'console': {
# 'level': console_level,
# 'filters': ['trace_filter', 'highlight_filter'],
# 'class': 'logging.StreamHandler',
# 'stream': 'ext://sys.stdout',
# 'formatter': 'console'
# },
# },
# 'loggers': {
# self.logname: {
# 'handlers': handlers,
# 'level': 'DEBUG',
# 'propagate': False,
# },
# }
# }
# if extra_loggers:
# config['loggers'].update(extra_loggers)
# logging.config.dictConfig(config)
# return self
#
# def bind(self):
# self.logger = logging.getLogger(self.logname)
# return self
#
# def bind2(self, logname):
# self.logger = logging.getLogger(logname)
# return self
#
# def debug(self, msg, *args, **kwargs):
# return self.logger.debug(msg, *args, **kwargs)
#
# def info(self, msg, *args, **kwargs):
# return self.logger.info(msg, *args, **kwargs)
#
# def warning(self, msg, *args, **kwargs):
# return self.logger.warning(msg, *args, **kwargs)
#
# def error(self, msg, *args, **kwargs):
# return self.logger.error(msg, *args, **kwargs)
#
# def critical(self, msg, *args, **kwargs):
# return self.logger.critical(msg, *args, **kwargs)
#
# def warning_trace(self, msg, *args, **kwargs):
# self.trace(kwargs)
# return self.logger.critical(msg, *args, **kwargs)
#
# def critical_trace(self, msg, *args, **kwargs):
# self.trace(kwargs)
# return self.logger.critical(msg, *args, **kwargs)
#
# def error_trace(self, msg, *args, **kwargs):
# self.trace(kwargs)
# return self.logger.error(msg, *args, **kwargs)
#
# def trace(self, kwargs):
# exc_type, exc_value, exc_traceback = sys.exc_info()
# stack = traceback.extract_tb(exc_traceback)
# lines = []
# for i, s in enumerate(stack):
# filename = s.filename
# l = len(filename)
# shortfile = kwargs.get('shortfile', 40)
# if l > shortfile:
# filename = filename[filename.find('/', l - shortfile):]
# line = '%-40s:%-4s %s' % (
# blue() + filename, yellow() + str(s.lineno),
# '|' + '-' * (i * 4) + cyan() + s.name + ':' + red() + s.line)
# lines.append(line)
# lines = '\n\t'.join(lines)
# kwargs['extra'] = {
# 'trace': magenta() + str(exc_type) + ' ' + bold() + magenta() + str(exc_value) + '\n\t' + lines}
, which may include functions, classes, or code. Output only the next line. | self.log = JLog().bind() |
Predict the next line for this snippet: <|code_start|>
route_args = {
'/get/v1/hello': {
'name': fields.Str(required=False),
},
'/post/v1/hello': {
'name': fields.Str(validate=lambda p: len(p) >= 4)
}
}
def mmcheck(req, resp, **kwargs):
<|code_end|>
with the help of current file imports:
from marshmallow import fields
from falsy.utils.marshmallow import mm_check
and context from other files:
# Path: falsy/utils/marshmallow.py
# def mm_check(routes, req, **kwargs):
# sig = req.spec['route_signature']
# args = routes.get(sig)
# schema = argmap2schema(args)
# e = schema.validate(kwargs)
# if e:
# raise MMException(str(e))
, which may contain function names, class names, or code. Output only the next line. | mm_check(route_args, req, **kwargs) |
Predict the next line after this snippet: <|code_start|>
if __name__ == "__main__":
payload = [
# {
# "url": "http://172.30.0.77:8003/v1/validate",
# "postfields": {
# "token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE0OTE4ODg2ODQsImNvZGVfaWQiOiJjYWYwZTZlOC0wYTEzLTExZTctOTVhNy0xYzg3MmM3MTBhNDgifQ.SkwAtOX8JW4ZRb2S4cftg7PGveU21DZKzlrBYRK6S9I"
# },
#
# 'id':2
# },
# {
# 'url': 'http://www.douban.com',
# 'dns_servers': '114.114.114.114'
# },
{
'url': 'http://www.baidu.com',
},
{
'url': 'http://www.douban.com',
},
# {
# 'url': 'http://www.google.com',
# 'dns_servers': '114.114.114.114',
# 'id':1
# },
]
# payload=[{'url': 'http://lwjk.hncd.cn:8091/hnjtyjws', 'useragent': 'Mozilla/5.0', 'followlocation': 0, 'aiohttp_timeout': 60, 'connecttimeout': 30, 'timeout': 30}]
<|code_end|>
using the current file's imports:
import json
from falsy.netboy.fetch import get_boy, post_boy, net_boy
from falsy.netboy.netboy import NetBoy
from falsy.netboy.run import run
and any relevant context from other files:
# Path: falsy/netboy/fetch.py
# async def get_boy(payload):
# targets = []
# for p in payload:
# targets.append(get_request(p))
# res = await aio.gather(
# *targets, return_exceptions=True
# )
# return res
#
# async def post_boy(payload):
# targets = []
# for p in payload:
# targets.append(post_request(p))
# res = await aio.gather(
# *targets, return_exceptions=True
# )
# return res
#
# async def net_boy(payload, share=None):
# targets = []
# for p in payload:
# if p.get('postfields'):
# targets.append(post_request(p, share))
# else:
# targets.append(get_request(p, share))
# res = await aio.gather(
# *targets, return_exceptions=True
# )
# return res
#
# Path: falsy/netboy/netboy.py
# class NetBoy:
# class Exception(Exception):
# pass
#
# class Dict(typing.Dict[str, typing.Any]):
# def __getattr__(self, name):
# # type: (str) -> Any
# try:
# return self[name]
# except KeyError:
# # raise NetBoy.Exception('netboy key error: ' + name)
# return None # '!netboy key [' + name + '] does not exist'
# except Exception:
# raise NetBoy.Exception('netboy exception: ' + name)
#
# def __setattr__(self, name, value):
# # type: (str, Any) -> None
# self[name] = value
#
# def __init__(self, payload=None, share=None):
# self.payload = payload
# if share:
# s = pycurl.CurlShare()
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_SSL_SESSION)
# self.share = s
# else:
# self.share = None
#
# def run(self, payload=None, loop=None):
# real_payload = payload
# if self.payload is None:
# real_payload = payload
# elif payload is None:
# real_payload = self.payload
# else:
# real_payload = self.payload + payload
# ress = run(net_boy(real_payload, self.share), loop=loop)
# obj_ress = []
# for v in ress:
# if type(v) == CurlLoop.CurlException:
# boy = NetBoy.Dict(v.data)
# # boy['payload'] = real_payload
# obj_ress.append(boy)
# elif type(v) == dict:
# boy = NetBoy.Dict(v)
# obj_ress.append(boy)
# # else:
# # boy = NetBoy.Dict({
# # 'state': 'critical',
# # 'spider': 'pycurl',
# # 'error_code': -1,
# # 'error_desc': "{} - {}".format(type(v), str(v)),
# # 'payload': real_payload
# # })
# # obj_ress.append(boy)
# return obj_ress
#
# Path: falsy/netboy/run.py
# def run(coro, loop=None):
# async def main_task():
# pycurl_task = aio.ensure_future(curl_loop())
# try:
# r = await coro
# finally:
# pycurl_task.cancel()
# with suppress(aio.CancelledError):
# await pycurl_task
# return r, pycurl_task
#
# if loop is None:
# loop = uvloop.new_event_loop()
# # loop = aio.get_event_loop()
# aio.set_event_loop(loop)
# loop.set_exception_handler(exception_handler)
# r, _ = loop.run_until_complete(main_task())
# return r
. Output only the next line. | boy=NetBoy(payload, share=True) |
Continue the code snippet: <|code_start|>
if __name__ == "__main__":
payload = [
# {
# "url": "http://172.30.0.77:8003/v1/validate",
# "postfields": {
# "token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE0OTE4ODg2ODQsImNvZGVfaWQiOiJjYWYwZTZlOC0wYTEzLTExZTctOTVhNy0xYzg3MmM3MTBhNDgifQ.SkwAtOX8JW4ZRb2S4cftg7PGveU21DZKzlrBYRK6S9I"
# },
#
# 'id':2
# },
# {
# 'url': 'http://www.douban.com',
# 'dns_servers': '114.114.114.114'
# },
{
'url': 'http://www.baidu.com',
},
{
'url': 'http://www.douban.com',
},
# {
# 'url': 'http://www.google.com',
# 'dns_servers': '114.114.114.114',
# 'id':1
# },
]
# payload=[{'url': 'http://lwjk.hncd.cn:8091/hnjtyjws', 'useragent': 'Mozilla/5.0', 'followlocation': 0, 'aiohttp_timeout': 60, 'connecttimeout': 30, 'timeout': 30}]
boy=NetBoy(payload, share=True)
<|code_end|>
. Use current file imports:
import json
from falsy.netboy.fetch import get_boy, post_boy, net_boy
from falsy.netboy.netboy import NetBoy
from falsy.netboy.run import run
and context (classes, functions, or code) from other files:
# Path: falsy/netboy/fetch.py
# async def get_boy(payload):
# targets = []
# for p in payload:
# targets.append(get_request(p))
# res = await aio.gather(
# *targets, return_exceptions=True
# )
# return res
#
# async def post_boy(payload):
# targets = []
# for p in payload:
# targets.append(post_request(p))
# res = await aio.gather(
# *targets, return_exceptions=True
# )
# return res
#
# async def net_boy(payload, share=None):
# targets = []
# for p in payload:
# if p.get('postfields'):
# targets.append(post_request(p, share))
# else:
# targets.append(get_request(p, share))
# res = await aio.gather(
# *targets, return_exceptions=True
# )
# return res
#
# Path: falsy/netboy/netboy.py
# class NetBoy:
# class Exception(Exception):
# pass
#
# class Dict(typing.Dict[str, typing.Any]):
# def __getattr__(self, name):
# # type: (str) -> Any
# try:
# return self[name]
# except KeyError:
# # raise NetBoy.Exception('netboy key error: ' + name)
# return None # '!netboy key [' + name + '] does not exist'
# except Exception:
# raise NetBoy.Exception('netboy exception: ' + name)
#
# def __setattr__(self, name, value):
# # type: (str, Any) -> None
# self[name] = value
#
# def __init__(self, payload=None, share=None):
# self.payload = payload
# if share:
# s = pycurl.CurlShare()
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_SSL_SESSION)
# self.share = s
# else:
# self.share = None
#
# def run(self, payload=None, loop=None):
# real_payload = payload
# if self.payload is None:
# real_payload = payload
# elif payload is None:
# real_payload = self.payload
# else:
# real_payload = self.payload + payload
# ress = run(net_boy(real_payload, self.share), loop=loop)
# obj_ress = []
# for v in ress:
# if type(v) == CurlLoop.CurlException:
# boy = NetBoy.Dict(v.data)
# # boy['payload'] = real_payload
# obj_ress.append(boy)
# elif type(v) == dict:
# boy = NetBoy.Dict(v)
# obj_ress.append(boy)
# # else:
# # boy = NetBoy.Dict({
# # 'state': 'critical',
# # 'spider': 'pycurl',
# # 'error_code': -1,
# # 'error_desc': "{} - {}".format(type(v), str(v)),
# # 'payload': real_payload
# # })
# # obj_ress.append(boy)
# return obj_ress
#
# Path: falsy/netboy/run.py
# def run(coro, loop=None):
# async def main_task():
# pycurl_task = aio.ensure_future(curl_loop())
# try:
# r = await coro
# finally:
# pycurl_task.cancel()
# with suppress(aio.CancelledError):
# await pycurl_task
# return r, pycurl_task
#
# if loop is None:
# loop = uvloop.new_event_loop()
# # loop = aio.get_event_loop()
# aio.set_event_loop(loop)
# loop.set_exception_handler(exception_handler)
# r, _ = loop.run_until_complete(main_task())
# return r
. Output only the next line. | ress = boy.run() |
Based on the snippet: <|code_start|># Single curl request:
def post_it(payload):
if type(payload) is list:
payload = payload[0]
c = pycurl.Curl()
data_buf = BytesIO()
# header_buf = BytesIO()
headers = {'count': 0, 'content': [{}]}
try:
<|code_end|>
, predict the immediate next line with the help of imports:
import pycurl
import re
import json
from io import BytesIO
from bs4 import UnicodeDammit, BeautifulSoup
from falsy.netboy.curl_result import curl_result
from falsy.netboy.utils import setup_curl_for_get, get_title, get_links, get_links2, get_metas, get_images, \
get_scripts, get_text, setup_curl_for_post
and context (classes, functions, sometimes code) from other files:
# Path: falsy/netboy/curl_result.py
# def curl_result(c):
# effective_url = c.getinfo(pycurl.EFFECTIVE_URL)
# primary_ip = c.getinfo(pycurl.PRIMARY_IP)
# primary_port = c.getinfo(pycurl.PRIMARY_PORT)
# local_ip = c.getinfo(pycurl.LOCAL_IP)
# local_port = c.getinfo(pycurl.LOCAL_PORT)
# speed_download = c.getinfo(pycurl.SPEED_DOWNLOAD)
# size_download = c.getinfo(pycurl.SIZE_DOWNLOAD)
# redirect_time = c.getinfo(pycurl.REDIRECT_TIME)
# redirect_count = c.getinfo(pycurl.REDIRECT_COUNT)
# redirect_url = c.getinfo(pycurl.REDIRECT_URL)
# http_code = c.getinfo(pycurl.HTTP_CODE)
# response_code = c.getinfo(pycurl.RESPONSE_CODE)
# total_time = c.getinfo(pycurl.TOTAL_TIME)
# content_type = c.getinfo(pycurl.CONTENT_TYPE)
# namelookup_time = c.getinfo(pycurl.NAMELOOKUP_TIME)
# info_filetime = c.getinfo(pycurl.INFO_FILETIME)
# http_connectcode = c.getinfo(pycurl.HTTP_CONNECTCODE)
# starttransfer_time = c.getinfo(pycurl.STARTTRANSFER_TIME)
# pretransfer_time = c.getinfo(pycurl.PRETRANSFER_TIME)
# header_size = c.getinfo(pycurl.HEADER_SIZE)
# request_size = c.getinfo(pycurl.REQUEST_SIZE)
# ssl_verifyresult = c.getinfo(pycurl.SSL_VERIFYRESULT)
# num_connects = c.getinfo(pycurl.NUM_CONNECTS)
#
# return {
# 'effective_url': effective_url,
# 'primary_ip': primary_ip,
# 'primary_port': primary_port,
# 'local_ip': local_ip,
# 'local_port': local_port,
# 'speed_download': speed_download,
# 'size_download': size_download,
# 'redirect_time': redirect_time,
# 'redirect_count': redirect_count,
# 'redirect_url': redirect_url,
# 'http_code': http_code,
# 'response_code': response_code,
# 'total_time': total_time,
# 'content_type': content_type,
# 'namelookup_time': namelookup_time,
# 'info_filetime': info_filetime,
# 'http_connectcode': http_connectcode,
# 'starttransfer_time': starttransfer_time,
# 'pretransfer_time': pretransfer_time,
# 'header_size': header_size,
# 'request_size': request_size,
# 'ssl_verifyresult': ssl_verifyresult,
# 'num_connects': num_connects,
# # 'proxy_ssl_verifyresult': proxy_ssl_verifyresult,
# # 'app_connecttime': app_connecttime,
#
# }
#
# Path: falsy/netboy/utils.py
# def setup_curl_for_get(c, p, data_buf, headers=None, share=None):
# setup_curl_basic(c, p, data_buf, headers, share)
# httpheader = p.get('httpheader')
# if httpheader:
# # c.setopt(pycurl.HEADER, p.get('header', 1))
# c.setopt(c.HTTPHEADER, httpheader)
# return c
#
# def get_title(soup):
# if soup.title is None:
# return None
# text = str(soup.title.get_text())
# return re.sub('\s+', ' ', text)
#
# def get_links(soup):
# return [link['href'] for link in soup.find_all('a', href=True)]
#
# def get_links2(soup):
# return [style['href'] for style in soup.find_all('link', href=True)]
#
# def get_metas(soup):
# return [meta.get('content') for meta in soup.find_all('meta', content=True)]
#
# def get_images(soup):
# return [img['src'] for img in soup.find_all('img', src=True)]
#
# def get_scripts(soup):
# return [script['src'] for script in soup.find_all('script', src=True)]
#
# def get_text(soup):
# texts = soup.findAll(text=True)
#
# def visible(element):
# if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
# return False
# elif re.match('<!--.*-->', str(element)):
# return False
# return True
#
# visible_texts = filter(visible, texts)
# return ' '.join(visible_texts)
#
# def setup_curl_for_post(c, p, data_buf, headers=None, share=None):
# setup_curl_basic(c, p, data_buf, headers, share)
# httpheader = p.get('httpheader', ['Accept: application/json', "Content-type: application/json"])
# if httpheader:
# # c.setopt(pycurl.HEADER, p.get('header', 1))
# c.setopt(pycurl.HTTPHEADER, httpheader)
# post301 = getattr(pycurl, 'POST301', None)
# if post301 is not None:
# # Added in libcurl 7.17.1.
# c.setopt(post301, True)
# c.setopt(pycurl.POST, 1)
# postfields = p.get('postfields')
# if postfields:
# postfields = json.dumps(postfields, indent=2, ensure_ascii=False)
# c.setopt(pycurl.POSTFIELDS, postfields)
# return c
. Output only the next line. | setup_curl_for_post(c, payload, data_buf, headers) # header_buf) |
Given the following code snippet before the placeholder: <|code_start|>
# from ymon.loader.task import loads
if __name__ == '__main__':
payload = {
'tasks': [
{"args": "haha", "ids": ["demo.celery.task.tasks.test2"], "on_error": "demo.celery.task.tasks.on_chord_error"},
],
'callback': "demo.celery.task.tasks.callback"
}
<|code_end|>
, predict the next line using imports from the current file:
from time import sleep
from celery import group, chain, chord
from celery.result import AsyncResult
from .main import app
from falsy.loader import func, task
from .tasks import add, on_chord_error
and context including class names, function names, and sometimes code from other files:
# Path: demo/celery/task/main.py
#
# Path: falsy/loader/func.py
# def load(function_name):
# def deep_getattr(obj, attr):
#
# Path: falsy/loader/task.py
# def load(id, args, error_handler=None):
# def loads(payload):
#
# Path: demo/celery/task/tasks.py
# class ExampleException(Exception):
# def catch(et, ev, es):
# def crawl(self, urls):
# def callback(self, s):
# def on_chord_error(request, exc, traceback):
# def error_callback(*args, **kwargs):
# def post_func(payload, resp):
. Output only the next line. | res = task.loads(payload).delay() |
Based on the snippet: <|code_start|>
try:
except Exception as e:
raise Exception('celery import failed')
def load(id, args, error_handler=None):
if args and error_handler:
<|code_end|>
, predict the immediate next line with the help of imports:
from falsy.loader import func
from celery import chain, chord, group
and context (classes, functions, sometimes code) from other files:
# Path: falsy/loader/func.py
# def load(function_name):
# def deep_getattr(obj, attr):
. Output only the next line. | return func.load(id).s(args).on_error(func.load(error_handler).s()) |
Predict the next line after this snippet: <|code_start|>
async def get_boy(payload):
targets = []
for p in payload:
<|code_end|>
using the current file's imports:
from falsy.netboy.request import get_request, post_request
import asyncio as aio
and any relevant context from other files:
# Path: falsy/netboy/request.py
# async def get_request(payload, share=None):
# c = pycurl.Curl()
# data_buf = BytesIO()
# # header_buf = BytesIO()
# headers = {'count': 0, 'content': [{}]}
# try:
# setup_curl_for_get(c, payload, data_buf, headers, share) # header_buf)
#
# with aiohttp.Timeout(payload.get('aiohttp_timeout', 60)):
# resp = await CurlLoop.handler_ready(c)
# charset = None
# if 'content-type' in headers:
# content_type = headers['content-type'].lower()
# match = re.search('charset=(\S+)', content_type)
# if match:
# charset = match.group(1)
# print('Decoding using %s' % charset)
# body = data_buf.getvalue()
# if len(body) == 0:
# data = ''
# charset = 'utf-8'
# else:
# if charset is None:
# dammit = UnicodeDammit(body, ["utf-8", "gb2312", "gbk", "big5", "gb18030"], smart_quotes_to="html")
# data = dammit.unicode_markup
# charset = dammit.original_encoding
# else:
# data = body.decode(charset, 'ignore')
# # headers.remove({})
# headers['content'] = [h for h in headers['content'] if len(h) > 0]
# soup_lxml = BeautifulSoup(data, 'lxml')
# soup_html = BeautifulSoup(data, 'html.parser')
# resp.update({
# 'url': payload.get('url'),
# # 'soup': soup,
# 'title': get_title(soup_lxml),
# 'links': get_links(soup_lxml),
# 'links2': get_links2(soup_lxml),
# 'metas': get_metas(soup_lxml),
# 'images': get_images(soup_lxml),
# 'scripts': get_scripts(soup_lxml),
# 'text': get_text(soup_html),
# 'data': data,
# 'headers': headers,
# 'charset': charset,
# 'spider': 'pycurl',
# 'payload': payload,
# })
# post_func = payload.get('post_func')
# if post_func:
# post_func = load(post_func)
# resp = post_func(payload, resp)
# return resp
# finally:
# c.close()
#
# async def post_request(payload, share=None):
# c = pycurl.Curl()
# data_buf = BytesIO()
# # header_buf = BytesIO()
# headers = {'count': 0, 'content': [{}]}
# try:
# setup_curl_for_post(c, payload, data_buf, headers, share) # header_buf)
#
# with aiohttp.Timeout(payload.get('aiohttp_timeout', 60)):
# resp = await CurlLoop.handler_ready(c)
# # encoding = None
# # if 'content-type' in headers:
# # content_type = headers['content-type'].lower()
# # match = re.search('charset=(\S+)', content_type)
# # if match:
# # encoding = match.group(1)
# # print('Decoding using %s' % encoding)
# body = data_buf.getvalue()
# encoding = 'utf-8'
# data = body.decode(encoding, 'ignore') if len(body) > 0 else ''
#
# # if encoding is None:
# # dammit = UnicodeDammit(body, ["utf-8", "gb2312", "gbk", "big5", "gb18030"], smart_quotes_to="html")
# # data = dammit.unicode_markup
# # encoding = dammit.original_encoding
# # else:
# # data = body.decode(encoding, 'ignore')
# # headers.remove({})
# headers['content'] = [h for h in headers['content'] if len(h) > 0]
#
# resp.update({
# # 'url': payload.get('url'),
# 'data': data,
# 'headers': headers,
# 'encoding': encoding,
# })
# post_func = payload.get('post_func')
# if type(post_func) == str:
# post_func = load(post_func)
# if post_func:
# resp = post_func(payload, resp)
# # post_func = payload.get('post_func')
# # if post_func:
# # post_func = load(post_func)
# # resp = post_func(payload, resp)
# return resp
# finally:
# c.close()
. Output only the next line. | targets.append(get_request(p)) |
Given the code snippet: <|code_start|>
async def get_boy(payload):
targets = []
for p in payload:
targets.append(get_request(p))
res = await aio.gather(
*targets, return_exceptions=True
)
return res
async def post_boy(payload):
targets = []
for p in payload:
<|code_end|>
, generate the next line using the imports in this file:
from falsy.netboy.request import get_request, post_request
import asyncio as aio
and context (functions, classes, or occasionally code) from other files:
# Path: falsy/netboy/request.py
# async def get_request(payload, share=None):
# c = pycurl.Curl()
# data_buf = BytesIO()
# # header_buf = BytesIO()
# headers = {'count': 0, 'content': [{}]}
# try:
# setup_curl_for_get(c, payload, data_buf, headers, share) # header_buf)
#
# with aiohttp.Timeout(payload.get('aiohttp_timeout', 60)):
# resp = await CurlLoop.handler_ready(c)
# charset = None
# if 'content-type' in headers:
# content_type = headers['content-type'].lower()
# match = re.search('charset=(\S+)', content_type)
# if match:
# charset = match.group(1)
# print('Decoding using %s' % charset)
# body = data_buf.getvalue()
# if len(body) == 0:
# data = ''
# charset = 'utf-8'
# else:
# if charset is None:
# dammit = UnicodeDammit(body, ["utf-8", "gb2312", "gbk", "big5", "gb18030"], smart_quotes_to="html")
# data = dammit.unicode_markup
# charset = dammit.original_encoding
# else:
# data = body.decode(charset, 'ignore')
# # headers.remove({})
# headers['content'] = [h for h in headers['content'] if len(h) > 0]
# soup_lxml = BeautifulSoup(data, 'lxml')
# soup_html = BeautifulSoup(data, 'html.parser')
# resp.update({
# 'url': payload.get('url'),
# # 'soup': soup,
# 'title': get_title(soup_lxml),
# 'links': get_links(soup_lxml),
# 'links2': get_links2(soup_lxml),
# 'metas': get_metas(soup_lxml),
# 'images': get_images(soup_lxml),
# 'scripts': get_scripts(soup_lxml),
# 'text': get_text(soup_html),
# 'data': data,
# 'headers': headers,
# 'charset': charset,
# 'spider': 'pycurl',
# 'payload': payload,
# })
# post_func = payload.get('post_func')
# if post_func:
# post_func = load(post_func)
# resp = post_func(payload, resp)
# return resp
# finally:
# c.close()
#
# async def post_request(payload, share=None):
# c = pycurl.Curl()
# data_buf = BytesIO()
# # header_buf = BytesIO()
# headers = {'count': 0, 'content': [{}]}
# try:
# setup_curl_for_post(c, payload, data_buf, headers, share) # header_buf)
#
# with aiohttp.Timeout(payload.get('aiohttp_timeout', 60)):
# resp = await CurlLoop.handler_ready(c)
# # encoding = None
# # if 'content-type' in headers:
# # content_type = headers['content-type'].lower()
# # match = re.search('charset=(\S+)', content_type)
# # if match:
# # encoding = match.group(1)
# # print('Decoding using %s' % encoding)
# body = data_buf.getvalue()
# encoding = 'utf-8'
# data = body.decode(encoding, 'ignore') if len(body) > 0 else ''
#
# # if encoding is None:
# # dammit = UnicodeDammit(body, ["utf-8", "gb2312", "gbk", "big5", "gb18030"], smart_quotes_to="html")
# # data = dammit.unicode_markup
# # encoding = dammit.original_encoding
# # else:
# # data = body.decode(encoding, 'ignore')
# # headers.remove({})
# headers['content'] = [h for h in headers['content'] if len(h) > 0]
#
# resp.update({
# # 'url': payload.get('url'),
# 'data': data,
# 'headers': headers,
# 'encoding': encoding,
# })
# post_func = payload.get('post_func')
# if type(post_func) == str:
# post_func = load(post_func)
# if post_func:
# resp = post_func(payload, resp)
# # post_func = payload.get('post_func')
# # if post_func:
# # post_func = load(post_func)
# # resp = post_func(payload, resp)
# return resp
# finally:
# c.close()
. Output only the next line. | targets.append(post_request(p)) |
Predict the next line for this snippet: <|code_start|>
class CustomException(Exception):
pass
def handle_custom(req, resp, e):
resp.body = json.dumps({'error': 'custom error catched'})
resp.content_type = 'application/json'
<|code_end|>
with the help of current file imports:
import json
from falsy.falsy import FALSY
and context from other files:
# Path: falsy/falsy.py
# class FALSY:
# def __init__(self, falcon_api=None,
# static_path='static', static_dir='static', log_config=None):
# if log_config is None:
# self.log = JLog().setup().bind()
# else:
# self.log = JLog().setup(config=log_config).bind()
# self.log.info(cc('falsy init', fore=77, styles=['italic', 'underlined', 'reverse']))
#
# self.api = self.falcon_api = falcon_api or falcon.API()
# self.static_path = static_path.strip('/')
# self.static_dir = static_dir if os.path.isdir(static_dir) else '.'
#
# self.api = CommonStaticMiddleware(self.falcon_api, static_dir=self.static_dir,
# url_prefix=self.static_path)
# self.log.info('common static middleware loaded\n\t{}'.format(
# 'url_prefix(static_path):' + reverse() + self.static_path + rreverse() +
# ', static_dir:' + reverse() + self.static_dir + rreverse()))
#
# def wsgi(self, app, url_prefix='/wsgi'):
# self.api = CommonWSGIMiddleware(self.api, app, url_prefix=url_prefix)
# self.log.info('common wsgi middleware loaded\n\t{}'.format('url_prefix:' + self.static_path))
# return self
#
# def swagger(self, filename, ui=True, new_file=None, ui_language='en', theme='normal', errors=None, cors_origin=None, api_url=None):
# server = SwaggerServer(errors=errors, cors_origin=cors_origin)
# self.log.info('swagger server init')
#
# swagger_file = filename.replace('/', '_')
# if swagger_file.endswith('yml') or swagger_file.endswith('yaml'):
# new_file = new_file or swagger_file
# new_file = new_file.replace('.yaml', '.json')
# new_file = new_file.replace('.yml', '.json')
# new_path = self.static_dir + '/' + new_file
# with open(filename, 'r') as f:
# config = yaml.load(f, Loader)
# server.load_specs(config)
# with open(new_path, 'w') as fw:
# config = self.remove_error_info(config)
# json.dump(config, fw, sort_keys=True, indent=4)
# self.log.info('swagger file generated(from yaml file)\n\t{}'.format(
# 'new_path:' + reverse() + new_path + rreverse()))
# else:
# new_file = new_file or swagger_file
# new_path = self.static_dir + '/' + new_file
# with open(filename, 'r') as fr:
# config = fr.read()
# server.load_specs(config)
# with open(new_path, 'w') as fw:
# config = json.loads(self.remove_error_info(config))
# json.dump(config, fw, sort_keys=True, indent=4)
# self.log.info('swagger file generated(from json file)\n\t{}'.format(
# 'new_path:' + reverse() + new_path + rreverse()))
# path = server.basePath
# path = path.lstrip('/') if path else 'v0'
# self.falcon_api.add_sink(server, '/' + path)
# self.log.info('swagger server sinked\n\t{}'.format('path:' + reverse() + path + rreverse()))
# if ui:
# self.api = SwaggerUIStaticMiddleware(self.api, swagger_file=self.static_path + '/' + new_file,
# url_prefix=path, language=ui_language, theme=theme, api_url=api_url)
# self.log.info('swagger ui static middleware loaded\n\t{}'.format(
# 'url_prefix(static_path):' + reverse() + self.static_path) + rreverse())
# return self
#
# # deprecated
# def begin_api(self, api_prefix=None, errors=None):
# pass
#
# # deprecated
# def end_api(self):
# pass
#
# def remove_error_info(self, d):
# if not isinstance(d, (dict, list)):
# return d
# if isinstance(d, list):
# return [self.remove_error_info(v) for v in d]
# return {k: self.remove_error_info(v) for k, v in d.items()
# if k not in {'validationId', 'beforeId', 'afterId', 'exceptionId', 'operationId', 'finalId', 'operationMode'}}
, which may contain function names, class names, or code. Output only the next line. | f = FALSY(static_path='test', static_dir='demo/catch/static') |
Predict the next line after this snippet: <|code_start|> "text": document.body.innerText,
});
'''
req['params'] = {"expression": eval_func}
ws.send(json.dumps(req))
resp = self.recv4result(ws)
return resp
def crawl_info(self, data, payload, begin_time):
end_time = datetime.datetime.now()
elapsed = (end_time - begin_time).total_seconds()
tid = payload.get('id')
url = payload.get('url')
if type(data) == dict:
data['time'] = elapsed
if tid:
data['id'] = tid
data['spider'] = 'chrome'
data['url'] = url
data['payload'] = payload
# data['chrome_id'] = payload.get('chrome_id')
else:
data = {
'time': elapsed,
'spider': 'chrome',
'url': url,
}
post_func = payload.get('post_func')
if type(post_func) == str:
<|code_end|>
using the current file's imports:
import datetime
import json
import socket
import traceback
import html
import pycurl
import uvloop
import websocket
import sys
import asyncio as aio
from concurrent import futures
from logging import Logger
from time import sleep
from urllib.parse import urlparse
from bs4 import UnicodeDammit
from falsy.loader import func
from falsy.netboy.netboy import NetBoy
from falsy.netboy.one import get_it
and any relevant context from other files:
# Path: falsy/loader/func.py
# def load(function_name):
# def deep_getattr(obj, attr):
#
# Path: falsy/netboy/netboy.py
# class NetBoy:
# class Exception(Exception):
# pass
#
# class Dict(typing.Dict[str, typing.Any]):
# def __getattr__(self, name):
# # type: (str) -> Any
# try:
# return self[name]
# except KeyError:
# # raise NetBoy.Exception('netboy key error: ' + name)
# return None # '!netboy key [' + name + '] does not exist'
# except Exception:
# raise NetBoy.Exception('netboy exception: ' + name)
#
# def __setattr__(self, name, value):
# # type: (str, Any) -> None
# self[name] = value
#
# def __init__(self, payload=None, share=None):
# self.payload = payload
# if share:
# s = pycurl.CurlShare()
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_SSL_SESSION)
# self.share = s
# else:
# self.share = None
#
# def run(self, payload=None, loop=None):
# real_payload = payload
# if self.payload is None:
# real_payload = payload
# elif payload is None:
# real_payload = self.payload
# else:
# real_payload = self.payload + payload
# ress = run(net_boy(real_payload, self.share), loop=loop)
# obj_ress = []
# for v in ress:
# if type(v) == CurlLoop.CurlException:
# boy = NetBoy.Dict(v.data)
# # boy['payload'] = real_payload
# obj_ress.append(boy)
# elif type(v) == dict:
# boy = NetBoy.Dict(v)
# obj_ress.append(boy)
# # else:
# # boy = NetBoy.Dict({
# # 'state': 'critical',
# # 'spider': 'pycurl',
# # 'error_code': -1,
# # 'error_desc': "{} - {}".format(type(v), str(v)),
# # 'payload': real_payload
# # })
# # obj_ress.append(boy)
# return obj_ress
#
# Path: falsy/netboy/one.py
# def get_it(payload):
# if type(payload) is list:
# payload = payload[0]
# c = pycurl.Curl()
# data_buf = BytesIO()
# # header_buf = BytesIO()
# headers = {'count': 0, 'content': [{}]}
# try:
# setup_curl_for_get(c, payload, data_buf, headers) # header_buf)
# c.perform()
#
# resp = curl_result(c)
# resp['url'] = payload.get('url')
# resp['id'] = payload.get('id')
# resp['state'] = 'normal'
# resp['spider'] = 'pycurl'
# resp['payload'] = payload
#
# pycurl_get_resp(data_buf, headers, payload, resp)
# return resp
# except pycurl.error as e:
# resp = curl_result(c)
# resp['url'] = payload.get('url')
# resp['id'] = payload.get('id')
# resp['state'] = 'error'
# resp['spider'] = 'pycurl'
# resp['error_code'] = code = e.args[0]
# resp['error_desc'] = desc = e.args[1]
# if code in [18, 47]:
# resp['state'] = 'abnormal'
# pycurl_get_resp(data_buf, headers, payload, resp)
# return resp
# except Exception as e:
# resp = curl_result(c)
# resp['url'] = payload.get('url')
# resp['id'] = payload.get('id')
# resp['state'] = 'critical'
# resp['spider'] = 'pycurl'
# resp['error_code'] = '-1'
# resp['error_desc'] = 'pycurl re-one exception leaked: ' + str(e) + ' ' + str(type(e))
# return resp
# finally:
# c.close()
. Output only the next line. | post_func = func.load(post_func) |
Based on the snippet: <|code_start|> ret = self.crawl_info(error_data, payload, begin_time)
return ret
else:
sleep(payload.get('retry_sleep', 3))
payload['sockettimeout'] = int(payload.get('sockettimeout') or self._socket_timeout) + payload.get(
'retry_extra', 10)
payload['loadtimeout'] = int(payload.get('loadtimeout') or self._socket_timeout) + payload.get('retry_extra',
10)
payload['retried'] = True
return self.run1_core(payload, browser=browser, begin_time=begin_time)
except Exception as e:
error_data = {
'state': 'critical',
'error_code': -7,
'error_desc': str(type(e)) + ': ' + str(e)
}
ret = self.crawl_info(error_data, payload, begin_time)
return ret
finally:
if browser is not None:
browser.close()
def json_endp(self):
payload = {
'url': self._url + '/json',
'httpheader': ["Content-Type: application/json; charset=utf-8"],
'http_version': pycurl.CURL_HTTP_VERSION_1_1,
'useragent': 'curl/7.53.1'
}
<|code_end|>
, predict the immediate next line with the help of imports:
import datetime
import json
import socket
import traceback
import html
import pycurl
import uvloop
import websocket
import sys
import asyncio as aio
from concurrent import futures
from logging import Logger
from time import sleep
from urllib.parse import urlparse
from bs4 import UnicodeDammit
from falsy.loader import func
from falsy.netboy.netboy import NetBoy
from falsy.netboy.one import get_it
and context (classes, functions, sometimes code) from other files:
# Path: falsy/loader/func.py
# def load(function_name):
# def deep_getattr(obj, attr):
#
# Path: falsy/netboy/netboy.py
# class NetBoy:
# class Exception(Exception):
# pass
#
# class Dict(typing.Dict[str, typing.Any]):
# def __getattr__(self, name):
# # type: (str) -> Any
# try:
# return self[name]
# except KeyError:
# # raise NetBoy.Exception('netboy key error: ' + name)
# return None # '!netboy key [' + name + '] does not exist'
# except Exception:
# raise NetBoy.Exception('netboy exception: ' + name)
#
# def __setattr__(self, name, value):
# # type: (str, Any) -> None
# self[name] = value
#
# def __init__(self, payload=None, share=None):
# self.payload = payload
# if share:
# s = pycurl.CurlShare()
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE)
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_DNS)
# s.setopt(pycurl.SH_SHARE, pycurl.LOCK_DATA_SSL_SESSION)
# self.share = s
# else:
# self.share = None
#
# def run(self, payload=None, loop=None):
# real_payload = payload
# if self.payload is None:
# real_payload = payload
# elif payload is None:
# real_payload = self.payload
# else:
# real_payload = self.payload + payload
# ress = run(net_boy(real_payload, self.share), loop=loop)
# obj_ress = []
# for v in ress:
# if type(v) == CurlLoop.CurlException:
# boy = NetBoy.Dict(v.data)
# # boy['payload'] = real_payload
# obj_ress.append(boy)
# elif type(v) == dict:
# boy = NetBoy.Dict(v)
# obj_ress.append(boy)
# # else:
# # boy = NetBoy.Dict({
# # 'state': 'critical',
# # 'spider': 'pycurl',
# # 'error_code': -1,
# # 'error_desc': "{} - {}".format(type(v), str(v)),
# # 'payload': real_payload
# # })
# # obj_ress.append(boy)
# return obj_ress
#
# Path: falsy/netboy/one.py
# def get_it(payload):
# if type(payload) is list:
# payload = payload[0]
# c = pycurl.Curl()
# data_buf = BytesIO()
# # header_buf = BytesIO()
# headers = {'count': 0, 'content': [{}]}
# try:
# setup_curl_for_get(c, payload, data_buf, headers) # header_buf)
# c.perform()
#
# resp = curl_result(c)
# resp['url'] = payload.get('url')
# resp['id'] = payload.get('id')
# resp['state'] = 'normal'
# resp['spider'] = 'pycurl'
# resp['payload'] = payload
#
# pycurl_get_resp(data_buf, headers, payload, resp)
# return resp
# except pycurl.error as e:
# resp = curl_result(c)
# resp['url'] = payload.get('url')
# resp['id'] = payload.get('id')
# resp['state'] = 'error'
# resp['spider'] = 'pycurl'
# resp['error_code'] = code = e.args[0]
# resp['error_desc'] = desc = e.args[1]
# if code in [18, 47]:
# resp['state'] = 'abnormal'
# pycurl_get_resp(data_buf, headers, payload, resp)
# return resp
# except Exception as e:
# resp = curl_result(c)
# resp['url'] = payload.get('url')
# resp['id'] = payload.get('id')
# resp['state'] = 'critical'
# resp['spider'] = 'pycurl'
# resp['error_code'] = '-1'
# resp['error_desc'] = 'pycurl re-one exception leaked: ' + str(e) + ' ' + str(type(e))
# return resp
# finally:
# c.close()
. Output only the next line. | resp = get_it(payload) |
Here is a snippet: <|code_start|>
class redirect_exceptions(ContextDecorator):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __enter__(self):
return self
def __exit__(self, e_type, e_value, e_trace):
if e_type is None or e_value is None:
return
to = self.__dict__.get('to')
if to:
exceptions = self.__dict__.get('exceptions', ())
if issubclass(e_type, exceptions):
<|code_end|>
. Write the next line using the current file imports:
import time
from contextlib import ContextDecorator
from falsy.jlog.jlog import JLog
from falsy.loader import func
and context from other files:
# Path: falsy/jlog/jlog.py
# class JLog:
# def __init__(self, name='falsy'):
# self.logger = None
# self.logname = name
#
# def setup(self, config=None):
# if config is not None:
# highlights = config.get('highlights')
# logfile = config.get('logfile', '/tmp/falsy.log')
# file_level = config.get('file_level', 'DEBUG')
# console_level = config.get('console_level', 'DEBUG')
# handlers = config.get('handlers', ['file', 'console'])
# extra_loggers = config.get('extra_loggers')
# else:
# highlights = None
# logfile = '/tmp/falsy.log'
# file_level = console_level = 'DEBUG'
# handlers = ['file', 'console']
# extra_loggers = None
# config = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'formatters': {
# 'file': {
# 'fmt': '%(asctime)s.%(msecs)03d %(levelname)-8s %(name)-8s %(message)s',
# 'datefmt': '%Y-%m-%d %H:%M:%S %Z%z'
# },
# 'console': {
# '()': JLogColoredFormatter,
# 'fmt': '%(99)s%(process)s-%(thread)s%(reset)s %(yellow)s%(asctime)s.%(msecs)03d%(reset)s %(cyan)s%(name)-8s%(reset)s'
# '%(log_color)s%(message)s%(reset)s%(trace)s%(high)s',
# 'datefmt': '%m%d %H:%M:%S',
# 'log_colors': {
# 'DEBUG': 'blue',
# 'INFO': 'green',
# 'WARNING': 'yellow',
# 'ERROR': 'red',
# 'CRITICAL': 'red',
# }
# },
#
# },
# 'filters': {
# 'trace_filter': {
# '()': TraceFilter,
# },
# 'highlight_filter': {
# '()': HighlightFilter,
# 'highlights': highlights
# }
# },
# 'handlers': {
# 'file': {
# 'level': file_level,
# 'filters': None,
# 'class': 'logging.handlers.TimedRotatingFileHandler',
# 'filename': logfile,
# 'formatter': 'file'
# },
# 'console': {
# 'level': console_level,
# 'filters': ['trace_filter', 'highlight_filter'],
# 'class': 'logging.StreamHandler',
# 'stream': 'ext://sys.stdout',
# 'formatter': 'console'
# },
# },
# 'loggers': {
# self.logname: {
# 'handlers': handlers,
# 'level': 'DEBUG',
# 'propagate': False,
# },
# }
# }
# if extra_loggers:
# config['loggers'].update(extra_loggers)
# logging.config.dictConfig(config)
# return self
#
# def bind(self):
# self.logger = logging.getLogger(self.logname)
# return self
#
# def bind2(self, logname):
# self.logger = logging.getLogger(logname)
# return self
#
# def debug(self, msg, *args, **kwargs):
# return self.logger.debug(msg, *args, **kwargs)
#
# def info(self, msg, *args, **kwargs):
# return self.logger.info(msg, *args, **kwargs)
#
# def warning(self, msg, *args, **kwargs):
# return self.logger.warning(msg, *args, **kwargs)
#
# def error(self, msg, *args, **kwargs):
# return self.logger.error(msg, *args, **kwargs)
#
# def critical(self, msg, *args, **kwargs):
# return self.logger.critical(msg, *args, **kwargs)
#
# def warning_trace(self, msg, *args, **kwargs):
# self.trace(kwargs)
# return self.logger.critical(msg, *args, **kwargs)
#
# def critical_trace(self, msg, *args, **kwargs):
# self.trace(kwargs)
# return self.logger.critical(msg, *args, **kwargs)
#
# def error_trace(self, msg, *args, **kwargs):
# self.trace(kwargs)
# return self.logger.error(msg, *args, **kwargs)
#
# def trace(self, kwargs):
# exc_type, exc_value, exc_traceback = sys.exc_info()
# stack = traceback.extract_tb(exc_traceback)
# lines = []
# for i, s in enumerate(stack):
# filename = s.filename
# l = len(filename)
# shortfile = kwargs.get('shortfile', 40)
# if l > shortfile:
# filename = filename[filename.find('/', l - shortfile):]
# line = '%-40s:%-4s %s' % (
# blue() + filename, yellow() + str(s.lineno),
# '|' + '-' * (i * 4) + cyan() + s.name + ':' + red() + s.line)
# lines.append(line)
# lines = '\n\t'.join(lines)
# kwargs['extra'] = {
# 'trace': magenta() + str(exc_type) + ' ' + bold() + magenta() + str(exc_value) + '\n\t' + lines}
#
# Path: falsy/loader/func.py
# def load(function_name):
# def deep_getattr(obj, attr):
, which may include functions, classes, or code. Output only the next line. | propagate = func.load(to)(e_type, e_value, e_trace) |
Predict the next line after this snippet: <|code_start|>from __future__ import absolute_import
urlpatterns = (
url(r'^authorize/$', views.AuthorizationView.as_view(), name="authorize"),
url(r'^token/$', views.TokenView.as_view(), name="token"),
url(r'^revoke_token/$', views.RevokeTokenView.as_view(),
name="revoke-token"),
)
# Application management views
urlpatterns += (
url(r'^applications/$', views.ApplicationList.as_view(), name="list"),
url(r'^applications/register/$',
<|code_end|>
using the current file's imports:
from django.conf.urls import url
from oauth2_provider import views
from .views import CoffeestatsApplicationRegistration, \
CoffeestatsApplicationDetail, \
CoffeestatsApplicationApproval, \
CoffeestatsApplicationRejection, \
CoffeestatsApplicationFullList
and any relevant context from other files:
# Path: coffeestats/caffeine_oauth2/views.py
# class CoffeestatsApplicationRegistration(MailContextMixin,
# ApplicationRegistration):
# mail_subject_template = 'caffeine_oauth2/mail_registered_subject.txt'
# mail_body_html_template = 'caffeine_oauth2/mail_registered_body.html'
# mail_body_text_template = 'caffeine_oauth2/mail_registered_body.txt'
#
# def get_form_class(self):
# """
# Returns a customized form class for the coffeestats application model.
#
# """
# return CoffeestatsApplicationForm
#
# def form_valid(self, form):
# application = form.save(commit=False)
# application.user = self.request.user
# application.save()
# self._send_new_application_mail(application)
# return super(CoffeestatsApplicationRegistration, self).form_valid(form)
#
# def _send_new_application_mail(self, application):
# mail_context = self.get_mail_context(application)
# mail_context.update({
# 'approval_url': '{}{}'.format(
# mail_context['site_url'], reverse_lazy(
# 'oauth2_provider:approve', kwargs={'pk': application.id}))
# })
#
# send_mail(
# render_to_string(self.mail_subject_template, mail_context),
# render_to_string(self.mail_body_text_template, mail_context),
# settings.DEFAULT_FROM_EMAIL,
# [admin[1] for admin in settings.ADMINS],
# html_message=render_to_string(
# self.mail_body_html_template, mail_context)
# )
#
# class CoffeestatsApplicationDetail(ApplicationDetail):
#
# def get_template_names(self):
# application = self.get_object()
# names = super(CoffeestatsApplicationDetail, self).get_template_names()
# if not application.approved:
# names.insert(0, 'caffeine_oauth2/pending_approval.html')
# return names
#
# class CoffeestatsApplicationApproval(ApproverRequiredMixin, MailContextMixin,
# UpdateView):
# template_name = 'caffeine_oauth2/approve.html'
# context_object_name = 'application'
# queryset = CoffeestatsApplication.objects.filter(approved=False)
# form_class = CoffeestatsApplicationApprovalForm
# success_url = reverse_lazy('oauth2_provider:list_all')
# mail_subject_template = 'caffeine_oauth2/mail_approval_subject.txt'
# mail_body_html_template = 'caffeine_oauth2/mail_approval_body.html'
# mail_body_text_template = 'caffeine_oauth2/mail_approval_body.txt'
#
# def form_valid(self, form):
# application = form.save(commit=False)
# application.approve(self.request.user)
# application.save()
# self._send_approval_mail(application)
# return redirect(self.get_success_url())
#
# def _send_approval_mail(self, application):
# mail_context = self.get_mail_context(application)
# mail_context.update({
# 'api_details': '{}{}'.format(
# mail_context['site_url'], reverse_lazy(
# 'oauth2_provider:detail', kwargs={'pk': application.id}))
# })
#
# send_mail(
# render_to_string(self.mail_subject_template, mail_context),
# render_to_string(self.mail_body_text_template, mail_context),
# settings.DEFAULT_FROM_EMAIL,
# [application.user.email],
# html_message=render_to_string(
# self.mail_body_html_template, mail_context)
# )
#
# class CoffeestatsApplicationRejection(ApproverRequiredMixin, MailContextMixin,
# UpdateView):
# template_name = 'caffeine_oauth2/reject.html'
# context_object_name = 'application'
# queryset = CoffeestatsApplication.objects.filter(approved=False)
# form_class = CoffeestatsApplicationRejectionForm
# success_url = reverse_lazy('oauth2_provider:list_all')
# mail_subject_template = 'caffeine_oauth2/mail_reject_subject.txt'
# mail_body_html_template = 'caffeine_oauth2/mail_reject_body.html'
# mail_body_text_template = 'caffeine_oauth2/mail_reject_body.txt'
#
# def form_valid(self, form):
# application = self.get_object()
# self._send_rejection_mail(form.cleaned_data['reasoning'])
# application.reject()
# return redirect(self.get_success_url())
#
# def _send_rejection_mail(self, reasoning):
# application = self.get_object()
# mail_context = self.get_mail_context(application)
# mail_context.update({
# 'reasoning': reasoning
# })
#
# send_mail(
# render_to_string(self.mail_subject_template, mail_context),
# render_to_string(self.mail_body_text_template, mail_context),
# settings.DEFAULT_FROM_EMAIL,
# [application.user.email],
# html_message=render_to_string(
# self.mail_body_html_template, mail_context)
# )
#
# class CoffeestatsApplicationFullList(ApproverRequiredMixin, ListView):
# model = CoffeestatsApplication
# template_name = 'caffeine_oauth2/list_all.html'
# context_object_name = 'applications'
. Output only the next line. | CoffeestatsApplicationRegistration.as_view(), name="register"), |
Given snippet: <|code_start|>from __future__ import absolute_import
urlpatterns = (
url(r'^authorize/$', views.AuthorizationView.as_view(), name="authorize"),
url(r'^token/$', views.TokenView.as_view(), name="token"),
url(r'^revoke_token/$', views.RevokeTokenView.as_view(),
name="revoke-token"),
)
# Application management views
urlpatterns += (
url(r'^applications/$', views.ApplicationList.as_view(), name="list"),
url(r'^applications/register/$',
CoffeestatsApplicationRegistration.as_view(), name="register"),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.conf.urls import url
from oauth2_provider import views
from .views import CoffeestatsApplicationRegistration, \
CoffeestatsApplicationDetail, \
CoffeestatsApplicationApproval, \
CoffeestatsApplicationRejection, \
CoffeestatsApplicationFullList
and context:
# Path: coffeestats/caffeine_oauth2/views.py
# class CoffeestatsApplicationRegistration(MailContextMixin,
# ApplicationRegistration):
# mail_subject_template = 'caffeine_oauth2/mail_registered_subject.txt'
# mail_body_html_template = 'caffeine_oauth2/mail_registered_body.html'
# mail_body_text_template = 'caffeine_oauth2/mail_registered_body.txt'
#
# def get_form_class(self):
# """
# Returns a customized form class for the coffeestats application model.
#
# """
# return CoffeestatsApplicationForm
#
# def form_valid(self, form):
# application = form.save(commit=False)
# application.user = self.request.user
# application.save()
# self._send_new_application_mail(application)
# return super(CoffeestatsApplicationRegistration, self).form_valid(form)
#
# def _send_new_application_mail(self, application):
# mail_context = self.get_mail_context(application)
# mail_context.update({
# 'approval_url': '{}{}'.format(
# mail_context['site_url'], reverse_lazy(
# 'oauth2_provider:approve', kwargs={'pk': application.id}))
# })
#
# send_mail(
# render_to_string(self.mail_subject_template, mail_context),
# render_to_string(self.mail_body_text_template, mail_context),
# settings.DEFAULT_FROM_EMAIL,
# [admin[1] for admin in settings.ADMINS],
# html_message=render_to_string(
# self.mail_body_html_template, mail_context)
# )
#
# class CoffeestatsApplicationDetail(ApplicationDetail):
#
# def get_template_names(self):
# application = self.get_object()
# names = super(CoffeestatsApplicationDetail, self).get_template_names()
# if not application.approved:
# names.insert(0, 'caffeine_oauth2/pending_approval.html')
# return names
#
# class CoffeestatsApplicationApproval(ApproverRequiredMixin, MailContextMixin,
# UpdateView):
# template_name = 'caffeine_oauth2/approve.html'
# context_object_name = 'application'
# queryset = CoffeestatsApplication.objects.filter(approved=False)
# form_class = CoffeestatsApplicationApprovalForm
# success_url = reverse_lazy('oauth2_provider:list_all')
# mail_subject_template = 'caffeine_oauth2/mail_approval_subject.txt'
# mail_body_html_template = 'caffeine_oauth2/mail_approval_body.html'
# mail_body_text_template = 'caffeine_oauth2/mail_approval_body.txt'
#
# def form_valid(self, form):
# application = form.save(commit=False)
# application.approve(self.request.user)
# application.save()
# self._send_approval_mail(application)
# return redirect(self.get_success_url())
#
# def _send_approval_mail(self, application):
# mail_context = self.get_mail_context(application)
# mail_context.update({
# 'api_details': '{}{}'.format(
# mail_context['site_url'], reverse_lazy(
# 'oauth2_provider:detail', kwargs={'pk': application.id}))
# })
#
# send_mail(
# render_to_string(self.mail_subject_template, mail_context),
# render_to_string(self.mail_body_text_template, mail_context),
# settings.DEFAULT_FROM_EMAIL,
# [application.user.email],
# html_message=render_to_string(
# self.mail_body_html_template, mail_context)
# )
#
# class CoffeestatsApplicationRejection(ApproverRequiredMixin, MailContextMixin,
# UpdateView):
# template_name = 'caffeine_oauth2/reject.html'
# context_object_name = 'application'
# queryset = CoffeestatsApplication.objects.filter(approved=False)
# form_class = CoffeestatsApplicationRejectionForm
# success_url = reverse_lazy('oauth2_provider:list_all')
# mail_subject_template = 'caffeine_oauth2/mail_reject_subject.txt'
# mail_body_html_template = 'caffeine_oauth2/mail_reject_body.html'
# mail_body_text_template = 'caffeine_oauth2/mail_reject_body.txt'
#
# def form_valid(self, form):
# application = self.get_object()
# self._send_rejection_mail(form.cleaned_data['reasoning'])
# application.reject()
# return redirect(self.get_success_url())
#
# def _send_rejection_mail(self, reasoning):
# application = self.get_object()
# mail_context = self.get_mail_context(application)
# mail_context.update({
# 'reasoning': reasoning
# })
#
# send_mail(
# render_to_string(self.mail_subject_template, mail_context),
# render_to_string(self.mail_body_text_template, mail_context),
# settings.DEFAULT_FROM_EMAIL,
# [application.user.email],
# html_message=render_to_string(
# self.mail_body_html_template, mail_context)
# )
#
# class CoffeestatsApplicationFullList(ApproverRequiredMixin, ListView):
# model = CoffeestatsApplication
# template_name = 'caffeine_oauth2/list_all.html'
# context_object_name = 'applications'
which might include code, classes, or functions. Output only the next line. | url(r'^applications/(?P<pk>\d+)/$', CoffeestatsApplicationDetail.as_view(), |
Given snippet: <|code_start|>
class CaffeineViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows caffeine entries to be viewed.
"""
queryset = Caffeine.objects.all().order_by('-date')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from rest_framework import permissions, viewsets
from caffeine.models import Caffeine, User
from .serializers import (
CaffeineSerializer,
UserCaffeineSerializer,
UserSerializer,
)
from .permissions import IsOwnerOrReadOnly, IsOwnCaffeineOrReadOnly
and context:
# Path: coffeestats/caffeine_api_v2/serializers.py
# class CaffeineSerializer(serializers.HyperlinkedModelSerializer):
# ctype = CaffeineField()
#
# class Meta:
# model = Caffeine
# fields = (
# 'url', 'user', 'date', 'timezone', 'ctype'
# )
# extra_kwargs = {
# 'user': {'lookup_field': 'username'},
# }
#
# validators = [
# NoRecentCaffeineValidator('user', 'ctype', 'date')
# ]
#
# class UserCaffeineSerializer(serializers.HyperlinkedModelSerializer):
# ctype = CaffeineField()
# user = serializers.HyperlinkedRelatedField(
# read_only=True, view_name='user-detail', lookup_field='username')
#
# class Meta:
# model = Caffeine
# fields = (
# 'url', 'date', 'entrytime', 'timezone', 'ctype', 'user'
# )
# validators = [
# NoRecentCaffeineValidator('user', 'ctype', 'date')
# ]
#
# def save(self):
# user = self.context['view'].view_owner
# self.validated_data['user'] = user
# if ('timezone' not in self.validated_data or
# not self.validated_data['timezone']):
# self.validated_data['timezone'] = user.timezone
# return super(UserCaffeineSerializer, self).save()
#
# class UserSerializer(serializers.HyperlinkedModelSerializer):
# caffeines = serializers.HyperlinkedIdentityField(
# view_name='user-caffeine-list', lookup_field='username',
# lookup_url_kwarg='caffeine_username')
# name = serializers.SerializerMethodField()
# profile = serializers.HyperlinkedIdentityField(
# view_name='public', lookup_field='username')
# counts = serializers.SerializerMethodField()
#
# class Meta:
# model = User
# fields = (
# 'url', 'username', 'location', 'first_name', 'last_name',
# 'name', 'profile', 'counts', 'caffeines',
# )
# extra_kwargs = {
# 'url': {'lookup_field': 'username'},
# }
#
# def get_name(self, obj):
# return obj.get_full_name()
#
# def get_counts(self, obj):
# count_items = Caffeine.objects.total_caffeine_for_user(obj)
# return count_items
#
# Path: coffeestats/caffeine_api_v2/permissions.py
# class IsOwnerOrReadOnly(permissions.BasePermission):
# """
# Custom permission to only allow owners of an object to edit it.
#
# """
# def has_object_permission(self, request, view, obj):
# if request.method in permissions.SAFE_METHODS:
# return True
#
# return obj.user == request.user
#
# class IsOwnCaffeineOrReadOnly(permissions.BasePermission):
# """
# Custom permission to only allow operations on own caffeine items.
#
# """
# def has_permission(self, request, view):
# if request.method in permissions.SAFE_METHODS:
# return True
# return view.view_owner == request.user
which might include code, classes, or functions. Output only the next line. | serializer_class = CaffeineSerializer |
Given the code snippet: <|code_start|>
class CaffeineViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows caffeine entries to be viewed.
"""
queryset = Caffeine.objects.all().order_by('-date')
serializer_class = CaffeineSerializer
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows users to be viewed.
"""
queryset = User.objects.all().order_by('username')
serializer_class = UserSerializer
lookup_field = 'username'
class UserCaffeineViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows working with a users caffeine entries.
"""
<|code_end|>
, generate the next line using the imports in this file:
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from rest_framework import permissions, viewsets
from caffeine.models import Caffeine, User
from .serializers import (
CaffeineSerializer,
UserCaffeineSerializer,
UserSerializer,
)
from .permissions import IsOwnerOrReadOnly, IsOwnCaffeineOrReadOnly
and context (functions, classes, or occasionally code) from other files:
# Path: coffeestats/caffeine_api_v2/serializers.py
# class CaffeineSerializer(serializers.HyperlinkedModelSerializer):
# ctype = CaffeineField()
#
# class Meta:
# model = Caffeine
# fields = (
# 'url', 'user', 'date', 'timezone', 'ctype'
# )
# extra_kwargs = {
# 'user': {'lookup_field': 'username'},
# }
#
# validators = [
# NoRecentCaffeineValidator('user', 'ctype', 'date')
# ]
#
# class UserCaffeineSerializer(serializers.HyperlinkedModelSerializer):
# ctype = CaffeineField()
# user = serializers.HyperlinkedRelatedField(
# read_only=True, view_name='user-detail', lookup_field='username')
#
# class Meta:
# model = Caffeine
# fields = (
# 'url', 'date', 'entrytime', 'timezone', 'ctype', 'user'
# )
# validators = [
# NoRecentCaffeineValidator('user', 'ctype', 'date')
# ]
#
# def save(self):
# user = self.context['view'].view_owner
# self.validated_data['user'] = user
# if ('timezone' not in self.validated_data or
# not self.validated_data['timezone']):
# self.validated_data['timezone'] = user.timezone
# return super(UserCaffeineSerializer, self).save()
#
# class UserSerializer(serializers.HyperlinkedModelSerializer):
# caffeines = serializers.HyperlinkedIdentityField(
# view_name='user-caffeine-list', lookup_field='username',
# lookup_url_kwarg='caffeine_username')
# name = serializers.SerializerMethodField()
# profile = serializers.HyperlinkedIdentityField(
# view_name='public', lookup_field='username')
# counts = serializers.SerializerMethodField()
#
# class Meta:
# model = User
# fields = (
# 'url', 'username', 'location', 'first_name', 'last_name',
# 'name', 'profile', 'counts', 'caffeines',
# )
# extra_kwargs = {
# 'url': {'lookup_field': 'username'},
# }
#
# def get_name(self, obj):
# return obj.get_full_name()
#
# def get_counts(self, obj):
# count_items = Caffeine.objects.total_caffeine_for_user(obj)
# return count_items
#
# Path: coffeestats/caffeine_api_v2/permissions.py
# class IsOwnerOrReadOnly(permissions.BasePermission):
# """
# Custom permission to only allow owners of an object to edit it.
#
# """
# def has_object_permission(self, request, view, obj):
# if request.method in permissions.SAFE_METHODS:
# return True
#
# return obj.user == request.user
#
# class IsOwnCaffeineOrReadOnly(permissions.BasePermission):
# """
# Custom permission to only allow operations on own caffeine items.
#
# """
# def has_permission(self, request, view):
# if request.method in permissions.SAFE_METHODS:
# return True
# return view.view_owner == request.user
. Output only the next line. | serializer_class = UserCaffeineSerializer |
Given the following code snippet before the placeholder: <|code_start|>
class CaffeineViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows caffeine entries to be viewed.
"""
queryset = Caffeine.objects.all().order_by('-date')
serializer_class = CaffeineSerializer
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows users to be viewed.
"""
queryset = User.objects.all().order_by('username')
<|code_end|>
, predict the next line using imports from the current file:
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from rest_framework import permissions, viewsets
from caffeine.models import Caffeine, User
from .serializers import (
CaffeineSerializer,
UserCaffeineSerializer,
UserSerializer,
)
from .permissions import IsOwnerOrReadOnly, IsOwnCaffeineOrReadOnly
and context including class names, function names, and sometimes code from other files:
# Path: coffeestats/caffeine_api_v2/serializers.py
# class CaffeineSerializer(serializers.HyperlinkedModelSerializer):
# ctype = CaffeineField()
#
# class Meta:
# model = Caffeine
# fields = (
# 'url', 'user', 'date', 'timezone', 'ctype'
# )
# extra_kwargs = {
# 'user': {'lookup_field': 'username'},
# }
#
# validators = [
# NoRecentCaffeineValidator('user', 'ctype', 'date')
# ]
#
# class UserCaffeineSerializer(serializers.HyperlinkedModelSerializer):
# ctype = CaffeineField()
# user = serializers.HyperlinkedRelatedField(
# read_only=True, view_name='user-detail', lookup_field='username')
#
# class Meta:
# model = Caffeine
# fields = (
# 'url', 'date', 'entrytime', 'timezone', 'ctype', 'user'
# )
# validators = [
# NoRecentCaffeineValidator('user', 'ctype', 'date')
# ]
#
# def save(self):
# user = self.context['view'].view_owner
# self.validated_data['user'] = user
# if ('timezone' not in self.validated_data or
# not self.validated_data['timezone']):
# self.validated_data['timezone'] = user.timezone
# return super(UserCaffeineSerializer, self).save()
#
# class UserSerializer(serializers.HyperlinkedModelSerializer):
# caffeines = serializers.HyperlinkedIdentityField(
# view_name='user-caffeine-list', lookup_field='username',
# lookup_url_kwarg='caffeine_username')
# name = serializers.SerializerMethodField()
# profile = serializers.HyperlinkedIdentityField(
# view_name='public', lookup_field='username')
# counts = serializers.SerializerMethodField()
#
# class Meta:
# model = User
# fields = (
# 'url', 'username', 'location', 'first_name', 'last_name',
# 'name', 'profile', 'counts', 'caffeines',
# )
# extra_kwargs = {
# 'url': {'lookup_field': 'username'},
# }
#
# def get_name(self, obj):
# return obj.get_full_name()
#
# def get_counts(self, obj):
# count_items = Caffeine.objects.total_caffeine_for_user(obj)
# return count_items
#
# Path: coffeestats/caffeine_api_v2/permissions.py
# class IsOwnerOrReadOnly(permissions.BasePermission):
# """
# Custom permission to only allow owners of an object to edit it.
#
# """
# def has_object_permission(self, request, view, obj):
# if request.method in permissions.SAFE_METHODS:
# return True
#
# return obj.user == request.user
#
# class IsOwnCaffeineOrReadOnly(permissions.BasePermission):
# """
# Custom permission to only allow operations on own caffeine items.
#
# """
# def has_permission(self, request, view):
# if request.method in permissions.SAFE_METHODS:
# return True
# return view.view_owner == request.user
. Output only the next line. | serializer_class = UserSerializer |
Continue the code snippet: <|code_start|>
class CaffeineViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows caffeine entries to be viewed.
"""
queryset = Caffeine.objects.all().order_by('-date')
serializer_class = CaffeineSerializer
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows users to be viewed.
"""
queryset = User.objects.all().order_by('username')
serializer_class = UserSerializer
lookup_field = 'username'
class UserCaffeineViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows working with a users caffeine entries.
"""
serializer_class = UserCaffeineSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
IsOwnCaffeineOrReadOnly,
<|code_end|>
. Use current file imports:
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from rest_framework import permissions, viewsets
from caffeine.models import Caffeine, User
from .serializers import (
CaffeineSerializer,
UserCaffeineSerializer,
UserSerializer,
)
from .permissions import IsOwnerOrReadOnly, IsOwnCaffeineOrReadOnly
and context (classes, functions, or code) from other files:
# Path: coffeestats/caffeine_api_v2/serializers.py
# class CaffeineSerializer(serializers.HyperlinkedModelSerializer):
# ctype = CaffeineField()
#
# class Meta:
# model = Caffeine
# fields = (
# 'url', 'user', 'date', 'timezone', 'ctype'
# )
# extra_kwargs = {
# 'user': {'lookup_field': 'username'},
# }
#
# validators = [
# NoRecentCaffeineValidator('user', 'ctype', 'date')
# ]
#
# class UserCaffeineSerializer(serializers.HyperlinkedModelSerializer):
# ctype = CaffeineField()
# user = serializers.HyperlinkedRelatedField(
# read_only=True, view_name='user-detail', lookup_field='username')
#
# class Meta:
# model = Caffeine
# fields = (
# 'url', 'date', 'entrytime', 'timezone', 'ctype', 'user'
# )
# validators = [
# NoRecentCaffeineValidator('user', 'ctype', 'date')
# ]
#
# def save(self):
# user = self.context['view'].view_owner
# self.validated_data['user'] = user
# if ('timezone' not in self.validated_data or
# not self.validated_data['timezone']):
# self.validated_data['timezone'] = user.timezone
# return super(UserCaffeineSerializer, self).save()
#
# class UserSerializer(serializers.HyperlinkedModelSerializer):
# caffeines = serializers.HyperlinkedIdentityField(
# view_name='user-caffeine-list', lookup_field='username',
# lookup_url_kwarg='caffeine_username')
# name = serializers.SerializerMethodField()
# profile = serializers.HyperlinkedIdentityField(
# view_name='public', lookup_field='username')
# counts = serializers.SerializerMethodField()
#
# class Meta:
# model = User
# fields = (
# 'url', 'username', 'location', 'first_name', 'last_name',
# 'name', 'profile', 'counts', 'caffeines',
# )
# extra_kwargs = {
# 'url': {'lookup_field': 'username'},
# }
#
# def get_name(self, obj):
# return obj.get_full_name()
#
# def get_counts(self, obj):
# count_items = Caffeine.objects.total_caffeine_for_user(obj)
# return count_items
#
# Path: coffeestats/caffeine_api_v2/permissions.py
# class IsOwnerOrReadOnly(permissions.BasePermission):
# """
# Custom permission to only allow owners of an object to edit it.
#
# """
# def has_object_permission(self, request, view, obj):
# if request.method in permissions.SAFE_METHODS:
# return True
#
# return obj.user == request.user
#
# class IsOwnCaffeineOrReadOnly(permissions.BasePermission):
# """
# Custom permission to only allow operations on own caffeine items.
#
# """
# def has_permission(self, request, view):
# if request.method in permissions.SAFE_METHODS:
# return True
# return view.view_owner == request.user
. Output only the next line. | IsOwnerOrReadOnly,) |
Based on the snippet: <|code_start|>
class CaffeineViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows caffeine entries to be viewed.
"""
queryset = Caffeine.objects.all().order_by('-date')
serializer_class = CaffeineSerializer
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows users to be viewed.
"""
queryset = User.objects.all().order_by('username')
serializer_class = UserSerializer
lookup_field = 'username'
class UserCaffeineViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows working with a users caffeine entries.
"""
serializer_class = UserCaffeineSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
<|code_end|>
, predict the immediate next line with the help of imports:
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from rest_framework import permissions, viewsets
from caffeine.models import Caffeine, User
from .serializers import (
CaffeineSerializer,
UserCaffeineSerializer,
UserSerializer,
)
from .permissions import IsOwnerOrReadOnly, IsOwnCaffeineOrReadOnly
and context (classes, functions, sometimes code) from other files:
# Path: coffeestats/caffeine_api_v2/serializers.py
# class CaffeineSerializer(serializers.HyperlinkedModelSerializer):
# ctype = CaffeineField()
#
# class Meta:
# model = Caffeine
# fields = (
# 'url', 'user', 'date', 'timezone', 'ctype'
# )
# extra_kwargs = {
# 'user': {'lookup_field': 'username'},
# }
#
# validators = [
# NoRecentCaffeineValidator('user', 'ctype', 'date')
# ]
#
# class UserCaffeineSerializer(serializers.HyperlinkedModelSerializer):
# ctype = CaffeineField()
# user = serializers.HyperlinkedRelatedField(
# read_only=True, view_name='user-detail', lookup_field='username')
#
# class Meta:
# model = Caffeine
# fields = (
# 'url', 'date', 'entrytime', 'timezone', 'ctype', 'user'
# )
# validators = [
# NoRecentCaffeineValidator('user', 'ctype', 'date')
# ]
#
# def save(self):
# user = self.context['view'].view_owner
# self.validated_data['user'] = user
# if ('timezone' not in self.validated_data or
# not self.validated_data['timezone']):
# self.validated_data['timezone'] = user.timezone
# return super(UserCaffeineSerializer, self).save()
#
# class UserSerializer(serializers.HyperlinkedModelSerializer):
# caffeines = serializers.HyperlinkedIdentityField(
# view_name='user-caffeine-list', lookup_field='username',
# lookup_url_kwarg='caffeine_username')
# name = serializers.SerializerMethodField()
# profile = serializers.HyperlinkedIdentityField(
# view_name='public', lookup_field='username')
# counts = serializers.SerializerMethodField()
#
# class Meta:
# model = User
# fields = (
# 'url', 'username', 'location', 'first_name', 'last_name',
# 'name', 'profile', 'counts', 'caffeines',
# )
# extra_kwargs = {
# 'url': {'lookup_field': 'username'},
# }
#
# def get_name(self, obj):
# return obj.get_full_name()
#
# def get_counts(self, obj):
# count_items = Caffeine.objects.total_caffeine_for_user(obj)
# return count_items
#
# Path: coffeestats/caffeine_api_v2/permissions.py
# class IsOwnerOrReadOnly(permissions.BasePermission):
# """
# Custom permission to only allow owners of an object to edit it.
#
# """
# def has_object_permission(self, request, view, obj):
# if request.method in permissions.SAFE_METHODS:
# return True
#
# return obj.user == request.user
#
# class IsOwnCaffeineOrReadOnly(permissions.BasePermission):
# """
# Custom permission to only allow operations on own caffeine items.
#
# """
# def has_permission(self, request, view):
# if request.method in permissions.SAFE_METHODS:
# return True
# return view.view_owner == request.user
. Output only the next line. | IsOwnCaffeineOrReadOnly, |
Given the code snippet: <|code_start|>
password1 = forms.CharField(required=False)
password2 = forms.CharField(required=False)
password_set = False
email_action = None
class Meta:
model = User
fields = ['email', 'first_name', 'last_name', 'location']
def clean_password2(self):
password1 = self.cleaned_data['password1']
password2 = self.cleaned_data['password2']
if (password2 or password1) and password1 != password2:
raise forms.ValidationError(PASSWORD_MISMATCH_ERROR)
return password2
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
emailuser = User.objects.filter(
email__iexact=self.cleaned_data['email']).first()
if emailuser is not None and emailuser != self.instance:
raise forms.ValidationError(DUPLICATE_EMAIL_ERROR)
if self.cleaned_data['email'] != self.instance.email:
self.email_action = Action.objects.create_action(
<|code_end|>
, generate the next line using the imports in this file:
from datetime import datetime
from django import forms
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django_registration.forms import RegistrationFormUniqueEmail
from .models import (
ACTION_TYPES,
Action,
Caffeine,
User,
)
and context (functions, classes, or occasionally code) from other files:
# Path: coffeestats/caffeine/models.py
# ACTION_TYPES = Choices((0, "change_email", _("Change email")))
#
# class Action(models.Model):
# """
# Action model.
#
# """
#
# user = models.ForeignKey("User", models.PROTECT)
# code = models.CharField(_("action code"), max_length=32, unique=True)
# created = AutoCreatedField(_("created"))
# validuntil = models.DateTimeField(_("valid until"), db_index=True)
# atype = models.PositiveSmallIntegerField(
# _("action type"), choices=ACTION_TYPES, db_index=True
# )
# data = models.TextField(_("action data"))
#
# objects = ActionManager()
#
# class Meta:
# ordering = ["-validuntil"]
#
# def __str__(self):
# return "%s valid until %s" % (ACTION_TYPES[self.atype], self.validuntil)
#
# class Caffeine(models.Model):
# """
# Caffeinated drink model.
#
# """
#
# ctype = models.PositiveSmallIntegerField(choices=DRINK_TYPES, db_index=True)
# user = models.ForeignKey("User", models.PROTECT, related_name="caffeines")
# date = models.DateTimeField(_("consumed"), db_index=True)
# entrytime = AutoCreatedField(_("entered"), db_index=True)
# timezone = models.CharField(max_length=40, db_index=True, blank=True)
#
# objects = CaffeineManager()
#
# class Meta:
# ordering = ["-date"]
#
# def clean(self):
# recent_caffeine = Caffeine.objects.find_recent_caffeine(
# self.user, self.date, self.ctype
# )
# if recent_caffeine:
# raise ValidationError(
# _(
# "Your last %(drink)s was less than %(minutes)d minutes "
# "ago at %(date)s %(timezone)s"
# ),
# code="drink_frequency",
# params={
# "drink": DRINK_TYPES[self.ctype],
# "minutes": settings.MINIMUM_DRINK_DISTANCE,
# "date": recent_caffeine.date,
# "timezone": recent_caffeine.timezone,
# },
# )
# super(Caffeine, self).clean()
#
# def __str__(self):
# return (
# "%s at %s %s"
# % (
# DRINK_TYPES[self.ctype],
# self.date.strftime(settings.CAFFEINE_DATETIME_FORMAT),
# self.timezone or "",
# )
# ).strip()
#
# def format_type(self):
# return DRINK_TYPES[self.ctype]
#
# class User(AbstractUser):
# """
# User model.
#
# """
#
# cryptsum = models.CharField(_("old password hash"), max_length=60, blank=True)
# location = models.CharField(max_length=128, blank=True)
# public = models.BooleanField(default=True)
# token = models.CharField(max_length=32, unique=True)
# timezone = models.CharField(_("timezone"), max_length=40, db_index=True, blank=True)
#
# objects = CaffeineUserManager()
#
# def get_absolute_url(self):
# return reverse("public", kwargs={"username": self.username})
#
# def __str__(self):
# return self.get_full_name() or self.username
#
# def export_csv(self):
# subject = _("Your caffeine records")
# body = _("Attached is your caffeine track record.")
# email = EmailMessage(subject, body, to=[self.email])
# now = timezone.now().strftime(settings.CAFFEINE_DATETIME_FORMAT)
# for drink in ("coffee", "mate"):
# email.attachments.append(
# (
# "%s-%s.csv" % (drink, now),
# Caffeine.objects.get_csv_data(getattr(DRINK_TYPES, drink), self),
# "text/csv",
# )
# )
# email.send()
#
# def has_usable_password(self):
# """
# Checks whether the current user has either an old password hash or a
# valid new password hash.
#
# """
# return self.cryptsum or super(User, self).has_usable_password()
#
# def set_password(self, password):
# """
# Sets the password and creates the authentication token if it is not
# set.
#
# """
# super(User, self).set_password(password)
# if not self.token:
# # on the run token
# # TODO: use something better for API authentication
# self.token = md5((self.username + password).encode("utf8")).hexdigest()
. Output only the next line. | self.instance, ACTION_TYPES.change_email, |
Next line prediction: <|code_start|> """
password1 = forms.CharField(required=False)
password2 = forms.CharField(required=False)
password_set = False
email_action = None
class Meta:
model = User
fields = ['email', 'first_name', 'last_name', 'location']
def clean_password2(self):
password1 = self.cleaned_data['password1']
password2 = self.cleaned_data['password2']
if (password2 or password1) and password1 != password2:
raise forms.ValidationError(PASSWORD_MISMATCH_ERROR)
return password2
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
emailuser = User.objects.filter(
email__iexact=self.cleaned_data['email']).first()
if emailuser is not None and emailuser != self.instance:
raise forms.ValidationError(DUPLICATE_EMAIL_ERROR)
if self.cleaned_data['email'] != self.instance.email:
<|code_end|>
. Use current file imports:
(from datetime import datetime
from django import forms
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django_registration.forms import RegistrationFormUniqueEmail
from .models import (
ACTION_TYPES,
Action,
Caffeine,
User,
))
and context including class names, function names, or small code snippets from other files:
# Path: coffeestats/caffeine/models.py
# ACTION_TYPES = Choices((0, "change_email", _("Change email")))
#
# class Action(models.Model):
# """
# Action model.
#
# """
#
# user = models.ForeignKey("User", models.PROTECT)
# code = models.CharField(_("action code"), max_length=32, unique=True)
# created = AutoCreatedField(_("created"))
# validuntil = models.DateTimeField(_("valid until"), db_index=True)
# atype = models.PositiveSmallIntegerField(
# _("action type"), choices=ACTION_TYPES, db_index=True
# )
# data = models.TextField(_("action data"))
#
# objects = ActionManager()
#
# class Meta:
# ordering = ["-validuntil"]
#
# def __str__(self):
# return "%s valid until %s" % (ACTION_TYPES[self.atype], self.validuntil)
#
# class Caffeine(models.Model):
# """
# Caffeinated drink model.
#
# """
#
# ctype = models.PositiveSmallIntegerField(choices=DRINK_TYPES, db_index=True)
# user = models.ForeignKey("User", models.PROTECT, related_name="caffeines")
# date = models.DateTimeField(_("consumed"), db_index=True)
# entrytime = AutoCreatedField(_("entered"), db_index=True)
# timezone = models.CharField(max_length=40, db_index=True, blank=True)
#
# objects = CaffeineManager()
#
# class Meta:
# ordering = ["-date"]
#
# def clean(self):
# recent_caffeine = Caffeine.objects.find_recent_caffeine(
# self.user, self.date, self.ctype
# )
# if recent_caffeine:
# raise ValidationError(
# _(
# "Your last %(drink)s was less than %(minutes)d minutes "
# "ago at %(date)s %(timezone)s"
# ),
# code="drink_frequency",
# params={
# "drink": DRINK_TYPES[self.ctype],
# "minutes": settings.MINIMUM_DRINK_DISTANCE,
# "date": recent_caffeine.date,
# "timezone": recent_caffeine.timezone,
# },
# )
# super(Caffeine, self).clean()
#
# def __str__(self):
# return (
# "%s at %s %s"
# % (
# DRINK_TYPES[self.ctype],
# self.date.strftime(settings.CAFFEINE_DATETIME_FORMAT),
# self.timezone or "",
# )
# ).strip()
#
# def format_type(self):
# return DRINK_TYPES[self.ctype]
#
# class User(AbstractUser):
# """
# User model.
#
# """
#
# cryptsum = models.CharField(_("old password hash"), max_length=60, blank=True)
# location = models.CharField(max_length=128, blank=True)
# public = models.BooleanField(default=True)
# token = models.CharField(max_length=32, unique=True)
# timezone = models.CharField(_("timezone"), max_length=40, db_index=True, blank=True)
#
# objects = CaffeineUserManager()
#
# def get_absolute_url(self):
# return reverse("public", kwargs={"username": self.username})
#
# def __str__(self):
# return self.get_full_name() or self.username
#
# def export_csv(self):
# subject = _("Your caffeine records")
# body = _("Attached is your caffeine track record.")
# email = EmailMessage(subject, body, to=[self.email])
# now = timezone.now().strftime(settings.CAFFEINE_DATETIME_FORMAT)
# for drink in ("coffee", "mate"):
# email.attachments.append(
# (
# "%s-%s.csv" % (drink, now),
# Caffeine.objects.get_csv_data(getattr(DRINK_TYPES, drink), self),
# "text/csv",
# )
# )
# email.send()
#
# def has_usable_password(self):
# """
# Checks whether the current user has either an old password hash or a
# valid new password hash.
#
# """
# return self.cryptsum or super(User, self).has_usable_password()
#
# def set_password(self, password):
# """
# Sets the password and creates the authentication token if it is not
# set.
#
# """
# super(User, self).set_password(password)
# if not self.token:
# # on the run token
# # TODO: use something better for API authentication
# self.token = md5((self.username + password).encode("utf8")).hexdigest()
. Output only the next line. | self.email_action = Action.objects.create_action( |
Given snippet: <|code_start|> model = User
fields = ['timezone']
error_messages = {
'timezone': {
'required': EMPTY_TIMEZONE_ERROR,
},
}
def __init__(self, *args, **kwargs):
super(SelectTimeZoneForm, self).__init__(*args, **kwargs)
self.fields['timezone'].required = True
def clean_timezone(self):
cleantimezone = self.cleaned_data['timezone']
try:
with timezone.override(cleantimezone):
return cleantimezone
except:
raise forms.ValidationError(INVALID_TIMEZONE_ERROR)
class SubmitCaffeineForm(forms.ModelForm):
"""
This is the form for new caffeine submissions.
"""
date = forms.DateField(required=False)
time = forms.TimeField(required=False)
class Meta:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from datetime import datetime
from django import forms
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django_registration.forms import RegistrationFormUniqueEmail
from .models import (
ACTION_TYPES,
Action,
Caffeine,
User,
)
and context:
# Path: coffeestats/caffeine/models.py
# ACTION_TYPES = Choices((0, "change_email", _("Change email")))
#
# class Action(models.Model):
# """
# Action model.
#
# """
#
# user = models.ForeignKey("User", models.PROTECT)
# code = models.CharField(_("action code"), max_length=32, unique=True)
# created = AutoCreatedField(_("created"))
# validuntil = models.DateTimeField(_("valid until"), db_index=True)
# atype = models.PositiveSmallIntegerField(
# _("action type"), choices=ACTION_TYPES, db_index=True
# )
# data = models.TextField(_("action data"))
#
# objects = ActionManager()
#
# class Meta:
# ordering = ["-validuntil"]
#
# def __str__(self):
# return "%s valid until %s" % (ACTION_TYPES[self.atype], self.validuntil)
#
# class Caffeine(models.Model):
# """
# Caffeinated drink model.
#
# """
#
# ctype = models.PositiveSmallIntegerField(choices=DRINK_TYPES, db_index=True)
# user = models.ForeignKey("User", models.PROTECT, related_name="caffeines")
# date = models.DateTimeField(_("consumed"), db_index=True)
# entrytime = AutoCreatedField(_("entered"), db_index=True)
# timezone = models.CharField(max_length=40, db_index=True, blank=True)
#
# objects = CaffeineManager()
#
# class Meta:
# ordering = ["-date"]
#
# def clean(self):
# recent_caffeine = Caffeine.objects.find_recent_caffeine(
# self.user, self.date, self.ctype
# )
# if recent_caffeine:
# raise ValidationError(
# _(
# "Your last %(drink)s was less than %(minutes)d minutes "
# "ago at %(date)s %(timezone)s"
# ),
# code="drink_frequency",
# params={
# "drink": DRINK_TYPES[self.ctype],
# "minutes": settings.MINIMUM_DRINK_DISTANCE,
# "date": recent_caffeine.date,
# "timezone": recent_caffeine.timezone,
# },
# )
# super(Caffeine, self).clean()
#
# def __str__(self):
# return (
# "%s at %s %s"
# % (
# DRINK_TYPES[self.ctype],
# self.date.strftime(settings.CAFFEINE_DATETIME_FORMAT),
# self.timezone or "",
# )
# ).strip()
#
# def format_type(self):
# return DRINK_TYPES[self.ctype]
#
# class User(AbstractUser):
# """
# User model.
#
# """
#
# cryptsum = models.CharField(_("old password hash"), max_length=60, blank=True)
# location = models.CharField(max_length=128, blank=True)
# public = models.BooleanField(default=True)
# token = models.CharField(max_length=32, unique=True)
# timezone = models.CharField(_("timezone"), max_length=40, db_index=True, blank=True)
#
# objects = CaffeineUserManager()
#
# def get_absolute_url(self):
# return reverse("public", kwargs={"username": self.username})
#
# def __str__(self):
# return self.get_full_name() or self.username
#
# def export_csv(self):
# subject = _("Your caffeine records")
# body = _("Attached is your caffeine track record.")
# email = EmailMessage(subject, body, to=[self.email])
# now = timezone.now().strftime(settings.CAFFEINE_DATETIME_FORMAT)
# for drink in ("coffee", "mate"):
# email.attachments.append(
# (
# "%s-%s.csv" % (drink, now),
# Caffeine.objects.get_csv_data(getattr(DRINK_TYPES, drink), self),
# "text/csv",
# )
# )
# email.send()
#
# def has_usable_password(self):
# """
# Checks whether the current user has either an old password hash or a
# valid new password hash.
#
# """
# return self.cryptsum or super(User, self).has_usable_password()
#
# def set_password(self, password):
# """
# Sets the password and creates the authentication token if it is not
# set.
#
# """
# super(User, self).set_password(password)
# if not self.token:
# # on the run token
# # TODO: use something better for API authentication
# self.token = md5((self.username + password).encode("utf8")).hexdigest()
which might include code, classes, or functions. Output only the next line. | model = Caffeine |
Here is a snippet: <|code_start|>"""
DUPLICATE_USER_ERROR = _("A user with that username already exists.")
DUPLICATE_EMAIL_ERROR = _(
"This email address is already in use. "
"Please supply a different email address."
)
PASSWORD_MISMATCH_ERROR = _('Passwords must match!')
INVALID_TIMEZONE_ERROR = _("Invalid time zone name")
EMPTY_TIMEZONE_ERROR = _("Time zone must not be empty.")
class CoffeestatsRegistrationForm(RegistrationFormUniqueEmail):
"""
This is the form for registering new users.
"""
firstname = forms.CharField(label=_("First name"), required=False)
lastname = forms.CharField(label=_("Last name"), required=False)
location = forms.CharField(label=_("Location"), required=False)
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already
in use.
"""
<|code_end|>
. Write the next line using the current file imports:
from datetime import datetime
from django import forms
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django_registration.forms import RegistrationFormUniqueEmail
from .models import (
ACTION_TYPES,
Action,
Caffeine,
User,
)
and context from other files:
# Path: coffeestats/caffeine/models.py
# ACTION_TYPES = Choices((0, "change_email", _("Change email")))
#
# class Action(models.Model):
# """
# Action model.
#
# """
#
# user = models.ForeignKey("User", models.PROTECT)
# code = models.CharField(_("action code"), max_length=32, unique=True)
# created = AutoCreatedField(_("created"))
# validuntil = models.DateTimeField(_("valid until"), db_index=True)
# atype = models.PositiveSmallIntegerField(
# _("action type"), choices=ACTION_TYPES, db_index=True
# )
# data = models.TextField(_("action data"))
#
# objects = ActionManager()
#
# class Meta:
# ordering = ["-validuntil"]
#
# def __str__(self):
# return "%s valid until %s" % (ACTION_TYPES[self.atype], self.validuntil)
#
# class Caffeine(models.Model):
# """
# Caffeinated drink model.
#
# """
#
# ctype = models.PositiveSmallIntegerField(choices=DRINK_TYPES, db_index=True)
# user = models.ForeignKey("User", models.PROTECT, related_name="caffeines")
# date = models.DateTimeField(_("consumed"), db_index=True)
# entrytime = AutoCreatedField(_("entered"), db_index=True)
# timezone = models.CharField(max_length=40, db_index=True, blank=True)
#
# objects = CaffeineManager()
#
# class Meta:
# ordering = ["-date"]
#
# def clean(self):
# recent_caffeine = Caffeine.objects.find_recent_caffeine(
# self.user, self.date, self.ctype
# )
# if recent_caffeine:
# raise ValidationError(
# _(
# "Your last %(drink)s was less than %(minutes)d minutes "
# "ago at %(date)s %(timezone)s"
# ),
# code="drink_frequency",
# params={
# "drink": DRINK_TYPES[self.ctype],
# "minutes": settings.MINIMUM_DRINK_DISTANCE,
# "date": recent_caffeine.date,
# "timezone": recent_caffeine.timezone,
# },
# )
# super(Caffeine, self).clean()
#
# def __str__(self):
# return (
# "%s at %s %s"
# % (
# DRINK_TYPES[self.ctype],
# self.date.strftime(settings.CAFFEINE_DATETIME_FORMAT),
# self.timezone or "",
# )
# ).strip()
#
# def format_type(self):
# return DRINK_TYPES[self.ctype]
#
# class User(AbstractUser):
# """
# User model.
#
# """
#
# cryptsum = models.CharField(_("old password hash"), max_length=60, blank=True)
# location = models.CharField(max_length=128, blank=True)
# public = models.BooleanField(default=True)
# token = models.CharField(max_length=32, unique=True)
# timezone = models.CharField(_("timezone"), max_length=40, db_index=True, blank=True)
#
# objects = CaffeineUserManager()
#
# def get_absolute_url(self):
# return reverse("public", kwargs={"username": self.username})
#
# def __str__(self):
# return self.get_full_name() or self.username
#
# def export_csv(self):
# subject = _("Your caffeine records")
# body = _("Attached is your caffeine track record.")
# email = EmailMessage(subject, body, to=[self.email])
# now = timezone.now().strftime(settings.CAFFEINE_DATETIME_FORMAT)
# for drink in ("coffee", "mate"):
# email.attachments.append(
# (
# "%s-%s.csv" % (drink, now),
# Caffeine.objects.get_csv_data(getattr(DRINK_TYPES, drink), self),
# "text/csv",
# )
# )
# email.send()
#
# def has_usable_password(self):
# """
# Checks whether the current user has either an old password hash or a
# valid new password hash.
#
# """
# return self.cryptsum or super(User, self).has_usable_password()
#
# def set_password(self, password):
# """
# Sets the password and creates the authentication token if it is not
# set.
#
# """
# super(User, self).set_password(password)
# if not self.token:
# # on the run token
# # TODO: use something better for API authentication
# self.token = md5((self.username + password).encode("utf8")).hexdigest()
, which may include functions, classes, or code. Output only the next line. | existing = User.objects.filter( |
Given the following code snippet before the placeholder: <|code_start|> return self.initial['password']
class CaffeineUserAdmin(UserAdmin):
"""
Custom admin page for users.
"""
form = UserChangeForm
add_form = UserCreationForm
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
list_filter = ('is_staff',)
fieldsets = (
(None, {
'fields': ('username', 'email', 'password')}),
(_('Personal info'), {
'fields': ('first_name', 'last_name', 'location')}),
(_('Permissions'), {
'fields': ('is_superuser', 'is_staff', 'is_active', 'public')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'email', 'password1', 'password2')}),
)
search_fields = ('username', 'email')
ordering = ('username', 'email')
filter_horizontal = ()
<|code_end|>
, predict the next line using imports from the current file:
from django import forms
from django.utils.translation import ugettext as _
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from .models import (
Action,
Caffeine,
User,
)
and context including class names, function names, and sometimes code from other files:
# Path: coffeestats/caffeine/models.py
# class Action(models.Model):
# """
# Action model.
#
# """
#
# user = models.ForeignKey("User", models.PROTECT)
# code = models.CharField(_("action code"), max_length=32, unique=True)
# created = AutoCreatedField(_("created"))
# validuntil = models.DateTimeField(_("valid until"), db_index=True)
# atype = models.PositiveSmallIntegerField(
# _("action type"), choices=ACTION_TYPES, db_index=True
# )
# data = models.TextField(_("action data"))
#
# objects = ActionManager()
#
# class Meta:
# ordering = ["-validuntil"]
#
# def __str__(self):
# return "%s valid until %s" % (ACTION_TYPES[self.atype], self.validuntil)
#
# class Caffeine(models.Model):
# """
# Caffeinated drink model.
#
# """
#
# ctype = models.PositiveSmallIntegerField(choices=DRINK_TYPES, db_index=True)
# user = models.ForeignKey("User", models.PROTECT, related_name="caffeines")
# date = models.DateTimeField(_("consumed"), db_index=True)
# entrytime = AutoCreatedField(_("entered"), db_index=True)
# timezone = models.CharField(max_length=40, db_index=True, blank=True)
#
# objects = CaffeineManager()
#
# class Meta:
# ordering = ["-date"]
#
# def clean(self):
# recent_caffeine = Caffeine.objects.find_recent_caffeine(
# self.user, self.date, self.ctype
# )
# if recent_caffeine:
# raise ValidationError(
# _(
# "Your last %(drink)s was less than %(minutes)d minutes "
# "ago at %(date)s %(timezone)s"
# ),
# code="drink_frequency",
# params={
# "drink": DRINK_TYPES[self.ctype],
# "minutes": settings.MINIMUM_DRINK_DISTANCE,
# "date": recent_caffeine.date,
# "timezone": recent_caffeine.timezone,
# },
# )
# super(Caffeine, self).clean()
#
# def __str__(self):
# return (
# "%s at %s %s"
# % (
# DRINK_TYPES[self.ctype],
# self.date.strftime(settings.CAFFEINE_DATETIME_FORMAT),
# self.timezone or "",
# )
# ).strip()
#
# def format_type(self):
# return DRINK_TYPES[self.ctype]
#
# class User(AbstractUser):
# """
# User model.
#
# """
#
# cryptsum = models.CharField(_("old password hash"), max_length=60, blank=True)
# location = models.CharField(max_length=128, blank=True)
# public = models.BooleanField(default=True)
# token = models.CharField(max_length=32, unique=True)
# timezone = models.CharField(_("timezone"), max_length=40, db_index=True, blank=True)
#
# objects = CaffeineUserManager()
#
# def get_absolute_url(self):
# return reverse("public", kwargs={"username": self.username})
#
# def __str__(self):
# return self.get_full_name() or self.username
#
# def export_csv(self):
# subject = _("Your caffeine records")
# body = _("Attached is your caffeine track record.")
# email = EmailMessage(subject, body, to=[self.email])
# now = timezone.now().strftime(settings.CAFFEINE_DATETIME_FORMAT)
# for drink in ("coffee", "mate"):
# email.attachments.append(
# (
# "%s-%s.csv" % (drink, now),
# Caffeine.objects.get_csv_data(getattr(DRINK_TYPES, drink), self),
# "text/csv",
# )
# )
# email.send()
#
# def has_usable_password(self):
# """
# Checks whether the current user has either an old password hash or a
# valid new password hash.
#
# """
# return self.cryptsum or super(User, self).has_usable_password()
#
# def set_password(self, password):
# """
# Sets the password and creates the authentication token if it is not
# set.
#
# """
# super(User, self).set_password(password)
# if not self.token:
# # on the run token
# # TODO: use something better for API authentication
# self.token = md5((self.username + password).encode("utf8")).hexdigest()
. Output only the next line. | admin.site.register(Action) |
Given the code snippet: <|code_start|>
class CaffeineUserAdmin(UserAdmin):
"""
Custom admin page for users.
"""
form = UserChangeForm
add_form = UserCreationForm
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
list_filter = ('is_staff',)
fieldsets = (
(None, {
'fields': ('username', 'email', 'password')}),
(_('Personal info'), {
'fields': ('first_name', 'last_name', 'location')}),
(_('Permissions'), {
'fields': ('is_superuser', 'is_staff', 'is_active', 'public')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'email', 'password1', 'password2')}),
)
search_fields = ('username', 'email')
ordering = ('username', 'email')
filter_horizontal = ()
admin.site.register(Action)
<|code_end|>
, generate the next line using the imports in this file:
from django import forms
from django.utils.translation import ugettext as _
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from .models import (
Action,
Caffeine,
User,
)
and context (functions, classes, or occasionally code) from other files:
# Path: coffeestats/caffeine/models.py
# class Action(models.Model):
# """
# Action model.
#
# """
#
# user = models.ForeignKey("User", models.PROTECT)
# code = models.CharField(_("action code"), max_length=32, unique=True)
# created = AutoCreatedField(_("created"))
# validuntil = models.DateTimeField(_("valid until"), db_index=True)
# atype = models.PositiveSmallIntegerField(
# _("action type"), choices=ACTION_TYPES, db_index=True
# )
# data = models.TextField(_("action data"))
#
# objects = ActionManager()
#
# class Meta:
# ordering = ["-validuntil"]
#
# def __str__(self):
# return "%s valid until %s" % (ACTION_TYPES[self.atype], self.validuntil)
#
# class Caffeine(models.Model):
# """
# Caffeinated drink model.
#
# """
#
# ctype = models.PositiveSmallIntegerField(choices=DRINK_TYPES, db_index=True)
# user = models.ForeignKey("User", models.PROTECT, related_name="caffeines")
# date = models.DateTimeField(_("consumed"), db_index=True)
# entrytime = AutoCreatedField(_("entered"), db_index=True)
# timezone = models.CharField(max_length=40, db_index=True, blank=True)
#
# objects = CaffeineManager()
#
# class Meta:
# ordering = ["-date"]
#
# def clean(self):
# recent_caffeine = Caffeine.objects.find_recent_caffeine(
# self.user, self.date, self.ctype
# )
# if recent_caffeine:
# raise ValidationError(
# _(
# "Your last %(drink)s was less than %(minutes)d minutes "
# "ago at %(date)s %(timezone)s"
# ),
# code="drink_frequency",
# params={
# "drink": DRINK_TYPES[self.ctype],
# "minutes": settings.MINIMUM_DRINK_DISTANCE,
# "date": recent_caffeine.date,
# "timezone": recent_caffeine.timezone,
# },
# )
# super(Caffeine, self).clean()
#
# def __str__(self):
# return (
# "%s at %s %s"
# % (
# DRINK_TYPES[self.ctype],
# self.date.strftime(settings.CAFFEINE_DATETIME_FORMAT),
# self.timezone or "",
# )
# ).strip()
#
# def format_type(self):
# return DRINK_TYPES[self.ctype]
#
# class User(AbstractUser):
# """
# User model.
#
# """
#
# cryptsum = models.CharField(_("old password hash"), max_length=60, blank=True)
# location = models.CharField(max_length=128, blank=True)
# public = models.BooleanField(default=True)
# token = models.CharField(max_length=32, unique=True)
# timezone = models.CharField(_("timezone"), max_length=40, db_index=True, blank=True)
#
# objects = CaffeineUserManager()
#
# def get_absolute_url(self):
# return reverse("public", kwargs={"username": self.username})
#
# def __str__(self):
# return self.get_full_name() or self.username
#
# def export_csv(self):
# subject = _("Your caffeine records")
# body = _("Attached is your caffeine track record.")
# email = EmailMessage(subject, body, to=[self.email])
# now = timezone.now().strftime(settings.CAFFEINE_DATETIME_FORMAT)
# for drink in ("coffee", "mate"):
# email.attachments.append(
# (
# "%s-%s.csv" % (drink, now),
# Caffeine.objects.get_csv_data(getattr(DRINK_TYPES, drink), self),
# "text/csv",
# )
# )
# email.send()
#
# def has_usable_password(self):
# """
# Checks whether the current user has either an old password hash or a
# valid new password hash.
#
# """
# return self.cryptsum or super(User, self).has_usable_password()
#
# def set_password(self, password):
# """
# Sets the password and creates the authentication token if it is not
# set.
#
# """
# super(User, self).set_password(password)
# if not self.token:
# # on the run token
# # TODO: use something better for API authentication
# self.token = md5((self.username + password).encode("utf8")).hexdigest()
. Output only the next line. | admin.site.register(Caffeine) |
Continue the code snippet: <|code_start|>"""
Django admin classes for the caffeine app.
"""
PASSWORD_MISMATCH_ERROR = _("Passwords don't match")
class UserCreationForm(forms.ModelForm):
"""
A form for creating new users. Includes all the required fields, plus a
repeated password.
"""
password1 = forms.CharField(label=_('Password'),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_('Password (again)'),
widget=forms.PasswordInput)
class Meta:
<|code_end|>
. Use current file imports:
from django import forms
from django.utils.translation import ugettext as _
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from .models import (
Action,
Caffeine,
User,
)
and context (classes, functions, or code) from other files:
# Path: coffeestats/caffeine/models.py
# class Action(models.Model):
# """
# Action model.
#
# """
#
# user = models.ForeignKey("User", models.PROTECT)
# code = models.CharField(_("action code"), max_length=32, unique=True)
# created = AutoCreatedField(_("created"))
# validuntil = models.DateTimeField(_("valid until"), db_index=True)
# atype = models.PositiveSmallIntegerField(
# _("action type"), choices=ACTION_TYPES, db_index=True
# )
# data = models.TextField(_("action data"))
#
# objects = ActionManager()
#
# class Meta:
# ordering = ["-validuntil"]
#
# def __str__(self):
# return "%s valid until %s" % (ACTION_TYPES[self.atype], self.validuntil)
#
# class Caffeine(models.Model):
# """
# Caffeinated drink model.
#
# """
#
# ctype = models.PositiveSmallIntegerField(choices=DRINK_TYPES, db_index=True)
# user = models.ForeignKey("User", models.PROTECT, related_name="caffeines")
# date = models.DateTimeField(_("consumed"), db_index=True)
# entrytime = AutoCreatedField(_("entered"), db_index=True)
# timezone = models.CharField(max_length=40, db_index=True, blank=True)
#
# objects = CaffeineManager()
#
# class Meta:
# ordering = ["-date"]
#
# def clean(self):
# recent_caffeine = Caffeine.objects.find_recent_caffeine(
# self.user, self.date, self.ctype
# )
# if recent_caffeine:
# raise ValidationError(
# _(
# "Your last %(drink)s was less than %(minutes)d minutes "
# "ago at %(date)s %(timezone)s"
# ),
# code="drink_frequency",
# params={
# "drink": DRINK_TYPES[self.ctype],
# "minutes": settings.MINIMUM_DRINK_DISTANCE,
# "date": recent_caffeine.date,
# "timezone": recent_caffeine.timezone,
# },
# )
# super(Caffeine, self).clean()
#
# def __str__(self):
# return (
# "%s at %s %s"
# % (
# DRINK_TYPES[self.ctype],
# self.date.strftime(settings.CAFFEINE_DATETIME_FORMAT),
# self.timezone or "",
# )
# ).strip()
#
# def format_type(self):
# return DRINK_TYPES[self.ctype]
#
# class User(AbstractUser):
# """
# User model.
#
# """
#
# cryptsum = models.CharField(_("old password hash"), max_length=60, blank=True)
# location = models.CharField(max_length=128, blank=True)
# public = models.BooleanField(default=True)
# token = models.CharField(max_length=32, unique=True)
# timezone = models.CharField(_("timezone"), max_length=40, db_index=True, blank=True)
#
# objects = CaffeineUserManager()
#
# def get_absolute_url(self):
# return reverse("public", kwargs={"username": self.username})
#
# def __str__(self):
# return self.get_full_name() or self.username
#
# def export_csv(self):
# subject = _("Your caffeine records")
# body = _("Attached is your caffeine track record.")
# email = EmailMessage(subject, body, to=[self.email])
# now = timezone.now().strftime(settings.CAFFEINE_DATETIME_FORMAT)
# for drink in ("coffee", "mate"):
# email.attachments.append(
# (
# "%s-%s.csv" % (drink, now),
# Caffeine.objects.get_csv_data(getattr(DRINK_TYPES, drink), self),
# "text/csv",
# )
# )
# email.send()
#
# def has_usable_password(self):
# """
# Checks whether the current user has either an old password hash or a
# valid new password hash.
#
# """
# return self.cryptsum or super(User, self).has_usable_password()
#
# def set_password(self, password):
# """
# Sets the password and creates the authentication token if it is not
# set.
#
# """
# super(User, self).set_password(password)
# if not self.token:
# # on the run token
# # TODO: use something better for API authentication
# self.token = md5((self.username + password).encode("utf8")).hexdigest()
. Output only the next line. | model = User |
Given snippet: <|code_start|>"""
Custom authentication backend for coffeestats.
"""
logger = logging.getLogger(__name__)
class LegacyCoffeestatsAuth(object):
"""
Authentication backend for passwords generated by the original coffeestats
PHP implementation.
"""
def authenticate(self, username=None, password=None):
try:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from passlib.hash import bcrypt
from django.utils import timezone
from django.contrib.auth.hashers import make_password
from .models import User
import logging
and context:
# Path: coffeestats/caffeine/models.py
# class User(AbstractUser):
# """
# User model.
#
# """
#
# cryptsum = models.CharField(_("old password hash"), max_length=60, blank=True)
# location = models.CharField(max_length=128, blank=True)
# public = models.BooleanField(default=True)
# token = models.CharField(max_length=32, unique=True)
# timezone = models.CharField(_("timezone"), max_length=40, db_index=True, blank=True)
#
# objects = CaffeineUserManager()
#
# def get_absolute_url(self):
# return reverse("public", kwargs={"username": self.username})
#
# def __str__(self):
# return self.get_full_name() or self.username
#
# def export_csv(self):
# subject = _("Your caffeine records")
# body = _("Attached is your caffeine track record.")
# email = EmailMessage(subject, body, to=[self.email])
# now = timezone.now().strftime(settings.CAFFEINE_DATETIME_FORMAT)
# for drink in ("coffee", "mate"):
# email.attachments.append(
# (
# "%s-%s.csv" % (drink, now),
# Caffeine.objects.get_csv_data(getattr(DRINK_TYPES, drink), self),
# "text/csv",
# )
# )
# email.send()
#
# def has_usable_password(self):
# """
# Checks whether the current user has either an old password hash or a
# valid new password hash.
#
# """
# return self.cryptsum or super(User, self).has_usable_password()
#
# def set_password(self, password):
# """
# Sets the password and creates the authentication token if it is not
# set.
#
# """
# super(User, self).set_password(password)
# if not self.token:
# # on the run token
# # TODO: use something better for API authentication
# self.token = md5((self.username + password).encode("utf8")).hexdigest()
which might include code, classes, or functions. Output only the next line. | user = User.objects.get(username=username) |
Given the following code snippet before the placeholder: <|code_start|>
class ManagedROMArchiveTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.temppath = os.path.join(self.tempdir, "tempfile")
self.mock_user = mock()
self.mock_user.user_id = 1234
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_previous_managed_ids_returns_none_for_missing_file(self):
missing_path = os.path.join("some", "stupid", "path")
self.assertFalse(os.path.exists(missing_path))
<|code_end|>
, predict the next line using imports from the current file:
import json
import os
import shutil
import tempfile
import unittest
from mockito import *
from ice.history import ManagedROMArchive
and context including class names, function names, and sometimes code from other files:
# Path: ice/history.py
# class ManagedROMArchive(object):
# def __init__(self, archive_path):
# self.archive_path = archive_path
#
# self.archive = self.load_archive(archive_path)
#
# def load_archive(self, path):
# if not os.path.exists(path):
# return None
#
# with open(path) as archive_file:
# return json.load(archive_file)
#
# def archive_key(self, user):
# return str(user.user_id)
#
# def previous_managed_ids(self, user):
# if self.archive is None:
# return None
#
# key = self.archive_key(user)
# return self.archive[key] if key in self.archive else []
#
# def set_managed_ids(self, user, managed_ids):
# # `dict` makes a copy of `archive` so I can modify it freely
# new_archive = dict(self.archive) if self.archive else {}
# # Overwrite the old data
# new_archive[self.archive_key(user)] = managed_ids
# # Save the data to disk
# archive_json = json.dumps(new_archive)
# with open(self.archive_path, "w+") as f:
# f.write(archive_json)
# # At this point if an exception wasnt thrown then we know the data
# # save successfully, so overwrite our local data with what we just saved
# self.archive = new_archive
. Output only the next line. | archive = ManagedROMArchive(missing_path) |
Predict the next line for this snippet: <|code_start|># encoding: utf-8
"""
local_provider_tests.py
Created by Scott on 2014-08-18.
Copyright (c) 2014 Scott Rice. All rights reserved.
"""
class LocalProviderTests(unittest.TestCase):
def setUp(self):
<|code_end|>
with the help of current file imports:
from mockito import *
from ice.gridproviders.local_provider import LocalProvider
import os
import shutil
import tempfile
import unittest
and context from other files:
# Path: ice/gridproviders/local_provider.py
# class LocalProvider(grid_image_provider.GridImageProvider):
#
# def valid_extensions(self):
# return ['.png', '.jpg', '.jpeg', '.tiff']
#
# def image_for_rom(self, rom):
# """
# Checks the filesystem for images for a given ROM. To do so, it makes
# use of a console's 'images' directory. If it finds an image in that
# directory with the same name as the ROM's name then it will return that.
# """
# img_dir = rom.console.images_directory
# if img_dir == "":
# logger.debug(
# "[%s] No images directory specified for %s" %
# (rom.name, rom.console.shortname)
# )
# return None
# for extension in self.valid_extensions():
# filename = rom.name + extension
# path = os.path.join(img_dir, filename)
# if os.path.isfile(path):
# # We found a valid path, return it
# return path
# # We went through all of the possible filenames for this ROM and a
# # file didn't exist with any of them. There is no image for this ROM in
# # the console's image directory
# return None
, which may contain function names, class names, or code. Output only the next line. | self.provider = LocalProvider() |
Based on the snippet: <|code_start|># encoding: utf-8
"""
consolegrid_provider_tests.py
Created by Scott on 2014-08-18.
Copyright (c) 2014 Scott Rice. All rights reserved.
"""
# I need to do this instead of importing the class explicitly so that I can
# override the urllib2 function.
# TODO: Use dependency injection so I don't need to use that hack.
class ConsoleGridProviderTests(unittest.TestCase):
def setUp(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import unittest
from mockito import *
from urllib2 import URLError
from ice.gridproviders import consolegrid_provider
and context (classes, functions, sometimes code) from other files:
# Path: ice/gridproviders/consolegrid_provider.py
# class ConsoleGridProvider(grid_image_provider.GridImageProvider):
# def api_url():
# def is_enabled():
# def consolegrid_top_picture_url(self, rom):
# def find_url_for_rom(self, rom):
# def download_image(self, url):
# def image_for_rom(self, rom):
. Output only the next line. | self.provider = consolegrid_provider.ConsoleGridProvider() |
Using the snippet: <|code_start|>
class EmulatorsTests(unittest.TestCase):
@parameterized.expand([
("C:/emu.exe", "C:"),
("C:/Path/to/emulator.exe", "C:/Path/to"),
("/emu", "/"),
("/path/to/emulator", "/path/to"),
])
def test_emulator_startdir(self, location, expected):
emu = model.Emulator("Mednafen", location, "%l %r")
<|code_end|>
, determine the next line of code. You have imports:
import os
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from ice import emulators
from ice import model
and context (class names, function names, or code) available:
# Path: ice/emulators.py
# def emulator_rom_launch_command(emulator, rom):
# def emulator_startdir(emulator):
#
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
. Output only the next line. | self.assertEqual(emulators.emulator_startdir(emu), expected) |
Given the following code snippet before the placeholder: <|code_start|>
class EmulatorsTests(unittest.TestCase):
@parameterized.expand([
("C:/emu.exe", "C:"),
("C:/Path/to/emulator.exe", "C:/Path/to"),
("/emu", "/"),
("/path/to/emulator", "/path/to"),
])
def test_emulator_startdir(self, location, expected):
<|code_end|>
, predict the next line using imports from the current file:
import os
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from ice import emulators
from ice import model
and context including class names, function names, and sometimes code from other files:
# Path: ice/emulators.py
# def emulator_rom_launch_command(emulator, rom):
# def emulator_startdir(emulator):
#
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
. Output only the next line. | emu = model.Emulator("Mednafen", location, "%l %r") |
Next line prediction: <|code_start|>
class BackupsTests(unittest.TestCase):
def setUp(self):
self.steam_fixture = fixtures.SteamFixture()
self.user_fixture = fixtures.UserFixture(self.steam_fixture)
@parameterized.expand([
(None, None),
<|code_end|>
. Use current file imports:
(import os
import shutil
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from pysteam import model
from pysteam import shortcuts
from ice import backups
from ice import paths
from testinfra import fixtures)
and context including class names, function names, or small code snippets from other files:
# Path: ice/backups.py
# def default_backups_directory():
# def backup_filename(user, timestamp_format):
# def shortcuts_backup_path(directory, user, timestamp_format="%Y%m%d%H%M%S"):
# def backup_directory(config):
# def create_backup_of_shortcuts(config, user, dry_run=False):
# def _create_directory_if_needed(directory):
#
# Path: ice/paths.py
# def application_data_directory():
# def data_file_path(filename):
# def archive_path():
# def log_file_location():
# def default_roms_directory():
. Output only the next line. | ("", backups.default_backups_directory()), |
Based on the snippet: <|code_start|># encoding: utf-8
class TaskEngine(object):
def __init__(self, steam):
self.steam = steam
<|code_end|>
, predict the immediate next line with the help of imports:
import os
from pysteam import paths as steam_paths
from pysteam import shortcuts
from pysteam import steam as steam_module
from ice import backups
from ice import configuration
from ice import consoles
from ice import emulators
from ice import paths
from ice import settings
from ice.logs import logger
from ice.persistence.config_file_backing_store import ConfigFileBackingStore
and context (classes, functions, sometimes code) from other files:
# Path: ice/backups.py
# def default_backups_directory():
# def backup_filename(user, timestamp_format):
# def shortcuts_backup_path(directory, user, timestamp_format="%Y%m%d%H%M%S"):
# def backup_directory(config):
# def create_backup_of_shortcuts(config, user, dry_run=False):
# def _create_directory_if_needed(directory):
#
# Path: ice/configuration.py
# def get(store, option):
# def get_directory(store, option):
# def from_store(store):
#
# Path: ice/consoles.py
# def console_roms_directory(configuration, console):
# def path_is_rom(console, path):
#
# Path: ice/emulators.py
# def emulator_rom_launch_command(emulator, rom):
# def emulator_startdir(emulator):
#
# Path: ice/paths.py
# def application_data_directory():
# def data_file_path(filename):
# def archive_path():
# def log_file_location():
# def default_roms_directory():
#
# Path: ice/settings.py
# def find_settings_file(name, filesystem):
# def settings_file_path(name, filesystem, override = None):
# def load_configuration(filesystem, override = None):
# def load_emulators(filesystem, override = None):
# def load_consoles(emulators, filesystem, override = None):
# def load_app_settings(filesystem, file_overrides = {}):
# def image_provider(config):
#
# Path: ice/logs.py
# STREAM_STRING_FORMAT = '%(leveltag)s%(message)s'
# FILE_STRING_FORMAT = '%(asctime)s [%(levelname)s][%(filename)s][%(funcName)s:%(lineno)s]: %(message)s'
# def is_test_stack_frame(frame):
# def is_running_in_test():
# def _tag_for_level(self, levelno):
# def filter(self, record):
# def create_stream_handler(level):
# def create_file_handler(level):
# def create_logger():
# class IceLevelTagFilter(logging.Formatter):
#
# Path: ice/persistence/config_file_backing_store.py
# class ConfigFileBackingStore(backing_store.BackingStore):
#
# def __init__(self, path):
# super(ConfigFileBackingStore, self).__init__(path)
# self.configParser = ConfigParser.RawConfigParser()
# self.configParser.read(self.path)
#
# def identifiers(self):
# return self.configParser.sections()
#
# def add_identifier(self, ident):
# try:
# self.configParser.add_section(ident)
# except ConfigParser.DuplicateSectionError:
# raise ValueError("The identifier `%s` already exists" % str(ident))
#
# def remove_identifier(self, ident):
# self.configParser.remove_section(ident)
#
# def keys(self, ident):
# try:
# return self.configParser.options(ident)
# except ConfigParser.NoSectionError:
# raise ValueError("No identifier named `%s` exists" % str(ident))
#
# def get(self, ident, key, default=None):
# try:
# val = self.configParser.get(ident, key.lower())
# return val
# except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
# return default
#
# def set(self, ident, key, value):
# self.configParser.set(ident, key.lower(), value)
#
# def save(self):
# try:
# with open(self.path, "w") as configFile:
# self.configParser.write(configFile)
# except IOError:
# raise IOError("Cannot save data to `%s`. Permission Denied")
. Output only the next line. | logger.debug("Initializing Ice") |
Given the following code snippet before the placeholder: <|code_start|>
class ConsoleAdapterTests(unittest.TestCase):
def test_verify(self):
emu = mock()
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from mockito import *
from nose_parameterized import parameterized
from ice.persistence.adapters import console_adapter
from ice import model
from testinfra import fixtures
and context including class names, function names, and sometimes code from other files:
# Path: ice/persistence/adapters/console_adapter.py
# class ConsoleBackedObjectAdapter(object):
# def __init__(self, emulators):
# def new(self, backing_store, identifier):
# def verify(self, console):
# def save_in_store(self, backing_store, identifier, console):
#
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
. Output only the next line. | adapter = console_adapter.ConsoleBackedObjectAdapter([]) |
Here is a snippet: <|code_start|>
class ConsoleAdapterTests(unittest.TestCase):
def test_verify(self):
emu = mock()
adapter = console_adapter.ConsoleBackedObjectAdapter([])
<|code_end|>
. Write the next line using the current file imports:
import unittest
from mockito import *
from nose_parameterized import parameterized
from ice.persistence.adapters import console_adapter
from ice import model
from testinfra import fixtures
and context from other files:
# Path: ice/persistence/adapters/console_adapter.py
# class ConsoleBackedObjectAdapter(object):
# def __init__(self, emulators):
# def new(self, backing_store, identifier):
# def verify(self, console):
# def save_in_store(self, backing_store, identifier, console):
#
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
, which may include functions, classes, or code. Output only the next line. | valid = model.Console("Nintendo", "NES", "", "", "", "", "", emu) |
Based on the snippet: <|code_start|>
class ROMParser(object):
regexes = [
# Regex that matches the entire string up until it hits the first '[',
# ']', '(', ')', or '.'
# DOESN'T WORK FOR GAMES WITH ()s IN THEIR NAME
ur"(?P<name>[^\(\)\[\]]*).*",
]
def __init__(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import re
import unicodedata
from ice.logs import logger
and context (classes, functions, sometimes code) from other files:
# Path: ice/logs.py
# STREAM_STRING_FORMAT = '%(leveltag)s%(message)s'
# FILE_STRING_FORMAT = '%(asctime)s [%(levelname)s][%(filename)s][%(funcName)s:%(lineno)s]: %(message)s'
# def is_test_stack_frame(frame):
# def is_running_in_test():
# def _tag_for_level(self, levelno):
# def filter(self, record):
# def create_stream_handler(level):
# def create_file_handler(level):
# def create_logger():
# class IceLevelTagFilter(logging.Formatter):
. Output only the next line. | logger.debug("Creating ROM parser with regexes: %s" % self.regexes) |
Using the snippet: <|code_start|> # },
# ...
# }
#
# Will produce an output like this
# [Section Name]
# key=value
# key=value
#
# [Section Name 2]
# key2=value
# key2=value
f = open(path, "w")
for section_name in sections_dict.keys():
f.write("[%s]\n" % section_name)
keyvalues = sections_dict[section_name]
for key in keyvalues.keys():
value = keyvalues[key]
f.write("%s=%s\n" % (key, value))
f.write("\n")
f.close()
def file_contents(self, path):
f = open(path, "r")
contents = f.read()
f.close()
return contents
def test_empty_file(self):
self.create_config_file(self.tempfile, {})
<|code_end|>
, determine the next line of code. You have imports:
import os
import shutil
import stat
import tempfile
import unittest
from ice.persistence.config_file_backing_store import ConfigFileBackingStore
and context (class names, function names, or code) available:
# Path: ice/persistence/config_file_backing_store.py
# class ConfigFileBackingStore(backing_store.BackingStore):
#
# def __init__(self, path):
# super(ConfigFileBackingStore, self).__init__(path)
# self.configParser = ConfigParser.RawConfigParser()
# self.configParser.read(self.path)
#
# def identifiers(self):
# return self.configParser.sections()
#
# def add_identifier(self, ident):
# try:
# self.configParser.add_section(ident)
# except ConfigParser.DuplicateSectionError:
# raise ValueError("The identifier `%s` already exists" % str(ident))
#
# def remove_identifier(self, ident):
# self.configParser.remove_section(ident)
#
# def keys(self, ident):
# try:
# return self.configParser.options(ident)
# except ConfigParser.NoSectionError:
# raise ValueError("No identifier named `%s` exists" % str(ident))
#
# def get(self, ident, key, default=None):
# try:
# val = self.configParser.get(ident, key.lower())
# return val
# except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
# return default
#
# def set(self, ident, key, value):
# self.configParser.set(ident, key.lower(), value)
#
# def save(self):
# try:
# with open(self.path, "w") as configFile:
# self.configParser.write(configFile)
# except IOError:
# raise IOError("Cannot save data to `%s`. Permission Denied")
. Output only the next line. | cfbs = ConfigFileBackingStore(self.tempfile) |
Next line prediction: <|code_start|>
class EnvironmentCheckerTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def testRequireDirectoryExistsSucceedsWhenDirectoryExists(self):
try:
<|code_end|>
. Use current file imports:
(import os
import shutil
import stat
import tempfile
import unittest
from ice.environment_checker import EnvironmentChecker
from ice.error.env_checker_error import EnvCheckerError
from ice.filesystem import RealFilesystem)
and context including class names, function names, or small code snippets from other files:
# Path: ice/environment_checker.py
# class EnvironmentChecker(object):
#
# def __init__(self, filesystem):
# self.filesystem = filesystem
#
# def __enter__(self):
# self.requirement_errors = []
# return self
#
# def __exit__(self, type, value, tb):
# self.resolve_unment_requirements()
#
# def require_directory_exists(self, path):
# """
# Validate that a directory exists.
#
# An attempt will be made to create a directory if it is missing
#
# Validation will fail if a directory can't be created at that path
# or if a file already exists there
# """
# if not self.filesystem.is_directory(path):
# self.requirement_errors.append(PathExistanceError(self.filesystem, path))
#
# def require_writable_path(self, path):
# """
# Validate that a path is writable.
#
# Returns an error if the path doesn't exist or it isn't writable
# None otherwise
# """
# if not self.filesystem.is_writable(path):
# self.requirement_errors.append(WritablePathError(path))
#
# def require_program_not_running(self, program_name):
# """
# Validate that a program with the name `program_name` is not currently
# running on the user's system.
# """
# for pid in psutil.pids():
# try:
# p = psutil.Process(pid)
# if p.name().lower().startswith(program_name.lower()):
# return self.requirement_errors.append(
# ProcessRunningError(p.name()))
# except Exception:
# continue
#
# def resolve_unment_requirements(self):
# """
# Attempts to resolve any added requirements that were unmet
# """
# for err in self.requirement_errors:
# err.resolve()
#
# Path: ice/filesystem.py
# class RealFilesystem(object):
#
# def create_directories(self, path):
# return os.makedirs(path)
#
# def path_exists(self, path):
# return os.path.exists(path)
#
# def is_directory(self, path):
# return os.path.isdir(path)
#
# def is_file(self, path):
# return os.path.isfile(path)
#
# def is_writable(self, path):
# return os.access(path, os.W_OK)
#
# def _paths_in_directory(self, directory, incl_subdirs=False):
# assert self.is_directory(directory)
# # Use glob instead of `os.listdir` to find files because glob will ignore
# # hidden files. Or at least some hidden files. It ignores any file whose
# # name starts with ".", which is basically equivalent to 'hidden files' on
# # OSX/Linux, but means nothing on Windows. Still, its good enough, and I'd
# # like to avoid having OS-specific 'ignore hidden files' logic in this file
# # and let Python handle it instead.
# pattern = os.path.join(directory, "*")
# result = glob.glob(pattern)
# # Unfortunately Python glob doesn't support `**` for matching 0 or 1
# # subdirectories (like I was hoping it would), so instead we run a second
# # glob if we need subdirectories
# subdir_pattern = os.path.join(directory, "*", "*")
# subdir_result = glob.glob(subdir_pattern) if incl_subdirs else []
# return result + subdir_result
#
# def files_in_directory(self, directory, include_subdirectories=False):
# assert self.is_directory(directory), "Must specify a directory"
# return filter(
# os.path.isfile,
# self._paths_in_directory(directory, incl_subdirs=include_subdirectories),
# )
#
# def subdirectories_of_directory(self, directory, recursive=False):
# assert self.is_directory(directory), "Must specify a directory"
# return filter(
# os.path.isdir,
# self._paths_in_directory(directory, incl_subdirs=recursive),
# )
. Output only the next line. | with EnvironmentChecker(RealFilesystem()) as env_checker: |
Next line prediction: <|code_start|>
class EnvironmentCheckerTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def testRequireDirectoryExistsSucceedsWhenDirectoryExists(self):
try:
<|code_end|>
. Use current file imports:
(import os
import shutil
import stat
import tempfile
import unittest
from ice.environment_checker import EnvironmentChecker
from ice.error.env_checker_error import EnvCheckerError
from ice.filesystem import RealFilesystem)
and context including class names, function names, or small code snippets from other files:
# Path: ice/environment_checker.py
# class EnvironmentChecker(object):
#
# def __init__(self, filesystem):
# self.filesystem = filesystem
#
# def __enter__(self):
# self.requirement_errors = []
# return self
#
# def __exit__(self, type, value, tb):
# self.resolve_unment_requirements()
#
# def require_directory_exists(self, path):
# """
# Validate that a directory exists.
#
# An attempt will be made to create a directory if it is missing
#
# Validation will fail if a directory can't be created at that path
# or if a file already exists there
# """
# if not self.filesystem.is_directory(path):
# self.requirement_errors.append(PathExistanceError(self.filesystem, path))
#
# def require_writable_path(self, path):
# """
# Validate that a path is writable.
#
# Returns an error if the path doesn't exist or it isn't writable
# None otherwise
# """
# if not self.filesystem.is_writable(path):
# self.requirement_errors.append(WritablePathError(path))
#
# def require_program_not_running(self, program_name):
# """
# Validate that a program with the name `program_name` is not currently
# running on the user's system.
# """
# for pid in psutil.pids():
# try:
# p = psutil.Process(pid)
# if p.name().lower().startswith(program_name.lower()):
# return self.requirement_errors.append(
# ProcessRunningError(p.name()))
# except Exception:
# continue
#
# def resolve_unment_requirements(self):
# """
# Attempts to resolve any added requirements that were unmet
# """
# for err in self.requirement_errors:
# err.resolve()
#
# Path: ice/filesystem.py
# class RealFilesystem(object):
#
# def create_directories(self, path):
# return os.makedirs(path)
#
# def path_exists(self, path):
# return os.path.exists(path)
#
# def is_directory(self, path):
# return os.path.isdir(path)
#
# def is_file(self, path):
# return os.path.isfile(path)
#
# def is_writable(self, path):
# return os.access(path, os.W_OK)
#
# def _paths_in_directory(self, directory, incl_subdirs=False):
# assert self.is_directory(directory)
# # Use glob instead of `os.listdir` to find files because glob will ignore
# # hidden files. Or at least some hidden files. It ignores any file whose
# # name starts with ".", which is basically equivalent to 'hidden files' on
# # OSX/Linux, but means nothing on Windows. Still, its good enough, and I'd
# # like to avoid having OS-specific 'ignore hidden files' logic in this file
# # and let Python handle it instead.
# pattern = os.path.join(directory, "*")
# result = glob.glob(pattern)
# # Unfortunately Python glob doesn't support `**` for matching 0 or 1
# # subdirectories (like I was hoping it would), so instead we run a second
# # glob if we need subdirectories
# subdir_pattern = os.path.join(directory, "*", "*")
# subdir_result = glob.glob(subdir_pattern) if incl_subdirs else []
# return result + subdir_result
#
# def files_in_directory(self, directory, include_subdirectories=False):
# assert self.is_directory(directory), "Must specify a directory"
# return filter(
# os.path.isfile,
# self._paths_in_directory(directory, incl_subdirs=include_subdirectories),
# )
#
# def subdirectories_of_directory(self, directory, recursive=False):
# assert self.is_directory(directory), "Must specify a directory"
# return filter(
# os.path.isdir,
# self._paths_in_directory(directory, incl_subdirs=recursive),
# )
. Output only the next line. | with EnvironmentChecker(RealFilesystem()) as env_checker: |
Predict the next line for this snippet: <|code_start|>
class EmulatorAdapterTests(unittest.TestCase):
def setUp(self):
fs = filesystem.RealFilesystem()
<|code_end|>
with the help of current file imports:
import os
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from ice import filesystem
from ice.persistence.adapters import emulator_adapter
from ice import model
and context from other files:
# Path: ice/filesystem.py
# class RealFilesystem(object):
# class FakeFilesystem(object):
# def create_directories(self, path):
# def path_exists(self, path):
# def is_directory(self, path):
# def is_file(self, path):
# def is_writable(self, path):
# def _paths_in_directory(self, directory, incl_subdirs=False):
# def files_in_directory(self, directory, include_subdirectories=False):
# def subdirectories_of_directory(self, directory, recursive=False):
# def __init__(self, root):
# def adjusted_path(self, path):
# def create_directories(self, path):
# def path_exists(self, path):
# def is_directory(self, path):
# def is_file(self, path):
# def is_writable(self, path):
# def files_in_directory(self, directory, include_subdirectories=False):
# def subdirectories_of_directory(self, directory, recursive=False):
#
# Path: ice/persistence/adapters/emulator_adapter.py
# class EmulatorBackedObjectAdapter(object):
# def __init__(self, filesystem):
# def new(self, backing_store, identifier):
# def verify(self, emulator):
# def save_in_store(self, backing_store, identifier, emulator):
#
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
, which may contain function names, class names, or code. Output only the next line. | self.adapter = emulator_adapter.EmulatorBackedObjectAdapter(fs) |
Continue the code snippet: <|code_start|>
class EmulatorAdapterTests(unittest.TestCase):
def setUp(self):
fs = filesystem.RealFilesystem()
self.adapter = emulator_adapter.EmulatorBackedObjectAdapter(fs)
def test_verify_returns_false_when_location_is_none(self):
<|code_end|>
. Use current file imports:
import os
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from ice import filesystem
from ice.persistence.adapters import emulator_adapter
from ice import model
and context (classes, functions, or code) from other files:
# Path: ice/filesystem.py
# class RealFilesystem(object):
# class FakeFilesystem(object):
# def create_directories(self, path):
# def path_exists(self, path):
# def is_directory(self, path):
# def is_file(self, path):
# def is_writable(self, path):
# def _paths_in_directory(self, directory, incl_subdirs=False):
# def files_in_directory(self, directory, include_subdirectories=False):
# def subdirectories_of_directory(self, directory, recursive=False):
# def __init__(self, root):
# def adjusted_path(self, path):
# def create_directories(self, path):
# def path_exists(self, path):
# def is_directory(self, path):
# def is_file(self, path):
# def is_writable(self, path):
# def files_in_directory(self, directory, include_subdirectories=False):
# def subdirectories_of_directory(self, directory, recursive=False):
#
# Path: ice/persistence/adapters/emulator_adapter.py
# class EmulatorBackedObjectAdapter(object):
# def __init__(self, filesystem):
# def new(self, backing_store, identifier):
# def verify(self, emulator):
# def save_in_store(self, backing_store, identifier, emulator):
#
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
. Output only the next line. | emu = model.Emulator("Mednafen", None, "%l %r") |
Predict the next line after this snippet: <|code_start|>
class SettingsTests(unittest.TestCase):
def setUp(self):
pass
@parameterized.expand([
("local", [LocalProvider]),
<|code_end|>
using the current file's imports:
import os
import shutil
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from ice.gridproviders.consolegrid_provider import ConsoleGridProvider
from ice.gridproviders.local_provider import LocalProvider
from ice import settings
and any relevant context from other files:
# Path: ice/gridproviders/consolegrid_provider.py
# class ConsoleGridProvider(grid_image_provider.GridImageProvider):
#
# @staticmethod
# def api_url():
# return "http://consolegrid.com/api/top_picture"
#
# @staticmethod
# def is_enabled():
# # TODO: Return True/False based on the current network status
# return True
#
# def consolegrid_top_picture_url(self, rom):
# host = self.api_url()
# quoted_name = urllib.quote(rom.name)
# return "%s?console=%s&game=%s" % (host, rom.console.shortname, quoted_name)
#
# def find_url_for_rom(self, rom):
# """
# Determines a suitable grid image for a given ROM by hitting
# ConsoleGrid.com
# """
# try:
# response = urllib2.urlopen(self.consolegrid_top_picture_url(rom))
# if response.getcode() == 204:
# name = rom.name
# console = rom.console.fullname
# logger.debug(
# "ConsoleGrid has no game called `%s` for %s" % (name, console)
# )
# else:
# return response.read()
# except urllib2.URLError as error:
# # Connection was refused. ConsoleGrid may be down, or something bad
# # may have happened
# logger.debug(
# "No image was downloaded due to an error with ConsoleGrid"
# )
#
# def download_image(self, url):
# """
# Downloads the image at 'url' and returns the path to the image on the
# local filesystem
# """
# (path, headers) = urllib.urlretrieve(url)
# return path
#
# def image_for_rom(self, rom):
# image_url = self.find_url_for_rom(rom)
# if image_url is None or image_url == "":
# return None
# return self.download_image(image_url)
#
# Path: ice/gridproviders/local_provider.py
# class LocalProvider(grid_image_provider.GridImageProvider):
#
# def valid_extensions(self):
# return ['.png', '.jpg', '.jpeg', '.tiff']
#
# def image_for_rom(self, rom):
# """
# Checks the filesystem for images for a given ROM. To do so, it makes
# use of a console's 'images' directory. If it finds an image in that
# directory with the same name as the ROM's name then it will return that.
# """
# img_dir = rom.console.images_directory
# if img_dir == "":
# logger.debug(
# "[%s] No images directory specified for %s" %
# (rom.name, rom.console.shortname)
# )
# return None
# for extension in self.valid_extensions():
# filename = rom.name + extension
# path = os.path.join(img_dir, filename)
# if os.path.isfile(path):
# # We found a valid path, return it
# return path
# # We went through all of the possible filenames for this ROM and a
# # file didn't exist with any of them. There is no image for this ROM in
# # the console's image directory
# return None
#
# Path: ice/settings.py
# def find_settings_file(name, filesystem):
# def settings_file_path(name, filesystem, override = None):
# def load_configuration(filesystem, override = None):
# def load_emulators(filesystem, override = None):
# def load_consoles(emulators, filesystem, override = None):
# def load_app_settings(filesystem, file_overrides = {}):
# def image_provider(config):
. Output only the next line. | ("consolegrid", [ConsoleGridProvider]), |
Predict the next line for this snippet: <|code_start|>
class SettingsTests(unittest.TestCase):
def setUp(self):
pass
@parameterized.expand([
<|code_end|>
with the help of current file imports:
import os
import shutil
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from ice.gridproviders.consolegrid_provider import ConsoleGridProvider
from ice.gridproviders.local_provider import LocalProvider
from ice import settings
and context from other files:
# Path: ice/gridproviders/consolegrid_provider.py
# class ConsoleGridProvider(grid_image_provider.GridImageProvider):
#
# @staticmethod
# def api_url():
# return "http://consolegrid.com/api/top_picture"
#
# @staticmethod
# def is_enabled():
# # TODO: Return True/False based on the current network status
# return True
#
# def consolegrid_top_picture_url(self, rom):
# host = self.api_url()
# quoted_name = urllib.quote(rom.name)
# return "%s?console=%s&game=%s" % (host, rom.console.shortname, quoted_name)
#
# def find_url_for_rom(self, rom):
# """
# Determines a suitable grid image for a given ROM by hitting
# ConsoleGrid.com
# """
# try:
# response = urllib2.urlopen(self.consolegrid_top_picture_url(rom))
# if response.getcode() == 204:
# name = rom.name
# console = rom.console.fullname
# logger.debug(
# "ConsoleGrid has no game called `%s` for %s" % (name, console)
# )
# else:
# return response.read()
# except urllib2.URLError as error:
# # Connection was refused. ConsoleGrid may be down, or something bad
# # may have happened
# logger.debug(
# "No image was downloaded due to an error with ConsoleGrid"
# )
#
# def download_image(self, url):
# """
# Downloads the image at 'url' and returns the path to the image on the
# local filesystem
# """
# (path, headers) = urllib.urlretrieve(url)
# return path
#
# def image_for_rom(self, rom):
# image_url = self.find_url_for_rom(rom)
# if image_url is None or image_url == "":
# return None
# return self.download_image(image_url)
#
# Path: ice/gridproviders/local_provider.py
# class LocalProvider(grid_image_provider.GridImageProvider):
#
# def valid_extensions(self):
# return ['.png', '.jpg', '.jpeg', '.tiff']
#
# def image_for_rom(self, rom):
# """
# Checks the filesystem for images for a given ROM. To do so, it makes
# use of a console's 'images' directory. If it finds an image in that
# directory with the same name as the ROM's name then it will return that.
# """
# img_dir = rom.console.images_directory
# if img_dir == "":
# logger.debug(
# "[%s] No images directory specified for %s" %
# (rom.name, rom.console.shortname)
# )
# return None
# for extension in self.valid_extensions():
# filename = rom.name + extension
# path = os.path.join(img_dir, filename)
# if os.path.isfile(path):
# # We found a valid path, return it
# return path
# # We went through all of the possible filenames for this ROM and a
# # file didn't exist with any of them. There is no image for this ROM in
# # the console's image directory
# return None
#
# Path: ice/settings.py
# def find_settings_file(name, filesystem):
# def settings_file_path(name, filesystem, override = None):
# def load_configuration(filesystem, override = None):
# def load_emulators(filesystem, override = None):
# def load_consoles(emulators, filesystem, override = None):
# def load_app_settings(filesystem, file_overrides = {}):
# def image_provider(config):
, which may contain function names, class names, or code. Output only the next line. | ("local", [LocalProvider]), |
Given snippet: <|code_start|>
class SettingsTests(unittest.TestCase):
def setUp(self):
pass
@parameterized.expand([
("local", [LocalProvider]),
("consolegrid", [ConsoleGridProvider]),
("local, consolegrid", [LocalProvider, ConsoleGridProvider]),
("consOLEGRId , LOcaL ", [ConsoleGridProvider, LocalProvider])
])
def test_image_provider(self, spec, classes):
config = mock()
config.provider_spec = spec
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import shutil
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from ice.gridproviders.consolegrid_provider import ConsoleGridProvider
from ice.gridproviders.local_provider import LocalProvider
from ice import settings
and context:
# Path: ice/gridproviders/consolegrid_provider.py
# class ConsoleGridProvider(grid_image_provider.GridImageProvider):
#
# @staticmethod
# def api_url():
# return "http://consolegrid.com/api/top_picture"
#
# @staticmethod
# def is_enabled():
# # TODO: Return True/False based on the current network status
# return True
#
# def consolegrid_top_picture_url(self, rom):
# host = self.api_url()
# quoted_name = urllib.quote(rom.name)
# return "%s?console=%s&game=%s" % (host, rom.console.shortname, quoted_name)
#
# def find_url_for_rom(self, rom):
# """
# Determines a suitable grid image for a given ROM by hitting
# ConsoleGrid.com
# """
# try:
# response = urllib2.urlopen(self.consolegrid_top_picture_url(rom))
# if response.getcode() == 204:
# name = rom.name
# console = rom.console.fullname
# logger.debug(
# "ConsoleGrid has no game called `%s` for %s" % (name, console)
# )
# else:
# return response.read()
# except urllib2.URLError as error:
# # Connection was refused. ConsoleGrid may be down, or something bad
# # may have happened
# logger.debug(
# "No image was downloaded due to an error with ConsoleGrid"
# )
#
# def download_image(self, url):
# """
# Downloads the image at 'url' and returns the path to the image on the
# local filesystem
# """
# (path, headers) = urllib.urlretrieve(url)
# return path
#
# def image_for_rom(self, rom):
# image_url = self.find_url_for_rom(rom)
# if image_url is None or image_url == "":
# return None
# return self.download_image(image_url)
#
# Path: ice/gridproviders/local_provider.py
# class LocalProvider(grid_image_provider.GridImageProvider):
#
# def valid_extensions(self):
# return ['.png', '.jpg', '.jpeg', '.tiff']
#
# def image_for_rom(self, rom):
# """
# Checks the filesystem for images for a given ROM. To do so, it makes
# use of a consoles 'images' directory. If it finds an image in that
# directory with the same name as the ROMs name then it will return that.
# """
# img_dir = rom.console.images_directory
# if img_dir == "":
# logger.debug(
# "[%s] No images directory specified for %s" %
# (rom.name, rom.console.shortname)
# )
# return None
# for extension in self.valid_extensions():
# filename = rom.name + extension
# path = os.path.join(img_dir, filename)
# if os.path.isfile(path):
# # We found a valid path, return it
# return path
# # We went through all of the possible filenames for this ROM and a
# # file didnt exist with any of them. There is no image for this ROM in
# # the consoles image directory
# return None
#
# Path: ice/settings.py
# def find_settings_file(name, filesystem):
# def settings_file_path(name, filesystem, override = None):
# def load_configuration(filesystem, override = None):
# def load_emulators(filesystem, override = None):
# def load_consoles(emulators, filesystem, override = None):
# def load_app_settings(filesystem, file_overrides = {}):
# def image_provider(config):
which might include code, classes, or functions. Output only the next line. | result = settings.image_provider(config) |
Predict the next line after this snippet: <|code_start|>
class ConsoleGridProvider(grid_image_provider.GridImageProvider):
@staticmethod
def api_url():
return "http://consolegrid.com/api/top_picture"
@staticmethod
def is_enabled():
# TODO: Return True/False based on the current network status
return True
def consolegrid_top_picture_url(self, rom):
host = self.api_url()
quoted_name = urllib.quote(rom.name)
return "%s?console=%s&game=%s" % (host, rom.console.shortname, quoted_name)
def find_url_for_rom(self, rom):
"""
Determines a suitable grid image for a given ROM by hitting
ConsoleGrid.com
"""
try:
response = urllib2.urlopen(self.consolegrid_top_picture_url(rom))
if response.getcode() == 204:
name = rom.name
console = rom.console.fullname
<|code_end|>
using the current file's imports:
import sys
import os
import urllib
import urllib2
import grid_image_provider
from ice.logs import logger
and any relevant context from other files:
# Path: ice/logs.py
# STREAM_STRING_FORMAT = '%(leveltag)s%(message)s'
# FILE_STRING_FORMAT = '%(asctime)s [%(levelname)s][%(filename)s][%(funcName)s:%(lineno)s]: %(message)s'
# def is_test_stack_frame(frame):
# def is_running_in_test():
# def _tag_for_level(self, levelno):
# def filter(self, record):
# def create_stream_handler(level):
# def create_file_handler(level):
# def create_logger():
# class IceLevelTagFilter(logging.Formatter):
. Output only the next line. | logger.debug( |
Using the snippet: <|code_start|>
class ROMsTests(unittest.TestCase):
def setUp(self):
pass
@parameterized.expand([
(None, paths.default_roms_directory()),
("", paths.default_roms_directory()),
('/roms/', '/roms/'),
])
def test_roms_directory(self, config_directory, expected):
config = mock()
config.roms_directory = config_directory
self.assertEqual(roms.roms_directory(config), expected)
@parameterized.expand([
('Banjo Kazoomie', None, 'Banjo Kazoomie'),
('Banjo Kazoomie', '[Vapor]', '[Vapor] Banjo Kazoomie'),
('Game Name', '!Something!', '!Something! Game Name'),
])
def test_rom_shortcut_name(self, name, console_prefix, expected):
<|code_end|>
, determine the next line of code. You have imports:
import os
import shutil
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from pysteam import model as steam_model
from ice import model
from ice import paths
from ice import roms
from testinfra import fixtures
and context (class names, function names, or code) available:
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
#
# Path: ice/paths.py
# def application_data_directory():
# def data_file_path(filename):
# def archive_path():
# def log_file_location():
# def default_roms_directory():
#
# Path: ice/roms.py
# ICE_FLAG_TAG = "~ManagedByIce"
# def roms_directory(config):
# def rom_shortcut_name(rom):
# def rom_to_shortcut(rom):
. Output only the next line. | console = model.Console( |
Based on the snippet: <|code_start|>
class ROMsTests(unittest.TestCase):
def setUp(self):
pass
@parameterized.expand([
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import shutil
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from pysteam import model as steam_model
from ice import model
from ice import paths
from ice import roms
from testinfra import fixtures
and context (classes, functions, sometimes code) from other files:
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
#
# Path: ice/paths.py
# def application_data_directory():
# def data_file_path(filename):
# def archive_path():
# def log_file_location():
# def default_roms_directory():
#
# Path: ice/roms.py
# ICE_FLAG_TAG = "~ManagedByIce"
# def roms_directory(config):
# def rom_shortcut_name(rom):
# def rom_to_shortcut(rom):
. Output only the next line. | (None, paths.default_roms_directory()), |
Predict the next line for this snippet: <|code_start|>
class ROMsTests(unittest.TestCase):
def setUp(self):
pass
@parameterized.expand([
(None, paths.default_roms_directory()),
("", paths.default_roms_directory()),
('/roms/', '/roms/'),
])
def test_roms_directory(self, config_directory, expected):
config = mock()
config.roms_directory = config_directory
<|code_end|>
with the help of current file imports:
import os
import shutil
import tempfile
import unittest
from mockito import *
from nose_parameterized import parameterized
from pysteam import model as steam_model
from ice import model
from ice import paths
from ice import roms
from testinfra import fixtures
and context from other files:
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
#
# Path: ice/paths.py
# def application_data_directory():
# def data_file_path(filename):
# def archive_path():
# def log_file_location():
# def default_roms_directory():
#
# Path: ice/roms.py
# ICE_FLAG_TAG = "~ManagedByIce"
# def roms_directory(config):
# def rom_shortcut_name(rom):
# def rom_to_shortcut(rom):
, which may contain function names, class names, or code. Output only the next line. | self.assertEqual(roms.roms_directory(config), expected) |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
# encoding: utf-8
# TODO(#368); This shouldn't be necessary as part of the app. We shouldn't be
# relying on log messages as our UI
class LogAppStateTask(object):
def __call__(self, app_settings, users, dry_run):
for emulator in app_settings.emulators:
<|code_end|>
with the help of current file imports:
from ice.logs import logger
and context from other files:
# Path: ice/logs.py
# STREAM_STRING_FORMAT = '%(leveltag)s%(message)s'
# FILE_STRING_FORMAT = '%(asctime)s [%(levelname)s][%(filename)s][%(funcName)s:%(lineno)s]: %(message)s'
# def is_test_stack_frame(frame):
# def is_running_in_test():
# def _tag_for_level(self, levelno):
# def filter(self, record):
# def create_stream_handler(level):
# def create_file_handler(level):
# def create_logger():
# class IceLevelTagFilter(logging.Formatter):
, which may contain function names, class names, or code. Output only the next line. | logger.info("Detected Emulator: %s" % emulator.name) |
Predict the next line after this snippet: <|code_start|> def _create_default_directories(self):
"""This method creates all of the directories that Steam normally creates
for a user."""
# Assert that the userdata directory is there
assert(os.path.exists(self.steam_fixture.get_steam().userdata_directory))
# The data directory for our user, which acts as the root of userdata
# hierarchy
os.mkdir(paths.user_specific_data_directory(self.get_context()))
# The "config" directory, which stores shortcuts.vdf and the grid directory
# TODO: There should probably be a helper function for this in pysteam
os.mkdir(os.path.join(paths.user_specific_data_directory(self.get_context()), "config"))
# The directory which stores grid images
os.mkdir(paths.custom_images_directory(self.get_context()))
def get_user_id(self):
return self.uid
def get_context(self):
return steam_model.LocalUserContext(self.steam_fixture.get_steam(), self.uid)
def DataFixture(data):
"""Creates a pseudo class out of the parameter dictionary `data`, and
populates the object such that calling `object.foo` will return `bar` if the
input dictionary has a key/value pair `"foo": "bar"`."""
# Transform keys to remove invalid characters, like ` `.
assert all([all([c.isalnum() or c=='_' for c in key]) for key in data.keys()]), "All dictionary keys must be valid python variable names"
DataPseudoClass = collections.namedtuple('DataPseudoClass', data.keys())
return DataPseudoClass(**data)
emulators = DataFixture({
<|code_end|>
using the current file's imports:
import collections
import os
import random
import shutil
import tempfile
from pysteam import model as steam_model
from pysteam import paths
from ice import model
from ice import roms
and any relevant context from other files:
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
#
# Path: ice/roms.py
# ICE_FLAG_TAG = "~ManagedByIce"
# def roms_directory(config):
# def rom_shortcut_name(rom):
# def rom_to_shortcut(rom):
. Output only the next line. | "mednafen": model.Emulator( |
Here is a snippet: <|code_start|>
consoles = DataFixture({
"nes": model.Console(
fullname = 'Nintendo Entertainment System',
shortname = 'NES',
extensions = 'nes',
custom_roms_directory = '',
prefix = '[NES]',
icon = '/consoles/icons/nes.png',
images_directory = '/consoles/grid/nes/',
emulator = emulators.mednafen
),
"snes": model.Console(
fullname = 'Super Nintendo',
shortname = 'SNES',
extensions = 'snes',
custom_roms_directory = '/external/consoles/roms/snes',
prefix = '',
icon = '/consoles/icons/snes.png',
images_directory = '/consoles/grid/snes/',
emulator = emulators.mednafen
),
# HACK: We're cheating a bit here. For a tiny while Ice would automatically
# add an extra category to shortcuts to let itself know whether it should be
# responsibile for removing said shortcut if the exe went missing. We don't
# do this anymore, but we may still want to test things which have the
# special category attached. To work around this, we make a test console
# with a fullname set to the flag, so that the automatically added category
# has the flag tag.
"flagged": model.Console(
<|code_end|>
. Write the next line using the current file imports:
import collections
import os
import random
import shutil
import tempfile
from pysteam import model as steam_model
from pysteam import paths
from ice import model
from ice import roms
and context from other files:
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
#
# Path: ice/roms.py
# ICE_FLAG_TAG = "~ManagedByIce"
# def roms_directory(config):
# def rom_shortcut_name(rom):
# def rom_to_shortcut(rom):
, which may include functions, classes, or code. Output only the next line. | fullname = roms.ICE_FLAG_TAG, |
Given the code snippet: <|code_start|>#!/usr/bin/env python
# encoding: utf-8
"""
local_provider.py
Created by Scott on 2013-12-26.
Copyright (c) 2013 Scott Rice. All rights reserved.
"""
class LocalProvider(grid_image_provider.GridImageProvider):
def valid_extensions(self):
return ['.png', '.jpg', '.jpeg', '.tiff']
def image_for_rom(self, rom):
"""
Checks the filesystem for images for a given ROM. To do so, it makes
use of a consoles 'images' directory. If it finds an image in that
directory with the same name as the ROMs name then it will return that.
"""
img_dir = rom.console.images_directory
if img_dir == "":
<|code_end|>
, generate the next line using the imports in this file:
import sys
import os
import grid_image_provider
from ice.logs import logger
and context (functions, classes, or occasionally code) from other files:
# Path: ice/logs.py
# STREAM_STRING_FORMAT = '%(leveltag)s%(message)s'
# FILE_STRING_FORMAT = '%(asctime)s [%(levelname)s][%(filename)s][%(funcName)s:%(lineno)s]: %(message)s'
# def is_test_stack_frame(frame):
# def is_running_in_test():
# def _tag_for_level(self, levelno):
# def filter(self, record):
# def create_stream_handler(level):
# def create_file_handler(level):
# def create_logger():
# class IceLevelTagFilter(logging.Formatter):
. Output only the next line. | logger.debug( |
Continue the code snippet: <|code_start|># encoding: utf-8
"""
local_provider_tests.py
Created by Scott on 2014-08-18.
Copyright (c) 2014 Scott Rice. All rights reserved.
"""
class CombinedProviderTests(unittest.TestCase):
def test_is_enabled_returns_false_when_no_providers_are_enabled(self):
provider1 = mock()
when(provider1).is_enabled().thenReturn(False)
provider2 = mock()
when(provider2).is_enabled().thenReturn(False)
<|code_end|>
. Use current file imports:
import unittest
from mockito import *
from ice.gridproviders.combined_provider import CombinedProvider
and context (classes, functions, or code) from other files:
# Path: ice/gridproviders/combined_provider.py
# class CombinedProvider(grid_image_provider.GridImageProvider):
#
# def __init__(self, *args):
# """
# Creates a CombinedProvider out of the providers that were passed in `args`
#
# ORDER MATTERS. `image_for_rom` will return the first non-None result from
# a provider. So if you want to check the users filesystem but check
# ConsoleGrid if nothing is found then you would do
#
# CombinedProvider(LocalProvider(), ConsoleGridProvider())
#
# But if you wanted to, say, use ConsoleGrid but show a placeholder image in
# the case of an error you would do
#
# CombinedProvider(ConsoleGridProvider(), PlaceholderProvider())
# """
# self.providers = args
#
# def _enabled_providers(self):
# return filter(lambda provider: provider.is_enabled(), self.providers)
#
# def is_enabled(self):
# """
# Returns True if any child provider is enabled
# """
# return len(self._enabled_providers()) > 0
#
# def image_for_rom(self, rom):
# """
# Returns the first image found
# """
# return reduce(lambda image, provider: image if image else provider.image_for_rom(
# rom), self._enabled_providers(), None)
. Output only the next line. | combined_provider = CombinedProvider(provider1, provider2) |
Predict the next line after this snippet: <|code_start|>
class FilesystemTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
<|code_end|>
using the current file's imports:
import os
import shutil
import sys
import tempfile
import unittest
from nose_parameterized import parameterized
from ice import filesystem
and any relevant context from other files:
# Path: ice/filesystem.py
# class RealFilesystem(object):
# class FakeFilesystem(object):
# def create_directories(self, path):
# def path_exists(self, path):
# def is_directory(self, path):
# def is_file(self, path):
# def is_writable(self, path):
# def _paths_in_directory(self, directory, incl_subdirs=False):
# def files_in_directory(self, directory, include_subdirectories=False):
# def subdirectories_of_directory(self, directory, recursive=False):
# def __init__(self, root):
# def adjusted_path(self, path):
# def create_directories(self, path):
# def path_exists(self, path):
# def is_directory(self, path):
# def is_file(self, path):
# def is_writable(self, path):
# def files_in_directory(self, directory, include_subdirectories=False):
# def subdirectories_of_directory(self, directory, recursive=False):
. Output only the next line. | self.filesystem = filesystem.RealFilesystem() |
Predict the next line for this snippet: <|code_start|>
def new(self, backing_store, identifier):
fullname = identifier
shortname = backing_store.get(identifier, 'nickname', fullname)
extensions = backing_store.get(identifier, 'extensions', "")
custom_roms_directory = backing_store.get(identifier, 'roms directory', "")
prefix = backing_store.get(identifier, 'prefix', "")
icon = backing_store.get(identifier, 'icon', "")
images_directory = backing_store.get(identifier, 'images directory', "")
emulator_identifier = backing_store.get(identifier, 'emulator', "")
icon = os.path.expanduser(icon)
custom_roms_directory = os.path.expanduser(custom_roms_directory)
images_directory = os.path.expanduser(images_directory)
emulator = self.emulators.find(emulator_identifier)
return model.Console(
fullname,
shortname,
extensions,
custom_roms_directory,
prefix,
icon,
images_directory,
emulator,
)
def verify(self, console):
if console.emulator is None:
<|code_end|>
with the help of current file imports:
import os
from ice.logs import logger
from ice import model
and context from other files:
# Path: ice/logs.py
# STREAM_STRING_FORMAT = '%(leveltag)s%(message)s'
# FILE_STRING_FORMAT = '%(asctime)s [%(levelname)s][%(filename)s][%(funcName)s:%(lineno)s]: %(message)s'
# def is_test_stack_frame(frame):
# def is_running_in_test():
# def _tag_for_level(self, levelno):
# def filter(self, record):
# def create_stream_handler(level):
# def create_file_handler(level):
# def create_logger():
# class IceLevelTagFilter(logging.Formatter):
#
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
, which may contain function names, class names, or code. Output only the next line. | logger.debug("No emulator provided for console `%s`" % console.fullname) |
Predict the next line for this snippet: <|code_start|>
class ConsoleBackedObjectAdapter(object):
def __init__(self, emulators):
self.emulators = emulators
def new(self, backing_store, identifier):
fullname = identifier
shortname = backing_store.get(identifier, 'nickname', fullname)
extensions = backing_store.get(identifier, 'extensions', "")
custom_roms_directory = backing_store.get(identifier, 'roms directory', "")
prefix = backing_store.get(identifier, 'prefix', "")
icon = backing_store.get(identifier, 'icon', "")
images_directory = backing_store.get(identifier, 'images directory', "")
emulator_identifier = backing_store.get(identifier, 'emulator', "")
icon = os.path.expanduser(icon)
custom_roms_directory = os.path.expanduser(custom_roms_directory)
images_directory = os.path.expanduser(images_directory)
emulator = self.emulators.find(emulator_identifier)
<|code_end|>
with the help of current file imports:
import os
from ice.logs import logger
from ice import model
and context from other files:
# Path: ice/logs.py
# STREAM_STRING_FORMAT = '%(leveltag)s%(message)s'
# FILE_STRING_FORMAT = '%(asctime)s [%(levelname)s][%(filename)s][%(funcName)s:%(lineno)s]: %(message)s'
# def is_test_stack_frame(frame):
# def is_running_in_test():
# def _tag_for_level(self, levelno):
# def filter(self, record):
# def create_stream_handler(level):
# def create_file_handler(level):
# def create_logger():
# class IceLevelTagFilter(logging.Formatter):
#
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
, which may contain function names, class names, or code. Output only the next line. | return model.Console( |
Given the code snippet: <|code_start|>
class ROMFinderTests(unittest.TestCase):
def setUp(self):
self.mock_config = mock()
self.mock_filesystem = mock()
self.mock_parser = mock()
self.rom_finder = rom_finder.ROMFinder(
self.mock_filesystem,
self.mock_parser,
)
def _dummy_console(self, extensions, emu, roms_directory = ""):
<|code_end|>
, generate the next line using the imports in this file:
import os
import shutil
import tempfile
import unittest
from mockito import *
from ice import model
from ice import rom_finder
and context (functions, classes, or occasionally code) from other files:
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
#
# Path: ice/rom_finder.py
# class ROMFinder(object):
# def __init__(self, filesystem, parser):
# def rom_for_path(self, console, path):
# def _search(self, roms_directory, console):
# def roms_for_console(self, config, console):
# def roms_for_consoles(self, config, consoles):
. Output only the next line. | return model.Console("Nintendo", "NES", extensions, roms_directory, "", "", "", emu) |
Predict the next line after this snippet: <|code_start|>
class ROMFinderTests(unittest.TestCase):
def setUp(self):
self.mock_config = mock()
self.mock_filesystem = mock()
self.mock_parser = mock()
<|code_end|>
using the current file's imports:
import os
import shutil
import tempfile
import unittest
from mockito import *
from ice import model
from ice import rom_finder
and any relevant context from other files:
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
#
# Path: ice/rom_finder.py
# class ROMFinder(object):
# def __init__(self, filesystem, parser):
# def rom_for_path(self, console, path):
# def _search(self, roms_directory, console):
# def roms_for_console(self, config, console):
# def roms_for_consoles(self, config, consoles):
. Output only the next line. | self.rom_finder = rom_finder.ROMFinder( |
Given snippet: <|code_start|>
class SteamGridUpdaterTests(unittest.TestCase):
def setUp(self):
self.steam_fixture = fixtures.SteamFixture()
self.user_fixture = fixtures.UserFixture(self.steam_fixture)
self.mock_provider = mock()
self.updater = steam_grid_updater.SteamGridUpdater(
self.mock_provider,
)
def tearDown(self):
self.user_fixture.tearDown()
self.steam_fixture.tearDown()
def test_updater_sets_image_if_provider_has_one(self):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import tempfile
import unittest
from mockito import *
from pysteam import grid
from pysteam import shortcuts
from ice import model
from ice import roms
from ice import steam_grid_updater
from testinfra import fixtures
and context:
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
#
# Path: ice/roms.py
# ICE_FLAG_TAG = "~ManagedByIce"
# def roms_directory(config):
# def rom_shortcut_name(rom):
# def rom_to_shortcut(rom):
#
# Path: ice/steam_grid_updater.py
# class SteamGridUpdater(object):
# def __init__(self, provider):
# def update_rom_artwork(self, user, rom, dry_run=False):
# def update_artwork_for_rom_collection(self, user, roms, dry_run=False):
which might include code, classes, or functions. Output only the next line. | rom = model.ROM(name = 'Game1', path = '/Path/to/game1', console = fixtures.consoles.flagged) |
Based on the snippet: <|code_start|>
class SteamGridUpdaterTests(unittest.TestCase):
def setUp(self):
self.steam_fixture = fixtures.SteamFixture()
self.user_fixture = fixtures.UserFixture(self.steam_fixture)
self.mock_provider = mock()
self.updater = steam_grid_updater.SteamGridUpdater(
self.mock_provider,
)
def tearDown(self):
self.user_fixture.tearDown()
self.steam_fixture.tearDown()
def test_updater_sets_image_if_provider_has_one(self):
rom = model.ROM(name = 'Game1', path = '/Path/to/game1', console = fixtures.consoles.flagged)
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import tempfile
import unittest
from mockito import *
from pysteam import grid
from pysteam import shortcuts
from ice import model
from ice import roms
from ice import steam_grid_updater
from testinfra import fixtures
and context (classes, functions, sometimes code) from other files:
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
#
# Path: ice/roms.py
# ICE_FLAG_TAG = "~ManagedByIce"
# def roms_directory(config):
# def rom_shortcut_name(rom):
# def rom_to_shortcut(rom):
#
# Path: ice/steam_grid_updater.py
# class SteamGridUpdater(object):
# def __init__(self, provider):
# def update_rom_artwork(self, user, rom, dry_run=False):
# def update_artwork_for_rom_collection(self, user, roms, dry_run=False):
. Output only the next line. | shortcut = roms.rom_to_shortcut(rom) |
Using the snippet: <|code_start|>
class SteamGridUpdaterTests(unittest.TestCase):
def setUp(self):
self.steam_fixture = fixtures.SteamFixture()
self.user_fixture = fixtures.UserFixture(self.steam_fixture)
self.mock_provider = mock()
<|code_end|>
, determine the next line of code. You have imports:
import os
import tempfile
import unittest
from mockito import *
from pysteam import grid
from pysteam import shortcuts
from ice import model
from ice import roms
from ice import steam_grid_updater
from testinfra import fixtures
and context (class names, function names, or code) available:
# Path: ice/model.py
# ROM = collections.namedtuple('ROM', [
# 'name',
# 'path',
# 'console',
# ])
#
# Path: ice/roms.py
# ICE_FLAG_TAG = "~ManagedByIce"
# def roms_directory(config):
# def rom_shortcut_name(rom):
# def rom_to_shortcut(rom):
#
# Path: ice/steam_grid_updater.py
# class SteamGridUpdater(object):
# def __init__(self, provider):
# def update_rom_artwork(self, user, rom, dry_run=False):
# def update_artwork_for_rom_collection(self, user, roms, dry_run=False):
. Output only the next line. | self.updater = steam_grid_updater.SteamGridUpdater( |
Continue the code snippet: <|code_start|>"""
backed_object_manager_tests.py
Created by Scott on 2014-08-20.
Copyright (c) 2014 Scott Rice. All rights reserved.
"""
# The fact that this class exists should probably signal that my current API
# isn't perfect...
class BackedObjectBackedObjectAdapter(object):
def __init__(self):
# Verifier that accepts everything by default
self.verifier = lambda obj: True
def new(self, backing_store, identifier):
<|code_end|>
. Use current file imports:
from mockito import *
from ice.persistence.backed_object import BackedObject
from ice.persistence.backed_object_manager import BackedObjectManager
from ice.persistence.config_file_backing_store import ConfigFileBackingStore
import os
import shutil
import tempfile
import unittest
and context (classes, functions, or code) from other files:
# Path: ice/persistence/backed_object.py
# class BackedObject(object):
#
# def __init__(self, backing_store, identifier):
# self.backing_store = backing_store
# self.identifier = identifier
#
# def backed_value(self, key, default=None):
# return self.backing_store.get(self.identifier, key, default)
#
# Path: ice/persistence/backed_object_manager.py
# class BackedObjectManager(object):
#
# def __init__(self, backing_store, model_adapter):
# self.backing_store = backing_store
# self.adapter = model_adapter
# self.managed_objects = {}
#
# def __iter__(self):
# return iter(self.all())
#
# def all(self):
# # Since not all identifiers are guaranteed to return full objects, we
# # filter out any `None` elements before returning
# return filter(None, map(self.find, self.backing_store.identifiers()));
#
# def new(self, identifier):
# obj = self.adapter.new(self.backing_store, identifier)
# if self.adapter.verify(obj):
# return obj
#
# def find(self, identifier):
# if identifier not in self.backing_store.identifiers():
# return None
#
# # See if we have a cached version from before
# if identifier in self.managed_objects:
# return self.managed_objects[identifier]
#
# # If not, create it lazily
# obj = self.new(identifier)
# self.managed_objects[identifier] = obj
# return obj
#
# def set_object_for_identifier(self, obj, identifier):
# self.managed_objects[identifier] = obj
# # Ensure that the identifier exists in the backing store before we ask
# # the adapter to save it
# if not self.backing_store.has_identifier(identifier):
# self.backing_store.add_identifier(identifier)
# # Make the adapter do the actual saving
# self.adapter.save_in_store(self.backing_store, identifier, obj)
#
# Path: ice/persistence/config_file_backing_store.py
# class ConfigFileBackingStore(backing_store.BackingStore):
#
# def __init__(self, path):
# super(ConfigFileBackingStore, self).__init__(path)
# self.configParser = ConfigParser.RawConfigParser()
# self.configParser.read(self.path)
#
# def identifiers(self):
# return self.configParser.sections()
#
# def add_identifier(self, ident):
# try:
# self.configParser.add_section(ident)
# except ConfigParser.DuplicateSectionError:
# raise ValueError("The identifier `%s` already exists" % str(ident))
#
# def remove_identifier(self, ident):
# self.configParser.remove_section(ident)
#
# def keys(self, ident):
# try:
# return self.configParser.options(ident)
# except ConfigParser.NoSectionError:
# raise ValueError("No identifier named `%s` exists" % str(ident))
#
# def get(self, ident, key, default=None):
# try:
# val = self.configParser.get(ident, key.lower())
# return val
# except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
# return default
#
# def set(self, ident, key, value):
# self.configParser.set(ident, key.lower(), value)
#
# def save(self):
# try:
# with open(self.path, "w") as configFile:
# self.configParser.write(configFile)
# except IOError:
# raise IOError("Cannot save data to `%s`. Permission Denied")
. Output only the next line. | return BackedObject(backing_store, identifier) |
Predict the next line for this snippet: <|code_start|>Created by Scott on 2014-08-20.
Copyright (c) 2014 Scott Rice. All rights reserved.
"""
# The fact that this class exists should probably signal that my current API
# isn't perfect...
class BackedObjectBackedObjectAdapter(object):
def __init__(self):
# Verifier that accepts everything by default
self.verifier = lambda obj: True
def new(self, backing_store, identifier):
return BackedObject(backing_store, identifier)
def verify(self, obj):
return self.verifier(obj)
def save_in_store(self, backing_store, identifier, obj):
pass
class BackedObjectManagerTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.tempfile = os.path.join(self.tempdir, "test.ini")
self.backing_store = ConfigFileBackingStore(self.tempfile)
self.adapter = BackedObjectBackedObjectAdapter()
<|code_end|>
with the help of current file imports:
from mockito import *
from ice.persistence.backed_object import BackedObject
from ice.persistence.backed_object_manager import BackedObjectManager
from ice.persistence.config_file_backing_store import ConfigFileBackingStore
import os
import shutil
import tempfile
import unittest
and context from other files:
# Path: ice/persistence/backed_object.py
# class BackedObject(object):
#
# def __init__(self, backing_store, identifier):
# self.backing_store = backing_store
# self.identifier = identifier
#
# def backed_value(self, key, default=None):
# return self.backing_store.get(self.identifier, key, default)
#
# Path: ice/persistence/backed_object_manager.py
# class BackedObjectManager(object):
#
# def __init__(self, backing_store, model_adapter):
# self.backing_store = backing_store
# self.adapter = model_adapter
# self.managed_objects = {}
#
# def __iter__(self):
# return iter(self.all())
#
# def all(self):
# # Since not all identifiers are guaranteed to return full objects, we
# # filter out any `None` elements before returning
# return filter(None, map(self.find, self.backing_store.identifiers()));
#
# def new(self, identifier):
# obj = self.adapter.new(self.backing_store, identifier)
# if self.adapter.verify(obj):
# return obj
#
# def find(self, identifier):
# if identifier not in self.backing_store.identifiers():
# return None
#
# # See if we have a cached version from before
# if identifier in self.managed_objects:
# return self.managed_objects[identifier]
#
# # If not, create it lazily
# obj = self.new(identifier)
# self.managed_objects[identifier] = obj
# return obj
#
# def set_object_for_identifier(self, obj, identifier):
# self.managed_objects[identifier] = obj
# # Ensure that the identifier exists in the backing store before we ask
# # the adapter to save it
# if not self.backing_store.has_identifier(identifier):
# self.backing_store.add_identifier(identifier)
# # Make the adapter do the actual saving
# self.adapter.save_in_store(self.backing_store, identifier, obj)
#
# Path: ice/persistence/config_file_backing_store.py
# class ConfigFileBackingStore(backing_store.BackingStore):
#
# def __init__(self, path):
# super(ConfigFileBackingStore, self).__init__(path)
# self.configParser = ConfigParser.RawConfigParser()
# self.configParser.read(self.path)
#
# def identifiers(self):
# return self.configParser.sections()
#
# def add_identifier(self, ident):
# try:
# self.configParser.add_section(ident)
# except ConfigParser.DuplicateSectionError:
# raise ValueError("The identifier `%s` already exists" % str(ident))
#
# def remove_identifier(self, ident):
# self.configParser.remove_section(ident)
#
# def keys(self, ident):
# try:
# return self.configParser.options(ident)
# except ConfigParser.NoSectionError:
# raise ValueError("No identifier named `%s` exists" % str(ident))
#
# def get(self, ident, key, default=None):
# try:
# val = self.configParser.get(ident, key.lower())
# return val
# except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
# return default
#
# def set(self, ident, key, value):
# self.configParser.set(ident, key.lower(), value)
#
# def save(self):
# try:
# with open(self.path, "w") as configFile:
# self.configParser.write(configFile)
# except IOError:
# raise IOError("Cannot save data to `%s`. Permission Denied")
, which may contain function names, class names, or code. Output only the next line. | self.manager = BackedObjectManager(self.backing_store, self.adapter) |
Given the code snippet: <|code_start|>backed_object_manager_tests.py
Created by Scott on 2014-08-20.
Copyright (c) 2014 Scott Rice. All rights reserved.
"""
# The fact that this class exists should probably signal that my current API
# isn't perfect...
class BackedObjectBackedObjectAdapter(object):
def __init__(self):
# Verifier that accepts everything by default
self.verifier = lambda obj: True
def new(self, backing_store, identifier):
return BackedObject(backing_store, identifier)
def verify(self, obj):
return self.verifier(obj)
def save_in_store(self, backing_store, identifier, obj):
pass
class BackedObjectManagerTests(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.tempfile = os.path.join(self.tempdir, "test.ini")
<|code_end|>
, generate the next line using the imports in this file:
from mockito import *
from ice.persistence.backed_object import BackedObject
from ice.persistence.backed_object_manager import BackedObjectManager
from ice.persistence.config_file_backing_store import ConfigFileBackingStore
import os
import shutil
import tempfile
import unittest
and context (functions, classes, or occasionally code) from other files:
# Path: ice/persistence/backed_object.py
# class BackedObject(object):
#
# def __init__(self, backing_store, identifier):
# self.backing_store = backing_store
# self.identifier = identifier
#
# def backed_value(self, key, default=None):
# return self.backing_store.get(self.identifier, key, default)
#
# Path: ice/persistence/backed_object_manager.py
# class BackedObjectManager(object):
#
# def __init__(self, backing_store, model_adapter):
# self.backing_store = backing_store
# self.adapter = model_adapter
# self.managed_objects = {}
#
# def __iter__(self):
# return iter(self.all())
#
# def all(self):
# # Since not all identifiers are guaranteed to return full objects, we
# # filter out any `None` elements before returning
# return filter(None, map(self.find, self.backing_store.identifiers()));
#
# def new(self, identifier):
# obj = self.adapter.new(self.backing_store, identifier)
# if self.adapter.verify(obj):
# return obj
#
# def find(self, identifier):
# if identifier not in self.backing_store.identifiers():
# return None
#
# # See if we have a cached version from before
# if identifier in self.managed_objects:
# return self.managed_objects[identifier]
#
# # If not, create it lazily
# obj = self.new(identifier)
# self.managed_objects[identifier] = obj
# return obj
#
# def set_object_for_identifier(self, obj, identifier):
# self.managed_objects[identifier] = obj
# # Ensure that the identifier exists in the backing store before we ask
# # the adapter to save it
# if not self.backing_store.has_identifier(identifier):
# self.backing_store.add_identifier(identifier)
# # Make the adapter do the actual saving
# self.adapter.save_in_store(self.backing_store, identifier, obj)
#
# Path: ice/persistence/config_file_backing_store.py
# class ConfigFileBackingStore(backing_store.BackingStore):
#
# def __init__(self, path):
# super(ConfigFileBackingStore, self).__init__(path)
# self.configParser = ConfigParser.RawConfigParser()
# self.configParser.read(self.path)
#
# def identifiers(self):
# return self.configParser.sections()
#
# def add_identifier(self, ident):
# try:
# self.configParser.add_section(ident)
# except ConfigParser.DuplicateSectionError:
# raise ValueError("The identifier `%s` already exists" % str(ident))
#
# def remove_identifier(self, ident):
# self.configParser.remove_section(ident)
#
# def keys(self, ident):
# try:
# return self.configParser.options(ident)
# except ConfigParser.NoSectionError:
# raise ValueError("No identifier named `%s` exists" % str(ident))
#
# def get(self, ident, key, default=None):
# try:
# val = self.configParser.get(ident, key.lower())
# return val
# except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
# return default
#
# def set(self, ident, key, value):
# self.configParser.set(ident, key.lower(), value)
#
# def save(self):
# try:
# with open(self.path, "w") as configFile:
# self.configParser.write(configFile)
# except IOError:
# raise IOError("Cannot save data to `%s`. Permission Denied")
. Output only the next line. | self.backing_store = ConfigFileBackingStore(self.tempfile) |
Based on the snippet: <|code_start|>#!/usr/bin/env python
# encoding: utf-8
class UpdateGridImagesTask(object):
def __init__(self, rom_finder):
self.rom_finder = rom_finder
def __call__(self, app_settings, users, dry_run):
roms = self.rom_finder.roms_for_consoles(
app_settings.config,
app_settings.consoles,
)
<|code_end|>
, predict the immediate next line with the help of imports:
from ice import settings
from ice import steam_grid_updater
from ice.logs import logger
and context (classes, functions, sometimes code) from other files:
# Path: ice/settings.py
# def find_settings_file(name, filesystem):
# def settings_file_path(name, filesystem, override = None):
# def load_configuration(filesystem, override = None):
# def load_emulators(filesystem, override = None):
# def load_consoles(emulators, filesystem, override = None):
# def load_app_settings(filesystem, file_overrides = {}):
# def image_provider(config):
#
# Path: ice/steam_grid_updater.py
# class SteamGridUpdater(object):
# def __init__(self, provider):
# def update_rom_artwork(self, user, rom, dry_run=False):
# def update_artwork_for_rom_collection(self, user, roms, dry_run=False):
#
# Path: ice/logs.py
# STREAM_STRING_FORMAT = '%(leveltag)s%(message)s'
# FILE_STRING_FORMAT = '%(asctime)s [%(levelname)s][%(filename)s][%(funcName)s:%(lineno)s]: %(message)s'
# def is_test_stack_frame(frame):
# def is_running_in_test():
# def _tag_for_level(self, levelno):
# def filter(self, record):
# def create_stream_handler(level):
# def create_file_handler(level):
# def create_logger():
# class IceLevelTagFilter(logging.Formatter):
. Output only the next line. | provider = settings.image_provider(app_settings.config) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.