| Instruction (string, lengths 362–7.83k) | output_code (string, lengths 1–945) |
|---|---|
Based on the snippet: <|code_start|> def __init__(self, character, data):
NONE = 'None'
NOICON = 'inv_misc_questionmark' # The infamous macro 'question mark' icon, because Blizzard uses it in this situation.
self._character = character
self._data = data
self.build = data['build']
self.icon = data.get('icon') if self.build.strip('0') else NOICON
self.name = data['name'] if self.build.strip('0') else NONE
self.selected = data.get('selected', False)
self.glyphs = {
'prime': [],
'major': [],
'minor': [],
}
if 'glyphs' in data:
for type_ in self.glyphs.keys():
self.glyphs[type_] = [Glyph(self, glyph) for glyph in data['glyphs'][type_]]
Tree = collections.namedtuple('Tree', ('points', 'total',))
self.trees = [Tree(**tree) for tree in data['trees']]
def __str__(self):
return self.name + ' (%d/%d/%d)' % tuple(map(operator.attrgetter('total'), self.trees))
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, str(self))
def get_icon_url(self, size='large'):
<|code_end|>
, predict the immediate next line with the help of imports:
import operator
import collections
import datetime
import simplejson as json
import json
from .enums import RACE, CLASS, QUALITY, RACE_TO_FACTION
from .utils import make_icon_url, normalize, make_connection
and context (classes, functions, sometimes code) from other files:
# Path: battlenet/enums.py
# RACE = {
# 1: 'Human',
# 2: 'Orc',
# 3: 'Dwarf',
# 4: 'Night Elf',
# 5: 'Undead',
# 6: 'Tauren',
# 7: 'Gnome',
# 8: 'Troll',
# 9: 'Goblin',
# 10: 'Blood Elf',
# 11: 'Draenei',
# 22: 'Worgen',
# }
#
# CLASS = {
# 1: 'Warrior',
# 2: 'Paladin',
# 3: 'Hunter',
# 4: 'Rogue',
# 5: 'Priest',
# 7: 'Shaman',
# 8: 'Mage',
# 9: 'Warlock',
# 11: 'Druid',
# 6: 'Death Knight',
# }
#
# QUALITY = {
# 1: 'Common',
# 2: 'Uncommon',
# 3: 'Rare',
# 4: 'Epic',
# 5: 'Legendary',
# }
#
# RACE_TO_FACTION = {
# 1: 'Alliance',
# 2: 'Horde',
# 3: 'Alliance',
# 4: 'Alliance',
# 5: 'Horde',
# 6: 'Horde',
# 7: 'Alliance',
# 8: 'Horde',
# 9: 'Horde',
# 10: 'Horde',
# 11: 'Alliance',
# 22: 'Alliance',
# }
#
# Path: battlenet/utils.py
# def make_icon_url(region, icon, size='large'):
# if not icon:
# return ''
#
# if size == 'small':
# size = 18
# else:
# size = 56
#
# return 'http://%s.media.blizzard.com/wow/icons/%d/%s.jpg' % (region, size, icon)
#
# def normalize(name):
# if not isinstance(name, unicode):
# name = name.decode('utf8')
#
# return unicodedata.normalize('NFKC', name).encode('utf8')
#
# def make_connection():
# if not hasattr(make_connection, 'Connection'):
# from .connection import Connection
# make_connection.Connection = Connection
#
# return make_connection.Connection()
. Output only the next line. | return make_icon_url(self._character.region, self.icon, size) |
Based on the snippet: <|code_start|> PROGRESSION, ACHIEVEMENTS]
def __init__(self, region, realm=None, name=None, data=None, fields=None, connection=None):
self.region = region
self.connection = connection or make_connection()
self._fields = set(fields or [])
if realm and name and not data:
data = self.connection.get_character(region, realm, name, raw=True, fields=self._fields)
self._populate_data(data)
def __str__(self):
return self.name
def __repr__(self):
return '<%s: %s@%s>' % (self.__class__.__name__, self.name, self._data['realm'])
def __eq__(self, other):
if not isinstance(other, Character):
return False
return self.connection == other.connection \
and self.name == other.name \
and self.get_realm_name() == other.get_realm_name()
def _populate_data(self, data):
self._data = data
<|code_end|>
, predict the immediate next line with the help of imports:
import operator
import collections
import datetime
import simplejson as json
import json
from .enums import RACE, CLASS, QUALITY, RACE_TO_FACTION
from .utils import make_icon_url, normalize, make_connection
and context (classes, functions, sometimes code) from other files:
# Path: battlenet/enums.py
# RACE = {
# 1: 'Human',
# 2: 'Orc',
# 3: 'Dwarf',
# 4: 'Night Elf',
# 5: 'Undead',
# 6: 'Tauren',
# 7: 'Gnome',
# 8: 'Troll',
# 9: 'Goblin',
# 10: 'Blood Elf',
# 11: 'Draenei',
# 22: 'Worgen',
# }
#
# CLASS = {
# 1: 'Warrior',
# 2: 'Paladin',
# 3: 'Hunter',
# 4: 'Rogue',
# 5: 'Priest',
# 7: 'Shaman',
# 8: 'Mage',
# 9: 'Warlock',
# 11: 'Druid',
# 6: 'Death Knight',
# }
#
# QUALITY = {
# 1: 'Common',
# 2: 'Uncommon',
# 3: 'Rare',
# 4: 'Epic',
# 5: 'Legendary',
# }
#
# RACE_TO_FACTION = {
# 1: 'Alliance',
# 2: 'Horde',
# 3: 'Alliance',
# 4: 'Alliance',
# 5: 'Horde',
# 6: 'Horde',
# 7: 'Alliance',
# 8: 'Horde',
# 9: 'Horde',
# 10: 'Horde',
# 11: 'Alliance',
# 22: 'Alliance',
# }
#
# Path: battlenet/utils.py
# def make_icon_url(region, icon, size='large'):
# if not icon:
# return ''
#
# if size == 'small':
# size = 18
# else:
# size = 56
#
# return 'http://%s.media.blizzard.com/wow/icons/%d/%s.jpg' % (region, size, icon)
#
# def normalize(name):
# if not isinstance(name, unicode):
# name = name.decode('utf8')
#
# return unicodedata.normalize('NFKC', name).encode('utf8')
#
# def make_connection():
# if not hasattr(make_connection, 'Connection'):
# from .connection import Connection
# make_connection.Connection = Connection
#
# return make_connection.Connection()
. Output only the next line. | self.name = normalize(data['name']) |
Continue the code snippet: <|code_start|> LEATHERWORKING = 'Leatherworking'
MINING = 'Mining'
Skinning = 'Skinning'
TAILORING = 'Tailoring'
ARCHAEOLOGY = 'Archaeology'
COOKING = 'Cooking'
FIRST_AID = 'First Aid'
FISHING = 'Fishing'
STATS = 'stats'
TALENTS = 'talents'
ITEMS = 'items'
REPUTATIONS = 'reputation'
TITLES = 'titles'
PROFESSIONS = 'professions'
APPEARANCE = 'appearance'
COMPANIONS = 'companions'
MOUNTS = 'mounts'
GUILD = 'guild'
QUESTS = 'quests'
PETS = 'pets'
PROGRESSION = 'progression'
ACHIEVEMENTS = 'achievements'
ALL_FIELDS = [STATS, TALENTS, ITEMS, REPUTATIONS, TITLES, PROFESSIONS,
APPEARANCE, COMPANIONS, MOUNTS, GUILD, QUESTS, PETS,
PROGRESSION, ACHIEVEMENTS]
def __init__(self, region, realm=None, name=None, data=None, fields=None, connection=None):
self.region = region
<|code_end|>
. Use current file imports:
import operator
import collections
import datetime
import simplejson as json
import json
from .enums import RACE, CLASS, QUALITY, RACE_TO_FACTION
from .utils import make_icon_url, normalize, make_connection
and context (classes, functions, or code) from other files:
# Path: battlenet/enums.py
# RACE = {
# 1: 'Human',
# 2: 'Orc',
# 3: 'Dwarf',
# 4: 'Night Elf',
# 5: 'Undead',
# 6: 'Tauren',
# 7: 'Gnome',
# 8: 'Troll',
# 9: 'Goblin',
# 10: 'Blood Elf',
# 11: 'Draenei',
# 22: 'Worgen',
# }
#
# CLASS = {
# 1: 'Warrior',
# 2: 'Paladin',
# 3: 'Hunter',
# 4: 'Rogue',
# 5: 'Priest',
# 7: 'Shaman',
# 8: 'Mage',
# 9: 'Warlock',
# 11: 'Druid',
# 6: 'Death Knight',
# }
#
# QUALITY = {
# 1: 'Common',
# 2: 'Uncommon',
# 3: 'Rare',
# 4: 'Epic',
# 5: 'Legendary',
# }
#
# RACE_TO_FACTION = {
# 1: 'Alliance',
# 2: 'Horde',
# 3: 'Alliance',
# 4: 'Alliance',
# 5: 'Horde',
# 6: 'Horde',
# 7: 'Alliance',
# 8: 'Horde',
# 9: 'Horde',
# 10: 'Horde',
# 11: 'Alliance',
# 22: 'Alliance',
# }
#
# Path: battlenet/utils.py
# def make_icon_url(region, icon, size='large'):
# if not icon:
# return ''
#
# if size == 'small':
# size = 18
# else:
# size = 56
#
# return 'http://%s.media.blizzard.com/wow/icons/%d/%s.jpg' % (region, size, icon)
#
# def normalize(name):
# if not isinstance(name, unicode):
# name = name.decode('utf8')
#
# return unicodedata.normalize('NFKC', name).encode('utf8')
#
# def make_connection():
# if not hasattr(make_connection, 'Connection'):
# from .connection import Connection
# make_connection.Connection = Connection
#
# return make_connection.Connection()
. Output only the next line. | self.connection = connection or make_connection() |
Based on the snippet: <|code_start|>
@handler('news_story', authed=True)
def get_index(id):
return dict(
<|code_end|>
, predict the immediate next line with the help of imports:
from handler import *
from model import News
and context (classes, functions, sometimes code) from other files:
# Path: model.py
# class News(object):
# creator_id = ForeignKey(Integer, 'User.id')
# headline = Unicode()
# story = Unicode()
# story_markdown = Unicode()
#
# @staticmethod
# def getLast(number=5):
# return transact.query(News).order_by(News.id.desc()).limit(number).all()
. Output only the next line. | story=News.one(id=id) |
Predict the next line for this snippet: <|code_start|>
all = {}
def handler(_tpl=None, _json=False, admin=False, authed=True):
def sub(func):
name = func.func_name
rpc = False
tpl = _tpl
json = _json
if name.startswith('get_'):
name = name[4:]
method = 'GET'
elif name.startswith('post_'):
method = 'POST'
elif name.startswith('rpc_'):
method = 'POST'
rpc = json = True
tpl = None
else:
raise Exception('All handlers must be marked get_ or post_.')
module = func.__module__.split('.')[-1]
if not module in all:
all[module] = DictObject()
setattr(handler, module, all[module])
args = func.__code__.co_varnames[:func.__code__.co_argcount]
hasId = len(args) > 0 and args[0] == 'id' and not rpc
ofunc = func
def func(id=None):
<|code_end|>
with the help of current file imports:
import os
import smtplib
from json import dumps
from flask import abort, render_template, request, session
from flask import redirect as _redirect
from werkzeug.exceptions import HTTPException
from metamodel import createLocalSession, closeLocalSession
from model import User
from urllib import quote, urlencode
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from jinja2 import Environment, FileSystemLoader
and context from other files:
# Path: metamodel.py
# def createLocalSession():
# request._session = scoped_session(sessionmaker())
# request._session.configure(bind=engine)
#
# def closeLocalSession():
# request._session.remove()
#
# Path: model.py
# class User(object):
# enabled = Boolean
# admin = Boolean
# username = Unicode(255)
# password = String(88)
# gold = Integer
# email = String
# email_verified = Boolean
# email_verification = String
# email_notifications = Boolean
# phone_number = String
# phone_verified = Boolean
# phone_verification_code = Integer
# phone_verification_tries = Integer
# phone_notifications = Boolean
# feedback_score = Integer
# feedback_positive = Integer
# feedback_negative = Integer
#
# characters = Character.relation(backref='user')
# news = News.relation(backref='creator')
# jobs = Job.relation(backref='user')
# gold_history = GoldHistory.relation(backref='user')
# feedbacks = Feedback.relation(backref='profile')
#
#
# @staticmethod
# def hash(password):
# salt = ''.join('%02x' % ord(c) for c in os.urandom(24))
# for i in range(1000):
# password = hashlib.sha1(salt + password + salt).hexdigest()
# return salt+password
#
# @staticmethod
# def checkHash(hash, password):
# salt, hash = hash[:48], hash[48:]
# for i in range(1000):
# password = hashlib.sha1(salt + password + salt).hexdigest()
# if password == hash:
# return True
# return False
#
# @staticmethod
# def add(username, password, admin):
# if User.one(enabled=True, username=username):
# return None
# with transact:
# return User.create(
# enabled=True,
# username=username,
# password=User.hash(password),
# admin=admin,
# gold=0,
# phone_number='',
# phone_verified=False,
# phone_notifications=True,
# email='',
# email_verified=False,
# email_notifications=True,
# feedback_score = 0,
# feedback_positive = 0,
# feedback_negative = 0
# )
#
# @staticmethod
# def find(username, password):
# if username == None or password == None:
# return None
# user = User.one(enabled=True, username=username)
# if user and User.checkHash(user.password, password):
# return user
# if not user and len(User.all()) == 0:
# return User.add(username, password, True)
# return None
#
# def change(self, email=None):
# if email != None and email != self.email:
# with transact:
# self.update(
# email=email,
# email_verified=False
# )
# self.generateEmailVerification()
#
# def generateEmailVerification(self):
# from handler import email
# code = ''.join('%02X' % random.randrange(256) for i in range(20))
# with transact:
# self.update(email_verification=code)
# email(self.email, 'verify', code=code)
#
# def addGold(self, amount, price):
# with transact:
# self.update(gold=self.gold+amount)
# GoldHistory.create(
# user=self,
# date=datetime.now(),
# amount=amount,
# balance=self.gold,
# dollars=price,
# job=None,
# desc=u'Bought %i gold for $%.2f' % (amount, price / 100.0)
# )
#
# def sms(self, message):
# if not self.phone_verified:
# return False
#
# sms(self.phone_number, message)
# return True
, which may contain function names, class names, or code. Output only the next line. | createLocalSession() |
Given the code snippet: <|code_start|> params = request.form if method == 'POST' else request.args
kwargs = {}
for i, arg in enumerate(args):
if i == 0 and arg == 'id' and not rpc:
continue
if arg in params:
kwargs[arg] = params[arg]
else:
assert not rpc # RPC requires all arguments.
try:
if hasId and id != None:
ret = ofunc(int(id), **kwargs)
else:
ret = ofunc(**kwargs)
except RedirectException, r:
return _redirect(r.url)
if json:
ret = dumps(ret)
elif tpl != None:
if ret == None:
ret = {}
ret['handler'] = handler
ret['request'] = request
ret['session'] = session
ret['len'] = len
ret = render_template(tpl + '.html', **ret)
csrf = '<input type="hidden" name="csrf" value="%s">' % session['csrf']
ret = ret.replace('$CSRF$', csrf)
<|code_end|>
, generate the next line using the imports in this file:
import os
import smtplib
from json import dumps
from flask import abort, render_template, request, session
from flask import redirect as _redirect
from werkzeug.exceptions import HTTPException
from metamodel import createLocalSession, closeLocalSession
from model import User
from urllib import quote, urlencode
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from jinja2 import Environment, FileSystemLoader
and context (functions, classes, or occasionally code) from other files:
# Path: metamodel.py
# def createLocalSession():
# request._session = scoped_session(sessionmaker())
# request._session.configure(bind=engine)
#
# def closeLocalSession():
# request._session.remove()
#
# Path: model.py
# class User(object):
# enabled = Boolean
# admin = Boolean
# username = Unicode(255)
# password = String(88)
# gold = Integer
# email = String
# email_verified = Boolean
# email_verification = String
# email_notifications = Boolean
# phone_number = String
# phone_verified = Boolean
# phone_verification_code = Integer
# phone_verification_tries = Integer
# phone_notifications = Boolean
# feedback_score = Integer
# feedback_positive = Integer
# feedback_negative = Integer
#
# characters = Character.relation(backref='user')
# news = News.relation(backref='creator')
# jobs = Job.relation(backref='user')
# gold_history = GoldHistory.relation(backref='user')
# feedbacks = Feedback.relation(backref='profile')
#
#
# @staticmethod
# def hash(password):
# salt = ''.join('%02x' % ord(c) for c in os.urandom(24))
# for i in range(1000):
# password = hashlib.sha1(salt + password + salt).hexdigest()
# return salt+password
#
# @staticmethod
# def checkHash(hash, password):
# salt, hash = hash[:48], hash[48:]
# for i in range(1000):
# password = hashlib.sha1(salt + password + salt).hexdigest()
# if password == hash:
# return True
# return False
#
# @staticmethod
# def add(username, password, admin):
# if User.one(enabled=True, username=username):
# return None
# with transact:
# return User.create(
# enabled=True,
# username=username,
# password=User.hash(password),
# admin=admin,
# gold=0,
# phone_number='',
# phone_verified=False,
# phone_notifications=True,
# email='',
# email_verified=False,
# email_notifications=True,
# feedback_score = 0,
# feedback_positive = 0,
# feedback_negative = 0
# )
#
# @staticmethod
# def find(username, password):
# if username == None or password == None:
# return None
# user = User.one(enabled=True, username=username)
# if user and User.checkHash(user.password, password):
# return user
# if not user and len(User.all()) == 0:
# return User.add(username, password, True)
# return None
#
# def change(self, email=None):
# if email != None and email != self.email:
# with transact:
# self.update(
# email=email,
# email_verified=False
# )
# self.generateEmailVerification()
#
# def generateEmailVerification(self):
# from handler import email
# code = ''.join('%02X' % random.randrange(256) for i in range(20))
# with transact:
# self.update(email_verification=code)
# email(self.email, 'verify', code=code)
#
# def addGold(self, amount, price):
# with transact:
# self.update(gold=self.gold+amount)
# GoldHistory.create(
# user=self,
# date=datetime.now(),
# amount=amount,
# balance=self.gold,
# dollars=price,
# job=None,
# desc=u'Bought %i gold for $%.2f' % (amount, price / 100.0)
# )
#
# def sms(self, message):
# if not self.phone_verified:
# return False
#
# sms(self.phone_number, message)
# return True
. Output only the next line. | closeLocalSession() |
Predict the next line after this snippet: <|code_start|> json = _json
if name.startswith('get_'):
name = name[4:]
method = 'GET'
elif name.startswith('post_'):
method = 'POST'
elif name.startswith('rpc_'):
method = 'POST'
rpc = json = True
tpl = None
else:
raise Exception('All handlers must be marked get_ or post_.')
module = func.__module__.split('.')[-1]
if not module in all:
all[module] = DictObject()
setattr(handler, module, all[module])
args = func.__code__.co_varnames[:func.__code__.co_argcount]
hasId = len(args) > 0 and args[0] == 'id' and not rpc
ofunc = func
def func(id=None):
createLocalSession()
if 'csrf' not in session:
token = os.urandom(16)
session['csrf'] = ''.join('%02x' % ord(c) for c in token)
if method == 'POST' and \
('csrf' not in request.form or request.form['csrf'] != session['csrf']):
abort(403)
if 'userId' in session and session['userId']:
<|code_end|>
using the current file's imports:
import os
import smtplib
from json import dumps
from flask import abort, render_template, request, session
from flask import redirect as _redirect
from werkzeug.exceptions import HTTPException
from metamodel import createLocalSession, closeLocalSession
from model import User
from urllib import quote, urlencode
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from jinja2 import Environment, FileSystemLoader
and any relevant context from other files:
# Path: metamodel.py
# def createLocalSession():
# request._session = scoped_session(sessionmaker())
# request._session.configure(bind=engine)
#
# def closeLocalSession():
# request._session.remove()
#
# Path: model.py
# class User(object):
# enabled = Boolean
# admin = Boolean
# username = Unicode(255)
# password = String(88)
# gold = Integer
# email = String
# email_verified = Boolean
# email_verification = String
# email_notifications = Boolean
# phone_number = String
# phone_verified = Boolean
# phone_verification_code = Integer
# phone_verification_tries = Integer
# phone_notifications = Boolean
# feedback_score = Integer
# feedback_positive = Integer
# feedback_negative = Integer
#
# characters = Character.relation(backref='user')
# news = News.relation(backref='creator')
# jobs = Job.relation(backref='user')
# gold_history = GoldHistory.relation(backref='user')
# feedbacks = Feedback.relation(backref='profile')
#
#
# @staticmethod
# def hash(password):
# salt = ''.join('%02x' % ord(c) for c in os.urandom(24))
# for i in range(1000):
# password = hashlib.sha1(salt + password + salt).hexdigest()
# return salt+password
#
# @staticmethod
# def checkHash(hash, password):
# salt, hash = hash[:48], hash[48:]
# for i in range(1000):
# password = hashlib.sha1(salt + password + salt).hexdigest()
# if password == hash:
# return True
# return False
#
# @staticmethod
# def add(username, password, admin):
# if User.one(enabled=True, username=username):
# return None
# with transact:
# return User.create(
# enabled=True,
# username=username,
# password=User.hash(password),
# admin=admin,
# gold=0,
# phone_number='',
# phone_verified=False,
# phone_notifications=True,
# email='',
# email_verified=False,
# email_notifications=True,
# feedback_score = 0,
# feedback_positive = 0,
# feedback_negative = 0
# )
#
# @staticmethod
# def find(username, password):
# if username == None or password == None:
# return None
# user = User.one(enabled=True, username=username)
# if user and User.checkHash(user.password, password):
# return user
# if not user and len(User.all()) == 0:
# return User.add(username, password, True)
# return None
#
# def change(self, email=None):
# if email != None and email != self.email:
# with transact:
# self.update(
# email=email,
# email_verified=False
# )
# self.generateEmailVerification()
#
# def generateEmailVerification(self):
# from handler import email
# code = ''.join('%02X' % random.randrange(256) for i in range(20))
# with transact:
# self.update(email_verification=code)
# email(self.email, 'verify', code=code)
#
# def addGold(self, amount, price):
# with transact:
# self.update(gold=self.gold+amount)
# GoldHistory.create(
# user=self,
# date=datetime.now(),
# amount=amount,
# balance=self.gold,
# dollars=price,
# job=None,
# desc=u'Bought %i gold for $%.2f' % (amount, price / 100.0)
# )
#
# def sms(self, message):
# if not self.phone_verified:
# return False
#
# sms(self.phone_number, message)
# return True
. Output only the next line. | session.user = User.one(id=int(session['userId'])) |
Predict the next line for this snippet: <|code_start|> return phone_number
user = User.one(id=id)
if not user or user != session.user: abort(403)
phone_number = normalize(phone_number)
if phone_number != '' and len(phone_number) != 10:
redirect(get_index.url(id, error='Invalid phone number'))
elif email != '' and (u'@' not in email or '\n' in email or '\r' in email or ',' in email):
redirect(get_index.url(id, error='Invalid email'))
with transact:
user.update(
phone_notifications=phone_notify == u'on',
email_notifications=email_notify == u'on'
)
if phone_number != user.phone_number:
user.update(
phone_number=phone_number,
phone_verified=False
)
user.change(email=email)
redirect(get_index.url(id))
def generatePhoneVerification():
with transact:
session.user.update(
phone_verification_code=random.randrange(1, 1000000),
phone_verification_tries=0
)
<|code_end|>
with the help of current file imports:
import random, re
import markdown2
from sms import sms
from handler import *
from model import *
from datetime import datetime
and context from other files:
# Path: sms.py
# def sms(number, message):
# client = TwilioRestClient(account, token)
# if number[0] != '+':
# if number[0] == '1':
# number = '+' + number
# else:
# assert len(number) == 10
# number = '+1' + number
# client.sms.messages.create(to=number, from_=from_number, body=message)
, which may contain function names, class names, or code. Output only the next line. | sms(session.user.phone_number, 'Quest Companions verification code: %06i' % session.user.phone_verification_code) |
Given snippet: <|code_start|> help='number of levels to recurse thru, defaults to infinite',
dest='maxrecurselevel', default=defaults.maxrecurselevel)
addarg('-k', '--skip', nargs='?', default=[],
help='steps to skip, can be any combination of: ' + ' '.join(
'='.join((k, v)) for k, v in SkippableSteps.items()), dest='skip')
addarg('-C', '--useCaches',
help='recompute cached intermediate results', action="store_true")
addarg('-P', '--postgres',
help='use postgres database [default=sqlite]', action="store_true")
addarg('-V', '--version', help='report version and exit',
default=False, action="store_true")
addarg('--calledFromCmdline',
help='signals that script is run from commandline', default=True)
return parser.parse_args()
def getFileList(dirName):
# todo replace this section in task formDictionary
allpdffname='allpdfnames.txt'
allpdfpath = pathjoin(dirName,allpdffname)
if not ut.exists(allpdfpath):
if 1: # until resolve urllib2 code below
# todo either use allpdfnames file in place,
# or run all symlinking as a separate pass
allpdfpath = ut.Resource(appname, 'static/'+allpdffname).path()
allpdfLink = pathjoin(dirName,allpdffname)
try:
if not ut.exists(allpdfLink):
os.symlink(allpdfpath, allpdfLink)
except Exception as e:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import os.path
import re
import shutil
import sys
import shutil
import doctest
from argparse import ArgumentParser
from os.path import isfile, join as joinpath
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from . import ut
from .ut import log, Bag, setupLogging, logg, NL, pathjoin
from .version import appname, appversion
from .Form import Form
and context:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/version.py
which might include code, classes, or functions. Output only the next line. | log.warn('cannot symlink %s to %s because %s, copying instead'%( |
Predict the next line for this snippet: <|code_start|>from __future__ import print_function, absolute_import
RecurseInfinitely = -1
RecursionRootLevel = 0
SkippableSteps = (
ut.ChainablyUpdatableOrderedDict()
(x='xfaParsing')
(m='mathParsing')
(r='referenceParsing')
(d='databaseOutput')
(h='htmlOutput')
(c='cleanupFiles')) # remove intermediate files
<|code_end|>
with the help of current file imports:
import os
import os.path
import re
import shutil
import sys
import shutil
import doctest
from argparse import ArgumentParser
from os.path import isfile, join as joinpath
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from . import ut
from .ut import log, Bag, setupLogging, logg, NL, pathjoin
from .version import appname, appversion
from .Form import Form
and context from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/version.py
, which may contain function names, class names, or code. Output only the next line. | defaults = Bag(dict( |
Given the code snippet: <|code_start|> args = None
if overrideArgs.get('readCmdlineArgs'):
args = parseCmdline()
cfg.update((args, lambda x: x.__dict__.items()))
if cfg.version:
print(appversion)
sys.exit()
for k, v in overrideArgs.items():
cfg[k] = v
if not cfg.quiet and args is not None:
logg('commandlineArgs:' + str(args), [ut.stdout])
if cfg.quiet:
ut.quiet = True
if cfg.debug:
cfg.loglevel = 'DEBUG'
cfg.verbose = True
if 'steps' in cfg:
if cfg.steps and len(cfg.steps[0]) > 1:
assert len(cfg.steps) == 1
cfg.steps = cfg.steps[0].split()
else:
cfg.steps = [step for step in SkippableSteps if step not in cfg.skip]
if cfg.formyear is None:
cfg.formyear = cfg.latestTaxYear
dirName = cfg.dirName
if cfg.rootForms:
rootForms = [f.strip() for f in cfg.rootForms.split(',')]
else:
rootForms = []
logname = setLogname(rootForms, cfg)
<|code_end|>
, generate the next line using the imports in this file:
import os
import os.path
import re
import shutil
import sys
import shutil
import doctest
from argparse import ArgumentParser
from os.path import isfile, join as joinpath
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from . import ut
from .ut import log, Bag, setupLogging, logg, NL, pathjoin
from .version import appname, appversion
from .Form import Form
and context (functions, classes, or occasionally code) from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/version.py
. Output only the next line. | loginfo = setupLogging(logname, cfg) |
Predict the next line for this snippet: <|code_start|> ' which are used only when serving html files'%(
staticDir, cfg.staticRoot, e))
def setupStaticDir(dirName):
cfg.staticRoot = cfg.staticRoot \
if cfg.staticRoot.startswith('/') \
else pathjoin(dirName, cfg.staticRoot)
cfg.pdfDir = pathjoin(cfg.staticRoot, 'pdf')
cfg.svgDir = pathjoin(cfg.staticRoot, 'svg')
copyStaticDir(appname)
ut.ensure_dir(cfg.pdfDir)
ut.ensure_dir(cfg.svgDir)
def setup(**overrideArgs):
# note formyear will default to latestTaxYear even if dirName=='2014'
global alreadySetup
if alreadySetup:
return
args = None
if overrideArgs.get('readCmdlineArgs'):
args = parseCmdline()
cfg.update((args, lambda x: x.__dict__.items()))
if cfg.version:
print(appversion)
sys.exit()
for k, v in overrideArgs.items():
cfg[k] = v
if not cfg.quiet and args is not None:
<|code_end|>
with the help of current file imports:
import os
import os.path
import re
import shutil
import sys
import shutil
import doctest
from argparse import ArgumentParser
from os.path import isfile, join as joinpath
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from . import ut
from .ut import log, Bag, setupLogging, logg, NL, pathjoin
from .version import appname, appversion
from .Form import Form
and context from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/version.py
, which may contain function names, class names, or code. Output only the next line. | logg('commandlineArgs:' + str(args), [ut.stdout]) |
Here is a snippet: <|code_start|> if 1: # until resolve urllib2 code below
# todo either use allpdfnames file in place,
# or run all symlinking as a separate pass
allpdfpath = ut.Resource(appname, 'static/'+allpdffname).path()
allpdfLink = pathjoin(dirName,allpdffname)
try:
if not ut.exists(allpdfLink):
os.symlink(allpdfpath, allpdfLink)
except Exception as e:
log.warn('cannot symlink %s to %s because %s, copying instead'%(
allpdfpath, allpdfLink, e, ))
shutil.copy(allpdfpath,allpdfLink)
elif not cfg.okToDownload:
msg = 'allPdfNames file [%s] not found but dontDownload' % (
allpdfpath)
raise Exception(msg)
else:
# todo why did this stop working? my own env?
try:
# could use https://www.irs.gov/pub/irs-pdf/pdfnames.txt but
# this way we avoid errors in that file
fin = urlopen('https://www.irs.gov/pub/irs-pdf/', 'rb')
if fin.getcode() != 200:
raise Exception('getFileList/urlopen/getcode=[%d]' % (
fin.getcode(), ))
allpdffiles_html = fin.read()
fin.close()
allpdfnames = re.findall(r'f[\w\d-]+\.pdf', allpdffiles_html)
allpdfnames = ut.uniqify(sorted(allpdfnames))
with open(allpdfpath, 'w') as f:
<|code_end|>
. Write the next line using the current file imports:
import os
import os.path
import re
import shutil
import sys
import shutil
import doctest
from argparse import ArgumentParser
from os.path import isfile, join as joinpath
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from . import ut
from .ut import log, Bag, setupLogging, logg, NL, pathjoin
from .version import appname, appversion
from .Form import Form
and context from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/version.py
, which may include functions, classes, or code. Output only the next line. | f.write(NL.join(allpdfnames)) |
Based on the snippet: <|code_start|> addarg('-v', '--verbose',
help='log more [only affects doctests]', action="store_true")
addarg('-g', '--debug', help='debug', action="store_true")
addarg('-D', '--dontDownload', dest='okToDownload',
help='report error for forms not present locally',
action="store_false")
addarg('-q', '--quiet', help='suppress stdout',
action="store_true")
addarg('-r', '--recurse',
help='recurse thru all referenced forms', action="store_true")
addarg('-R', '--recurselevel', type=int,
help='number of levels to recurse thru, defaults to infinite',
dest='maxrecurselevel', default=defaults.maxrecurselevel)
addarg('-k', '--skip', nargs='?', default=[],
help='steps to skip, can be any combination of: ' + ' '.join(
'='.join((k, v)) for k, v in SkippableSteps.items()), dest='skip')
addarg('-C', '--useCaches',
help='recompute cached intermediate results', action="store_true")
addarg('-P', '--postgres',
help='use postgres database [default=sqlite]', action="store_true")
addarg('-V', '--version', help='report version and exit',
default=False, action="store_true")
addarg('--calledFromCmdline',
help='signals that script is run from commandline', default=True)
return parser.parse_args()
def getFileList(dirName):
# todo replace this section in task formDictionary
allpdffname='allpdfnames.txt'
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import os.path
import re
import shutil
import sys
import shutil
import doctest
from argparse import ArgumentParser
from os.path import isfile, join as joinpath
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from . import ut
from .ut import log, Bag, setupLogging, logg, NL, pathjoin
from .version import appname, appversion
from .Form import Form
and context (classes, functions, sometimes code) from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/version.py
. Output only the next line. | allpdfpath = pathjoin(dirName,allpdffname) |
Given the following code snippet before the placeholder: <|code_start|> action="store_false")
addarg('-q', '--quiet', help='suppress stdout',
action="store_true")
addarg('-r', '--recurse',
help='recurse thru all referenced forms', action="store_true")
addarg('-R', '--recurselevel', type=int,
help='number of levels to recurse thru, defaults to infinite',
dest='maxrecurselevel', default=defaults.maxrecurselevel)
addarg('-k', '--skip', nargs='?', default=[],
help='steps to skip, can be any combination of: ' + ' '.join(
'='.join((k, v)) for k, v in SkippableSteps.items()), dest='skip')
addarg('-C', '--useCaches',
help='recompute cached intermediate results', action="store_true")
addarg('-P', '--postgres',
help='use postgres database [default=sqlite]', action="store_true")
addarg('-V', '--version', help='report version and exit',
default=False, action="store_true")
addarg('--calledFromCmdline',
help='signals that script is run from commandline', default=True)
return parser.parse_args()
def getFileList(dirName):
# todo replace this section in task formDictionary
allpdffname='allpdfnames.txt'
allpdfpath = pathjoin(dirName,allpdffname)
if not ut.exists(allpdfpath):
if 1: # until resolve urllib2 code below
# todo either use allpdfnames file in place,
# or run all symlinking as a separate pass
<|code_end|>
, predict the next line using imports from the current file:
import os
import os.path
import re
import shutil
import sys
import shutil
import doctest
from argparse import ArgumentParser
from os.path import isfile, join as joinpath
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from . import ut
from .ut import log, Bag, setupLogging, logg, NL, pathjoin
from .version import appname, appversion
from .Form import Form
and context including class names, function names, and sometimes code from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/version.py
. Output only the next line. | allpdfpath = ut.Resource(appname, 'static/'+allpdffname).path() |
Here is a snippet: <|code_start|> try:
shutil.copytree(staticDir, cfg.staticRoot)
except Exception as e:
log.warn('cannot copy %s to %s because %s,'
' continuing without static files,'
' which are used only when serving html files'%(
staticDir, cfg.staticRoot, e))
def setupStaticDir(dirName):
cfg.staticRoot = cfg.staticRoot \
if cfg.staticRoot.startswith('/') \
else pathjoin(dirName, cfg.staticRoot)
cfg.pdfDir = pathjoin(cfg.staticRoot, 'pdf')
cfg.svgDir = pathjoin(cfg.staticRoot, 'svg')
copyStaticDir(appname)
ut.ensure_dir(cfg.pdfDir)
ut.ensure_dir(cfg.svgDir)
def setup(**overrideArgs):
# note formyear will default to latestTaxYear even if dirName=='2014'
global alreadySetup
if alreadySetup:
return
args = None
if overrideArgs.get('readCmdlineArgs'):
args = parseCmdline()
cfg.update((args, lambda x: x.__dict__.items()))
if cfg.version:
<|code_end|>
. Write the next line using the current file imports:
import os
import os.path
import re
import shutil
import sys
import shutil
import doctest
from argparse import ArgumentParser
from os.path import isfile, join as joinpath
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from . import ut
from .ut import log, Bag, setupLogging, logg, NL, pathjoin
from .version import appname, appversion
from .Form import Form
and context from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/version.py
, which may include functions, classes, or code. Output only the next line. | print(appversion) |
Next line prediction: <|code_start|>#! /usr/bin/env python
# 'Otf' below [eg TestOtfSteps] is 'OpenTaxForms'
from __future__ import print_function
def checkDependencies():
# todo read dependencies from a central location
dependencies = ['pdf2svg']
missingDeps = []
for dep in dependencies:
try:
check_output(dep)
except OSError:
missingDeps.append(dep)
except:
pass
if missingDeps:
raise Exception('missing dependencies: ' + str(missingDeps) + '; see README.')
class TestOtfBase(object):
def setup_method(self, _):
dirName = 'forms'
formDirName = osp.join(dirName, 'static', 'pdf')
theForm = 'f1040.pdf'
<|code_end|>
. Use current file imports:
(import os
import os.path as osp
import shutil
import filecmp
import json
import sys
from subprocess import check_output
from opentaxforms import ut
from opentaxforms import main as otf
from opentaxforms.serve import createApp)
and context including class names, function names, or small code snippets from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
. Output only the next line. | dbpath='sqlite:///'+ut.Resource('test','opentaxforms.sqlite3').path() |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
from __future__ import print_function, absolute_import
def createApi(app,**kw):
db = SQLAlchemy(app)
<|code_end|>
with the help of current file imports:
import flask_restless
from argparse import ArgumentParser
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from .db import connect
from .version import appname, apiVersion
from .ut import Bag
and context from other files:
# Path: opentaxforms/db.py
# def connect(appname, **kw):
# # default values
# user = pw = 'user'
# dbname = appname.lower()
# # optionally override defaults
# user = os.environ.get(appname.upper() + '_DBUSER', user)
# pw = os.environ.get(appname.upper() + '_DBPASS', pw)
# dbname = os.environ.get(appname.upper() + '_DBNAME', dbname)
# global conn
# conn, engine, metadata, md = connect_(user=user, pw=pw, db=dbname, **kw)
# return conn, engine, metadata, md
#
# Path: opentaxforms/version.py
#
# Path: opentaxforms/ut.py
# class Bag(object):
#
# # after alexMartelli at http://stackoverflow.com/questions/2597278
# def __init__(self, *maps, **kw):
# '''
# >>> b=Bag(a=0)
# >>> b.a=1
# >>> b.b=0
# >>> c=Bag(b)
# '''
# for mapp in maps:
# getdict = None
# if type(mapp) == dict:
# getdict = lambda x: x
# # def getdict(x): return x
# elif type(mapp) == Bag:
# getdict = lambda x: x.__dict__
# # def getdict(x): return x.__dict__
# elif type(mapp) == tuple:
# mapp, getdict = mapp
# if getdict is not None:
# self.__dict__.update(getdict(mapp))
# else:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__[k] = v
# self.__dict__.update(kw)
#
# def _getGetitems(self, mapp):
# if type(mapp) == tuple:
# mapp, getitems = mapp
# else:
# getitems = lambda m: m.items()
# # def getitems(m): return m.items()
# return mapp, getitems
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, val):
# self.__dict__[key] = val
#
# def __len__(self):
# return len(self.__dict__)
#
# def __call__(self, *keys):
# '''slicing interface
# gimmicky but useful, and doesnt pollute key namespace
# >>> b=Bag(a=1,b=2)
# >>> assert b('a','b')==(1,2)
# '''
# return tuple(self.__dict__[k] for k in keys)
#
# def clear(self):
# self.__dict__={}
#
# def update(self, *maps):
# '''
# >>> b=Bag(a=1,b=2)
# >>> b.update(Bag(a=1,b=1,c=0))
# Bag({'a': 1, 'b': 1, 'c': 0})
# '''
# for mapp in maps:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__[k] = v
# return self
#
# def __add__(self, *maps):
# self.__iadd__(*maps)
# return self
#
# def __iadd__(self, *maps):
# '''
# >>> b=Bag(a=1,b=2)
# >>> b+=Bag(a=1,b=1,c=0)
# >>> assert b('a','b','c')==(2,3,0)
# >>> b=Bag(a='1',b='2')
# >>> b+=Bag(a='1',b='1',c='0')
# >>> assert b('a','b','c')==('11','21','0')
# '''
# # todo error for empty maps[0]
# zero = type(list(maps[0].values())[0])()
# for mapp in maps:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__.setdefault(k, zero)
# self.__dict__[k] += v
# return self
#
# def __iter__(self):
# return self.iterkeys()
#
# def iterkeys(self):
# return iter(self.__dict__.keys())
#
# def keys(self):
# return self.__dict__.keys()
#
# def values(self):
# return self.__dict__.values()
#
# def items(self):
# return self.__dict__.items()
#
# def iteritems(self):
# return self.__dict__.iteritems()
#
# def get(self, key, dflt=None):
# return self.__dict__.get(key, dflt)
#
# def __str__(self):
# return 'Bag(' + pf(self.__dict__) + ')'
#
# def __repr__(self):
# return self.__str__()
, which may contain function names, class names, or code. Output only the next line. | conn, engine, metadata, md = connect(appname, **kw) |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
from __future__ import print_function, absolute_import
def createApi(app,**kw):
db = SQLAlchemy(app)
<|code_end|>
with the help of current file imports:
import flask_restless
from argparse import ArgumentParser
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from .db import connect
from .version import appname, apiVersion
from .ut import Bag
and context from other files:
# Path: opentaxforms/db.py
# def connect(appname, **kw):
# # default values
# user = pw = 'user'
# dbname = appname.lower()
# # optionally override defaults
# user = os.environ.get(appname.upper() + '_DBUSER', user)
# pw = os.environ.get(appname.upper() + '_DBPASS', pw)
# dbname = os.environ.get(appname.upper() + '_DBNAME', dbname)
# global conn
# conn, engine, metadata, md = connect_(user=user, pw=pw, db=dbname, **kw)
# return conn, engine, metadata, md
#
# Path: opentaxforms/version.py
#
# Path: opentaxforms/ut.py
# class Bag(object):
#
# # after alexMartelli at http://stackoverflow.com/questions/2597278
# def __init__(self, *maps, **kw):
# '''
# >>> b=Bag(a=0)
# >>> b.a=1
# >>> b.b=0
# >>> c=Bag(b)
# '''
# for mapp in maps:
# getdict = None
# if type(mapp) == dict:
# getdict = lambda x: x
# # def getdict(x): return x
# elif type(mapp) == Bag:
# getdict = lambda x: x.__dict__
# # def getdict(x): return x.__dict__
# elif type(mapp) == tuple:
# mapp, getdict = mapp
# if getdict is not None:
# self.__dict__.update(getdict(mapp))
# else:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__[k] = v
# self.__dict__.update(kw)
#
# def _getGetitems(self, mapp):
# if type(mapp) == tuple:
# mapp, getitems = mapp
# else:
# getitems = lambda m: m.items()
# # def getitems(m): return m.items()
# return mapp, getitems
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, val):
# self.__dict__[key] = val
#
# def __len__(self):
# return len(self.__dict__)
#
# def __call__(self, *keys):
# '''slicing interface
# gimmicky but useful, and doesnt pollute key namespace
# >>> b=Bag(a=1,b=2)
# >>> assert b('a','b')==(1,2)
# '''
# return tuple(self.__dict__[k] for k in keys)
#
# def clear(self):
# self.__dict__={}
#
# def update(self, *maps):
# '''
# >>> b=Bag(a=1,b=2)
# >>> b.update(Bag(a=1,b=1,c=0))
# Bag({'a': 1, 'b': 1, 'c': 0})
# '''
# for mapp in maps:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__[k] = v
# return self
#
# def __add__(self, *maps):
# self.__iadd__(*maps)
# return self
#
# def __iadd__(self, *maps):
# '''
# >>> b=Bag(a=1,b=2)
# >>> b+=Bag(a=1,b=1,c=0)
# >>> assert b('a','b','c')==(2,3,0)
# >>> b=Bag(a='1',b='2')
# >>> b+=Bag(a='1',b='1',c='0')
# >>> assert b('a','b','c')==('11','21','0')
# '''
# # todo error for empty maps[0]
# zero = type(list(maps[0].values())[0])()
# for mapp in maps:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__.setdefault(k, zero)
# self.__dict__[k] += v
# return self
#
# def __iter__(self):
# return self.iterkeys()
#
# def iterkeys(self):
# return iter(self.__dict__.keys())
#
# def keys(self):
# return self.__dict__.keys()
#
# def values(self):
# return self.__dict__.values()
#
# def items(self):
# return self.__dict__.items()
#
# def iteritems(self):
# return self.__dict__.iteritems()
#
# def get(self, key, dflt=None):
# return self.__dict__.get(key, dflt)
#
# def __str__(self):
# return 'Bag(' + pf(self.__dict__) + ')'
#
# def __repr__(self):
# return self.__str__()
, which may contain function names, class names, or code. Output only the next line. | conn, engine, metadata, md = connect(appname, **kw) |
Given the code snippet: <|code_start|> # todo should flask_restless need __tablename__?
__tablename__=str(tabl),
)
attrs.update(dict(
orgn=dict(
form=db.relationship('Form'),
),
form=dict(
orgn=db.relationship('Orgn', back_populates='form'),
slot=db.relationship('Slot', back_populates='form'),
),
slot=dict(
form=db.relationship('Form'),
),
)[tabl])
tablcls = type(str(tabl).capitalize(), (Base, ), attrs)
colsToAdd = dict(
orgn=(),
form=(
'orgn', 'orgn.code',
),
slot=(
'form', 'form.code',
),
)[tabl]
colsToShow = [c.name for c in tablobj.columns]
colsToShow.extend(colsToAdd)
# print tabl,colsToShow
apimanager.create_api(
tablcls,
<|code_end|>
, generate the next line using the imports in this file:
import flask_restless
from argparse import ArgumentParser
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from .db import connect
from .version import appname, apiVersion
from .ut import Bag
and context (functions, classes, or occasionally code) from other files:
# Path: opentaxforms/db.py
# def connect(appname, **kw):
# # default values
# user = pw = 'user'
# dbname = appname.lower()
# # optionally override defaults
# user = os.environ.get(appname.upper() + '_DBUSER', user)
# pw = os.environ.get(appname.upper() + '_DBPASS', pw)
# dbname = os.environ.get(appname.upper() + '_DBNAME', dbname)
# global conn
# conn, engine, metadata, md = connect_(user=user, pw=pw, db=dbname, **kw)
# return conn, engine, metadata, md
#
# Path: opentaxforms/version.py
#
# Path: opentaxforms/ut.py
# class Bag(object):
#
# # after alexMartelli at http://stackoverflow.com/questions/2597278
# def __init__(self, *maps, **kw):
# '''
# >>> b=Bag(a=0)
# >>> b.a=1
# >>> b.b=0
# >>> c=Bag(b)
# '''
# for mapp in maps:
# getdict = None
# if type(mapp) == dict:
# getdict = lambda x: x
# # def getdict(x): return x
# elif type(mapp) == Bag:
# getdict = lambda x: x.__dict__
# # def getdict(x): return x.__dict__
# elif type(mapp) == tuple:
# mapp, getdict = mapp
# if getdict is not None:
# self.__dict__.update(getdict(mapp))
# else:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__[k] = v
# self.__dict__.update(kw)
#
# def _getGetitems(self, mapp):
# if type(mapp) == tuple:
# mapp, getitems = mapp
# else:
# getitems = lambda m: m.items()
# # def getitems(m): return m.items()
# return mapp, getitems
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, val):
# self.__dict__[key] = val
#
# def __len__(self):
# return len(self.__dict__)
#
# def __call__(self, *keys):
# '''slicing interface
# gimmicky but useful, and doesnt pollute key namespace
# >>> b=Bag(a=1,b=2)
# >>> assert b('a','b')==(1,2)
# '''
# return tuple(self.__dict__[k] for k in keys)
#
# def clear(self):
# self.__dict__={}
#
# def update(self, *maps):
# '''
# >>> b=Bag(a=1,b=2)
# >>> b.update(Bag(a=1,b=1,c=0))
# Bag({'a': 1, 'b': 1, 'c': 0})
# '''
# for mapp in maps:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__[k] = v
# return self
#
# def __add__(self, *maps):
# self.__iadd__(*maps)
# return self
#
# def __iadd__(self, *maps):
# '''
# >>> b=Bag(a=1,b=2)
# >>> b+=Bag(a=1,b=1,c=0)
# >>> assert b('a','b','c')==(2,3,0)
# >>> b=Bag(a='1',b='2')
# >>> b+=Bag(a='1',b='1',c='0')
# >>> assert b('a','b','c')==('11','21','0')
# '''
# # todo error for empty maps[0]
# zero = type(list(maps[0].values())[0])()
# for mapp in maps:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__.setdefault(k, zero)
# self.__dict__[k] += v
# return self
#
# def __iter__(self):
# return self.iterkeys()
#
# def iterkeys(self):
# return iter(self.__dict__.keys())
#
# def keys(self):
# return self.__dict__.keys()
#
# def values(self):
# return self.__dict__.values()
#
# def items(self):
# return self.__dict__.items()
#
# def iteritems(self):
# return self.__dict__.iteritems()
#
# def get(self, key, dflt=None):
# return self.__dict__.get(key, dflt)
#
# def __str__(self):
# return 'Bag(' + pf(self.__dict__) + ')'
#
# def __repr__(self):
# return self.__str__()
. Output only the next line. | url_prefix='/api/v%s' % (apiVersion, ), |
Using the snippet: <|code_start|> colsToShow = [c.name for c in tablobj.columns]
colsToShow.extend(colsToAdd)
# print tabl,colsToShow
apimanager.create_api(
tablcls,
url_prefix='/api/v%s' % (apiVersion, ),
include_columns=colsToShow,
)
return counts
def parseCmdline():
'''Load command line arguments'''
parser = ArgumentParser(
description='Automates tax forms'
' and provides an API for new tax form interfaces'
)
parser.add_argument(
'-P', '--postgres',
help='use postgres database [default=sqlite]', action="store_true")
return parser.parse_args()
def createApp(**kw):
cmdline = kw.get('cmdline')
verbose = kw.get('verbose')
if 'cmdline' in kw:
del kw['cmdline']
if 'verbose' in kw:
del kw['verbose']
<|code_end|>
, determine the next line of code. You have imports:
import flask_restless
from argparse import ArgumentParser
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from .db import connect
from .version import appname, apiVersion
from .ut import Bag
and context (class names, function names, or code) available:
# Path: opentaxforms/db.py
# def connect(appname, **kw):
# # default values
# user = pw = 'user'
# dbname = appname.lower()
# # optionally override defaults
# user = os.environ.get(appname.upper() + '_DBUSER', user)
# pw = os.environ.get(appname.upper() + '_DBPASS', pw)
# dbname = os.environ.get(appname.upper() + '_DBNAME', dbname)
# global conn
# conn, engine, metadata, md = connect_(user=user, pw=pw, db=dbname, **kw)
# return conn, engine, metadata, md
#
# Path: opentaxforms/version.py
#
# Path: opentaxforms/ut.py
# class Bag(object):
#
# # after alexMartelli at http://stackoverflow.com/questions/2597278
# def __init__(self, *maps, **kw):
# '''
# >>> b=Bag(a=0)
# >>> b.a=1
# >>> b.b=0
# >>> c=Bag(b)
# '''
# for mapp in maps:
# getdict = None
# if type(mapp) == dict:
# getdict = lambda x: x
# # def getdict(x): return x
# elif type(mapp) == Bag:
# getdict = lambda x: x.__dict__
# # def getdict(x): return x.__dict__
# elif type(mapp) == tuple:
# mapp, getdict = mapp
# if getdict is not None:
# self.__dict__.update(getdict(mapp))
# else:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__[k] = v
# self.__dict__.update(kw)
#
# def _getGetitems(self, mapp):
# if type(mapp) == tuple:
# mapp, getitems = mapp
# else:
# getitems = lambda m: m.items()
# # def getitems(m): return m.items()
# return mapp, getitems
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, val):
# self.__dict__[key] = val
#
# def __len__(self):
# return len(self.__dict__)
#
# def __call__(self, *keys):
# '''slicing interface
# gimmicky but useful, and doesnt pollute key namespace
# >>> b=Bag(a=1,b=2)
# >>> assert b('a','b')==(1,2)
# '''
# return tuple(self.__dict__[k] for k in keys)
#
# def clear(self):
# self.__dict__={}
#
# def update(self, *maps):
# '''
# >>> b=Bag(a=1,b=2)
# >>> b.update(Bag(a=1,b=1,c=0))
# Bag({'a': 1, 'b': 1, 'c': 0})
# '''
# for mapp in maps:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__[k] = v
# return self
#
# def __add__(self, *maps):
# self.__iadd__(*maps)
# return self
#
# def __iadd__(self, *maps):
# '''
# >>> b=Bag(a=1,b=2)
# >>> b+=Bag(a=1,b=1,c=0)
# >>> assert b('a','b','c')==(2,3,0)
# >>> b=Bag(a='1',b='2')
# >>> b+=Bag(a='1',b='1',c='0')
# >>> assert b('a','b','c')==('11','21','0')
# '''
# # todo error for empty maps[0]
# zero = type(list(maps[0].values())[0])()
# for mapp in maps:
# mapp, getitems = self._getGetitems(mapp)
# for k, v in getitems(mapp):
# self.__dict__.setdefault(k, zero)
# self.__dict__[k] += v
# return self
#
# def __iter__(self):
# return self.iterkeys()
#
# def iterkeys(self):
# return iter(self.__dict__.keys())
#
# def keys(self):
# return self.__dict__.keys()
#
# def values(self):
# return self.__dict__.values()
#
# def items(self):
# return self.__dict__.items()
#
# def iteritems(self):
# return self.__dict__.iteritems()
#
# def get(self, key, dflt=None):
# return self.__dict__.get(key, dflt)
#
# def __str__(self):
# return 'Bag(' + pf(self.__dict__) + ')'
#
# def __repr__(self):
# return self.__str__()
. Output only the next line. | args = parseCmdline() if cmdline else Bag(dict(postgres=False)) |
Given the code snippet: <|code_start|>
def normalize(s):
if isinstance(s, six.binary_type):
s = s.decode('utf8')
# replace each whitespace string with a single space
return re.sub(r'\s+', ' ', s)
def condtopy(cond):
'''
>>> condtopy('line 2 is more than line 1')
'line2>line1'
'''
delim = ' is more than '
if delim in cond:
lh, rh = cond.split(delim, 1)
lh = lh.replace(' ', '')
rh = rh.replace(' ', '')
return '%s>%s' % (lh, rh)
raise Exception('dunno condition [%s]' % (cond, ))
def lineOrRange(s, pg, fieldsByLine, col=None):
'''
todo add enough of fieldsByLine arg to make these work
#>>> lineOrRange('46','1')
#['line46']
#>>> lineOrRange('56 through 62','1')
#['line57', 'line62', 'line60b', ..., 'line61', 'line56', 'line58']
'''
<|code_end|>
, generate the next line using the imports in this file:
import re
import six
from . import ut, irs
from .ut import log, jj, numerify
and context (functions, classes, or occasionally code) from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
. Output only the next line. | log.debug('+lineOrRange s=%s col=%s', s, col) |
Using the snippet: <|code_start|> # Line 6. ... If line 5 is larger than line 4, enter -0-.
# constants [see parseCommand]
# f8814/line5 Base amount. $2,100.
# f8814/line13 Amount not taxed. $1,050.
# many places
# Enter the result here and on Form...
op, terms = self.op, self.terms
cmd = 'enter'
seekConstant=re.match(r'^\$?(\d+)(?:,?(\d+))?$',s)
if s == 'zero':
s = '-0-'
elif seekConstant:
constant=''.join((string or '') for string in seekConstant.groups())
self.op = '='
self.terms = [constant]
elif cond and s.startswith('the difference here'):
op = '-'
m1 = re.match(
r'(line \w+) '
r'is (less|more|larger|smaller|greater) than '
r'(line \w+)', cond)
if m1:
lineA, cmpOp, lineB = m1.groups()
if cmpOp in ('more', 'larger', 'greater'):
terms = [lineA, lineB]
else:
terms = [lineB, lineA]
self.terms = terms
self.op = op # todo suspect! removeme
else:
<|code_end|>
, determine the next line of code. You have imports:
import re
import six
from . import ut, irs
from .ut import log, jj, numerify
and context (class names, function names, or code) available:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
. Output only the next line. | msg = jj('cannotParseMath: cannot parse math: cmd,s,cond:', |
Given the following code snippet before the placeholder: <|code_start|>
def condtopy(cond):
'''
>>> condtopy('line 2 is more than line 1')
'line2>line1'
'''
delim = ' is more than '
if delim in cond:
lh, rh = cond.split(delim, 1)
lh = lh.replace(' ', '')
rh = rh.replace(' ', '')
return '%s>%s' % (lh, rh)
raise Exception('dunno condition [%s]' % (cond, ))
def lineOrRange(s, pg, fieldsByLine, col=None):
'''
todo add enough of fieldsByLine arg to make these work
#>>> lineOrRange('46','1')
#['line46']
#>>> lineOrRange('56 through 62','1')
#['line57', 'line62', 'line60b', ..., 'line61', 'line56', 'line58']
'''
log.debug('+lineOrRange s=%s col=%s', s, col)
prefix = 'line'
try:
start, end = s.split(' through ', 1)
if end.startswith(prefix):
prefix = ''
<|code_end|>
, predict the next line using imports from the current file:
import re
import six
from . import ut, irs
from .ut import log, jj, numerify
and context including class names, function names, and sometimes code from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
. Output only the next line. | startnum, endnum = numerify(start), numerify(end) |
Predict the next line after this snippet: <|code_start|> continue
if b'xfa-template' in data:
break
else:
msg='Cannot find form data in %s' % pdfpath
raise CrypticXml(msg)
# data == <form>-text.xml
tree = etree.fromstring(data)
if xmlpath is not None:
with open(xmlpath, 'wb') as out:
out.write(etree.tostring(tree, pretty_print=True))
return tree
def getNamespace(tree):
# xml root pre 2017: <template xmlns="http://www.xfa.org/schema/xfa-template/2.8/">
# xml root in 2017: <template xmlns="http://www.xfa.org/schema/xfa-template/3.0/">
rootNodes = tree.xpath('/*[local-name()="template"]')
assert len(rootNodes) == 1
rootNodeTag = rootNodes[0].tag
assert rootNodeTag.startswith('{') and '}' in rootNodeTag
namespace = rootNodeTag[1: rootNodeTag.index('}')]
return namespace
def extractFields(form):
prefix = form.prefix
fields = form.fields
visiblz = form.draws
assert len(fields)==0
<|code_end|>
using the current file's imports:
import sys
import six
import re
import os
import doctest
from sys import exc_info
from itertools import chain
from argparse import ArgumentParser
from lxml import etree
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFStream, PDFNotImplementedError
from cPickle import dump
from pickle import dump
from .config import cfg
from .ut import log, setupLogging, skip, Qnty, pf
from .irs import commandPtn, possibleColTypes, CrypticXml
and any relevant context from other files:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/irs.py
# class CrypticXml(Exception):
# def computeFormId(formName):
# def computeTitle(prefix, form = None):
# def sortableFieldname(fieldname):
# def intify(s):
# def possibleFilePrefixes(formName):
# def trailingLetters(s):
. Output only the next line. | if 'x' not in cfg.steps: |
Predict the next line for this snippet: <|code_start|> # rowname=ele.attrib.get('name','nameless') todo for maxheight in
# each row, assuming just need 'h' attrib, but f1040/line6ctable
# also has node w/ lineHeight attrib
cells = ele.xpath('./*[@h or @minH]', namespaces=namespaces)
if iele == 0:
# get titles from 1st row
def getcoltext(elem, namespaces):
txts = list(elem.xpath('.//*[@style]/text()',
namespaces=namespaces))
alltxt = ' '.join(txts).replace(u'\xa0', '')
m = re.match(commandPtn, alltxt)
if m:
colinstruction = alltxt[m.start():]
else:
colinstruction = ''
ctitles = [txt[:3].strip('()') for txt in txts
if len(txt) >= 3 and txt[0] == '('
and txt[2] == ')' and txt[1].islower()]
coltext = (' '.join(txts)).lower()
coltype = ' '.join(
coltype for coltype in possibleColTypes
if coltype in coltext)
return (ctitles[0] if ctitles else '',
coltype,
colinstruction)
coltitles, coltypes, colinstructions = zip(
*[getcoltext(elem, namespaces) for elem in cells])
try:
maxh = max([Qnty.fromstring(c.attrib.get('h', c.attrib.get(
'minH'))) for c in cells])
<|code_end|>
with the help of current file imports:
import sys
import six
import re
import os
import doctest
from sys import exc_info
from itertools import chain
from argparse import ArgumentParser
from lxml import etree
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFStream, PDFNotImplementedError
from cPickle import dump
from pickle import dump
from .config import cfg
from .ut import log, setupLogging, skip, Qnty, pf
from .irs import commandPtn, possibleColTypes, CrypticXml
and context from other files:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/irs.py
# class CrypticXml(Exception):
# def computeFormId(formName):
# def computeTitle(prefix, form = None):
# def sortableFieldname(fieldname):
# def intify(s):
# def possibleFilePrefixes(formName):
# def trailingLetters(s):
, which may contain function names, class names, or code. Output only the next line. | log.debug('row name:' + ele.attrib.get('name', 'nameless') + |
Using the snippet: <|code_start|>def saveFields(fields, prefix):
with open('%s.pickl' % prefix, 'w') as pickl:
dump(fields, pickl)
def parse_cli():
'''Load command line arguments'''
parser = ArgumentParser(description='extract field info of a PDF.')
addarg = parser.add_argument
addarg('-p', '--prefix', metavar='PREFIX',
nargs='?', help='prefix for names of files generated')
addarg('infile', metavar='pdf_file',
nargs='?', default='stdin',
help='PDF file to extract from')
addarg('-l', '--loglevel', help='Set loglevel',
default='WARN', metavar='LOG_LEVEL')
addarg('-t', '--doctests', help='Run doctests', action="store_true")
return parser.parse_args()
def main():
args = parse_cli()
infile = args.infile
def prefixify(s):
return s.rsplit('.', 1)[0]
prefix = args.prefix or prefixify(infile)
if prefix == 'stdin':
prefix = __name__
global log
<|code_end|>
, determine the next line of code. You have imports:
import sys
import six
import re
import os
import doctest
from sys import exc_info
from itertools import chain
from argparse import ArgumentParser
from lxml import etree
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFStream, PDFNotImplementedError
from cPickle import dump
from pickle import dump
from .config import cfg
from .ut import log, setupLogging, skip, Qnty, pf
from .irs import commandPtn, possibleColTypes, CrypticXml
and context (class names, function names, or code) available:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/irs.py
# class CrypticXml(Exception):
# def computeFormId(formName):
# def computeTitle(prefix, form = None):
# def sortableFieldname(fieldname):
# def intify(s):
# def possibleFilePrefixes(formName):
# def trailingLetters(s):
. Output only the next line. | log = setupLogging(prefix, args) |
Based on the snippet: <|code_start|> tables[key] = dict(
# elements in tables may not be directly assigned a width;
# widths are set for the columns so must track the column of
# each element
colwidths=el.attrib['columnWidths'],
maxheights=maxhs,
coltitles=coltitles,
coltypes=coltypes,
colinstructions=colinstructions,
)
return tables
def computePath(el, namespaces):
def ancestry(ele, namespacez):
return reversed(list(
elem.attrib.get(
'name', '%s#%d' % (
elem.tag,
indexAmongSibs(elem, elem.tag, namespacez)))
for elem in chain([ele], ele.iterancestors())))
return '.'.join(ancestry(el, namespaces))
def indexAmongSibs(el, tag=None, namespaces=None):
if not tag:
tag = '*'
elif callable(tag):
tag = str(tag).replace(' ', '').replace('-', '').strip('<>')
if '}' in tag:
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import six
import re
import os
import doctest
from sys import exc_info
from itertools import chain
from argparse import ArgumentParser
from lxml import etree
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFStream, PDFNotImplementedError
from cPickle import dump
from pickle import dump
from .config import cfg
from .ut import log, setupLogging, skip, Qnty, pf
from .irs import commandPtn, possibleColTypes, CrypticXml
and context (classes, functions, sometimes code) from other files:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/irs.py
# class CrypticXml(Exception):
# def computeFormId(formName):
# def computeTitle(prefix, form = None):
# def sortableFieldname(fieldname):
# def intify(s):
# def possibleFilePrefixes(formName):
# def trailingLetters(s):
. Output only the next line. | tag = 't:' + skip(tag, '}') |
Using the snippet: <|code_start|> for iele, ele in enumerate(
el.xpath('.//*[@layout="row"]', namespaces=namespaces)):
# rowname=ele.attrib.get('name','nameless') todo for maxheight in
# each row, assuming just need 'h' attrib, but f1040/line6ctable
# also has node w/ lineHeight attrib
cells = ele.xpath('./*[@h or @minH]', namespaces=namespaces)
if iele == 0:
# get titles from 1st row
def getcoltext(elem, namespaces):
txts = list(elem.xpath('.//*[@style]/text()',
namespaces=namespaces))
alltxt = ' '.join(txts).replace(u'\xa0', '')
m = re.match(commandPtn, alltxt)
if m:
colinstruction = alltxt[m.start():]
else:
colinstruction = ''
ctitles = [txt[:3].strip('()') for txt in txts
if len(txt) >= 3 and txt[0] == '('
and txt[2] == ')' and txt[1].islower()]
coltext = (' '.join(txts)).lower()
coltype = ' '.join(
coltype for coltype in possibleColTypes
if coltype in coltext)
return (ctitles[0] if ctitles else '',
coltype,
colinstruction)
coltitles, coltypes, colinstructions = zip(
*[getcoltext(elem, namespaces) for elem in cells])
try:
<|code_end|>
, determine the next line of code. You have imports:
import sys
import six
import re
import os
import doctest
from sys import exc_info
from itertools import chain
from argparse import ArgumentParser
from lxml import etree
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFStream, PDFNotImplementedError
from cPickle import dump
from pickle import dump
from .config import cfg
from .ut import log, setupLogging, skip, Qnty, pf
from .irs import commandPtn, possibleColTypes, CrypticXml
and context (class names, function names, or code) available:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/irs.py
# class CrypticXml(Exception):
# def computeFormId(formName):
# def computeTitle(prefix, form = None):
# def sortableFieldname(fieldname):
# def intify(s):
# def possibleFilePrefixes(formName):
# def trailingLetters(s):
. Output only the next line. | maxh = max([Qnty.fromstring(c.attrib.get('h', c.attrib.get( |
Using the snippet: <|code_start|> if currTable:
if currTable != prevTable:
# this is the first element in the table, so read the
# columnWidths and set icol to 1st column
columnWidths = [Qnty.fromstring(width) for width in tables[
currTable]['colwidths'].split()]
cumColWidths = [
sum(columnWidths[0:i], Qnty(0, columnWidths[0].units))
for i in range(len(columnWidths))]
maxheights = tables[currTable]['maxheights']
rowheights = [
sum(maxheights[0:i], Qnty(0, maxheights[0].units))
for i in range(len(maxheights))]
coltitles = tables[currTable]['coltitles']
coltypes = tables[currTable]['coltypes']
colinstructions = tables[currTable]['colinstructions']
prevTable = currTable
# todo chkboxes [and textboxes in tables] have more specific
# dims--reduce wdim,hdim,xpos,ypos by <field><margin> [see
# notes/27sep2013]
try:
wdim = columnWidths[icol]
except Exception as e:
msg = '; icol,columnWidths=%s,%s' % (icol, columnWidths)
etype = type(e)
tb = exc_info()[2]
raise etype(etype(e.message + msg)).with_traceback(tb)
try:
ypos += rowheights[irow]
except Exception as e:
<|code_end|>
, determine the next line of code. You have imports:
import sys
import six
import re
import os
import doctest
from sys import exc_info
from itertools import chain
from argparse import ArgumentParser
from lxml import etree
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFStream, PDFNotImplementedError
from cPickle import dump
from pickle import dump
from .config import cfg
from .ut import log, setupLogging, skip, Qnty, pf
from .irs import commandPtn, possibleColTypes, CrypticXml
and context (class names, function names, or code) available:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/irs.py
# class CrypticXml(Exception):
# def computeFormId(formName):
# def computeTitle(prefix, form = None):
# def sortableFieldname(fieldname):
# def intify(s):
# def possibleFilePrefixes(formName):
# def trailingLetters(s):
. Output only the next line. | msg = '; irow,rowheights=%s,%s\n%s' % (irow, rowheights, pf( |
Predict the next line for this snippet: <|code_start|>from __future__ import print_function, absolute_import
try:
except ImportError:
def collectTables(tree, namespaces):
tableEls = tree.xpath('//*[@layout="table"]', namespaces=namespaces)
tables = {}
for el in tableEls:
# tablename=el.attrib.get('name','nameless')
key = computePath(el, namespaces)
maxhs = []
for iele, ele in enumerate(
el.xpath('.//*[@layout="row"]', namespaces=namespaces)):
# rowname=ele.attrib.get('name','nameless') todo for maxheight in
# each row, assuming just need 'h' attrib, but f1040/line6ctable
# also has node w/ lineHeight attrib
cells = ele.xpath('./*[@h or @minH]', namespaces=namespaces)
if iele == 0:
# get titles from 1st row
def getcoltext(elem, namespaces):
txts = list(elem.xpath('.//*[@style]/text()',
namespaces=namespaces))
alltxt = ' '.join(txts).replace(u'\xa0', '')
<|code_end|>
with the help of current file imports:
import sys
import six
import re
import os
import doctest
from sys import exc_info
from itertools import chain
from argparse import ArgumentParser
from lxml import etree
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFStream, PDFNotImplementedError
from cPickle import dump
from pickle import dump
from .config import cfg
from .ut import log, setupLogging, skip, Qnty, pf
from .irs import commandPtn, possibleColTypes, CrypticXml
and context from other files:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/irs.py
# class CrypticXml(Exception):
# def computeFormId(formName):
# def computeTitle(prefix, form = None):
# def sortableFieldname(fieldname):
# def intify(s):
# def possibleFilePrefixes(formName):
# def trailingLetters(s):
, which may contain function names, class names, or code. Output only the next line. | m = re.match(commandPtn, alltxt) |
Next line prediction: <|code_start|>
def collectTables(tree, namespaces):
tableEls = tree.xpath('//*[@layout="table"]', namespaces=namespaces)
tables = {}
for el in tableEls:
# tablename=el.attrib.get('name','nameless')
key = computePath(el, namespaces)
maxhs = []
for iele, ele in enumerate(
el.xpath('.//*[@layout="row"]', namespaces=namespaces)):
# rowname=ele.attrib.get('name','nameless') todo for maxheight in
# each row, assuming just need 'h' attrib, but f1040/line6ctable
# also has node w/ lineHeight attrib
cells = ele.xpath('./*[@h or @minH]', namespaces=namespaces)
if iele == 0:
# get titles from 1st row
def getcoltext(elem, namespaces):
txts = list(elem.xpath('.//*[@style]/text()',
namespaces=namespaces))
alltxt = ' '.join(txts).replace(u'\xa0', '')
m = re.match(commandPtn, alltxt)
if m:
colinstruction = alltxt[m.start():]
else:
colinstruction = ''
ctitles = [txt[:3].strip('()') for txt in txts
if len(txt) >= 3 and txt[0] == '('
and txt[2] == ')' and txt[1].islower()]
coltext = (' '.join(txts)).lower()
coltype = ' '.join(
<|code_end|>
. Use current file imports:
(import sys
import six
import re
import os
import doctest
from sys import exc_info
from itertools import chain
from argparse import ArgumentParser
from lxml import etree
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFStream, PDFNotImplementedError
from cPickle import dump
from pickle import dump
from .config import cfg
from .ut import log, setupLogging, skip, Qnty, pf
from .irs import commandPtn, possibleColTypes, CrypticXml)
and context including class names, function names, or small code snippets from other files:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/irs.py
# class CrypticXml(Exception):
# def computeFormId(formName):
# def computeTitle(prefix, form = None):
# def sortableFieldname(fieldname):
# def intify(s):
# def possibleFilePrefixes(formName):
# def trailingLetters(s):
. Output only the next line. | coltype for coltype in possibleColTypes |
Given the following code snippet before the placeholder: <|code_start|>def ensurePathsAreUniq(fields):
fieldsbyid = set()
for f in fields:
if f['path'] in fieldsbyid:
log.error('dup paths [%s]', f['path'])
else:
fieldsbyid.add(f['path'])
assert len(fields) == len(fieldsbyid), 'dup paths? see log'
def xmlFromPdf(pdfpath, xmlpath=None):
'''find xfa data in pdf file'''
with open(pdfpath, 'rb') as fp:
parser = PDFParser(fp)
doc = PDFDocument(parser)
all_objids = set(objid for xref in doc.xrefs
for objid in xref.get_objids())
for objid in all_objids:
obj = doc.getobj(objid)
if not isinstance(obj, PDFStream):
continue
try:
data = obj.get_data()
except PDFNotImplementedError:
# eg for jpeg image: PDFNotImplementedError: Unsupported filter: /DCTDecode
continue
if b'xfa-template' in data:
break
else:
msg='Cannot find form data in %s' % pdfpath
<|code_end|>
, predict the next line using imports from the current file:
import sys
import six
import re
import os
import doctest
from sys import exc_info
from itertools import chain
from argparse import ArgumentParser
from lxml import etree
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFStream, PDFNotImplementedError
from cPickle import dump
from pickle import dump
from .config import cfg
from .ut import log, setupLogging, skip, Qnty, pf
from .irs import commandPtn, possibleColTypes, CrypticXml
and context including class names, function names, and sometimes code from other files:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/irs.py
# class CrypticXml(Exception):
# def computeFormId(formName):
# def computeTitle(prefix, form = None):
# def sortableFieldname(fieldname):
# def intify(s):
# def possibleFilePrefixes(formName):
# def trailingLetters(s):
. Output only the next line. | raise CrypticXml(msg) |
Predict the next line for this snippet: <|code_start|> ('line1', 'dollars')
>>> findLineAndUnit("Part 1. Persons ... (If ..., see the instructions.)"
" Line 1. Item 1. (a) Care provider's name. Caution. If the care was"
" provided in your home, you may owe employment taxes. If you do,"
" you cannot file Form 1040A. For details, see the instructions for"
" Form 1040, line 60a, or Form 1040N R, line 59a. 2 lines available"
" for entry.") # f2441
('line1', '')
'''
if isinstance(speak, six.binary_type):
speak = speak.decode('utf8')
findLineNum1 = re.search(r'(?:[\.\)]+\s*|^)(Line\s*\w+)\.(?:\s*\w\.)?',
speak)
# Line 62. a. etc
findLineNum2 = re.search(r'(?:\.\s*)(\d+)\.(?:\s*\w\.)?', speak)
# Exemptions. 62. a. etc
findLineNum3 = re.search(r'^(\d+\w*)\.\s', speak)
# 16b. ... eg 990/page6/line16b
units = re.findall(r'\.?\s*(Dollars|Cents)\.?', speak, re.I)
if findLineNum1:
# linenum is eg 'line62a' for 'Line 62. a. etc' or even for
# 'Exemptions. 62. a. etc'
linenum = findLineNum1.groups()[0]
elif findLineNum2:
linenum = 'line' + findLineNum2.groups()[0]
elif findLineNum3:
linenum = 'line' + findLineNum3.groups()[0]
else:
linenum = None
if re.search(r'line\s+\d+', speak, re.I):
<|code_end|>
with the help of current file imports:
import six
import re
from . import irs
from .ut import log, jj, ddict
and context from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
, which may contain function names, class names, or code. Output only the next line. | log.warn(jj('linenumNotFound: cannot find the linenum in:', speak)) |
Predict the next line after this snippet: <|code_start|> ('line1', 'dollars')
>>> findLineAndUnit("Part 1. Persons ... (If ..., see the instructions.)"
" Line 1. Item 1. (a) Care provider's name. Caution. If the care was"
" provided in your home, you may owe employment taxes. If you do,"
" you cannot file Form 1040A. For details, see the instructions for"
" Form 1040, line 60a, or Form 1040N R, line 59a. 2 lines available"
" for entry.") # f2441
('line1', '')
'''
if isinstance(speak, six.binary_type):
speak = speak.decode('utf8')
findLineNum1 = re.search(r'(?:[\.\)]+\s*|^)(Line\s*\w+)\.(?:\s*\w\.)?',
speak)
# Line 62. a. etc
findLineNum2 = re.search(r'(?:\.\s*)(\d+)\.(?:\s*\w\.)?', speak)
# Exemptions. 62. a. etc
findLineNum3 = re.search(r'^(\d+\w*)\.\s', speak)
# 16b. ... eg 990/page6/line16b
units = re.findall(r'\.?\s*(Dollars|Cents)\.?', speak, re.I)
if findLineNum1:
# linenum is eg 'line62a' for 'Line 62. a. etc' or even for
# 'Exemptions. 62. a. etc'
linenum = findLineNum1.groups()[0]
elif findLineNum2:
linenum = 'line' + findLineNum2.groups()[0]
elif findLineNum3:
linenum = 'line' + findLineNum3.groups()[0]
else:
linenum = None
if re.search(r'line\s+\d+', speak, re.I):
<|code_end|>
using the current file's imports:
import six
import re
from . import irs
from .ut import log, jj, ddict
and any relevant context from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
. Output only the next line. | log.warn(jj('linenumNotFound: cannot find the linenum in:', speak)) |
Predict the next line after this snippet: <|code_start|>
def uniqifyLinenums(ypozByLinenum, fieldsByLine, fieldsByLinenumYpos):
# compute and assign unique linenums
for lnum in ypozByLinenum:
# using noncentFields to ensure just one field per ypos
# eg dollar-and-cent pair usu at same ypos
# newcode but wks for 1040,1040sb
pg, linenumm = lnum
noncentFields = [
ff for ff in fieldsByLine[lnum]
if ff['unit'] != 'cents']
dupLinenumz = len(noncentFields) > 1
ypozz = [ff['ypos'] for ff in noncentFields]
for iypos, ypos in enumerate(sorted(ypozz)):
for ff in fieldsByLinenumYpos[(pg, linenumm, ypos)]:
if linenumm is None:
uniqlinenum = None
elif dupLinenumz:
# todo ensure the delimiter char ['_'] doesnt occur in any
# linenum
uniqlinenum = ff['linenum'] + '_' + str(1 + iypos)
else:
uniqlinenum = ff['linenum']
ff['uniqlinenum'] = uniqlinenum
def linkfields(form):
# link and classify fields: dollar and cent; by line; by name
fields = form.fields
<|code_end|>
using the current file's imports:
import six
import re
from . import irs
from .ut import log, jj, ddict
and any relevant context from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
. Output only the next line. | ypozByLinenum = ddict(set) |
Using the snippet: <|code_start|>
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README')
setup(
name='opentaxforms',
<|code_end|>
, determine the next line of code. You have imports:
from setuptools import setup
from opentaxforms.version import appversion
import io
import os
and context (class names, function names, or code) available:
# Path: opentaxforms/version.py
. Output only the next line. | version=appversion, |
Given snippet: <|code_start|> draws = form.draws
theform = form
class PrintableFunc(object):
# for debugging
def __call__(self, o):
return self.ypos == o.get('ypos', None)
def __repr__(self):
return str(self.ypos)
def __str__(self):
return str(self.ypos)
# maybeForms should be called formContext or expectingFormsOnThisLine
maybeForms = PrintableFunc()
class FormRefs(object):
# list of key,val,context tuples w/ set of keys for uniqness
def __init__(self):
self.set = set()
self.list = []
self.nErrs = 0
def add(self, *info):
if info[0] == 'err':
self.nErrs += 1
return False
elif info[0] == 'excludedform':
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
import six
from . import ut, irs
from .ut import log, jj, pathjoin, asciiOnly
from .config import cfg
and context:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
which might include code, classes, or functions. Output only the next line. | log.info('FormRefs: ignoring excludedform') |
Based on the snippet: <|code_start|> # because we are in Schedule B already,
# and we assume that 1040A uses 1040's sched B.
txt = txt.replace(match, '')
continue
elif inSameFamily(*formvalues):
# in 'schedX(form1 or form2) assume both forms use the same schedX
# if both forms are in the same form family [eg 1040 and 1040A]
# and separate schedX's dont occur in allpdfnames.
formvaluesToRemove=[f.lower() for f in formvalues
if ('f%ss%s'%(f,sched)).lower() not in cfg.allpdfnames]
formfields=[k for k,v in d.items() if k.startswith('form') and v.lower() not in formvaluesToRemove]
if len(formfields)==1:
scheduleContext[sched]=d[formfields[0]].lower()
for formfield in formfields:
form = d[formfield].upper()
if fieldIsOptional(formfield) and not form:
continue
for schedfield in schedfields:
sched = d[schedfield].upper()
if fieldIsOptional(schedfield) and not sched:
continue
# context highlights the match we found [for logging]
context = txt.replace(match, '[[' + match + ']]')
if formrefs.add(
*checkForm(
form, sched,
**dict(
iFormInLine=iFormInLine, draw=el, match=match,
form=formName, context=context))):
formsinline.append(
<|code_end|>
, predict the immediate next line with the help of imports:
import re
import six
from . import ut, irs
from .ut import log, jj, pathjoin, asciiOnly
from .config import cfg
and context (classes, functions, sometimes code) from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
. Output only the next line. | jj( |
Given the following code snippet before the placeholder: <|code_start|> if iword == 0:
matchingtext = fulltype + ' ' + txt
else:
matchingtext = txt
checkedForm = checkForm(key, **dict(
iFormInLine=iFormInLine, draw=el, match=matchingtext,
form=formName, call='words'))
if formrefs.add(*checkedForm):
formsinline.append(
jj(idraw, 'couldbe', key,
matchingtext, nicetext, delim='|'))
iFormInLine += 1
# section for forms announced in previous layout object
# eg 1040/54 Other credits from Form: a 3800 b 8801 c ____
if six.text_type(rawtext).strip(u' |\xa0').endswith('Form:'):
maybeForms.ypos = el['ypos']
elif maybeForms(el):
for txt in rawtext.strip().split():
txt = txt.strip(' .,;()|').upper()
if len(txt) > 1 and couldbeform(txt):
if formrefs.add(*checkForm(txt, **dict(
iFormInLine=iFormInLine, draw=el, match=txt,
form=formName, call='rawtext'))):
matchingtext = txt
formsinline.append(
jj(idraw, 'maybe', txt,
matchingtext, rawtext, delim='|'))
iFormInLine += 1
if lineHasForms:
lines.extend(formsinline)
<|code_end|>
, predict the next line using imports from the current file:
import re
import six
from . import ut, irs
from .ut import log, jj, pathjoin, asciiOnly
from .config import cfg
and context including class names, function names, and sometimes code from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
. Output only the next line. | with open(pathjoin(dirName, prefix) + '-refs.txt', 'wb') as f: |
Next line prediction: <|code_start|> class PrintableFunc(object):
# for debugging
def __call__(self, o):
return self.ypos == o.get('ypos', None)
def __repr__(self):
return str(self.ypos)
def __str__(self):
return str(self.ypos)
# maybeForms should be called formContext or expectingFormsOnThisLine
maybeForms = PrintableFunc()
class FormRefs(object):
# list of key,val,context tuples w/ set of keys for uniqness
def __init__(self):
self.set = set()
self.list = []
self.nErrs = 0
def add(self, *info):
if info[0] == 'err':
self.nErrs += 1
return False
elif info[0] == 'excludedform':
log.info('FormRefs: ignoring excludedform')
return False
(key, val), context = info
<|code_end|>
. Use current file imports:
(import re
import six
from . import ut, irs
from .ut import log, jj, pathjoin, asciiOnly
from .config import cfg)
and context including class names, function names, or small code snippets from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
. Output only the next line. | key=asciiOnly(key) |
Predict the next line for this snippet: <|code_start|>from __future__ import absolute_import
# nonforms are numbers that dont represent forms
nonforms = [str(yr) for yr in range(2000, 2050)]
''' nonformcontexts are text signals that the number to follow is not a form.
eg:
line 40
lines 40 through 49
pub 15
Form 1116, Part II
use Schedule EIC to give the IRS informationabout
2439[instructions] ... and the tax shown in box 2 on the Form 2439
for each owner must agree with the amounts on Copy B that you received
from the RIC or REIT.
3903: This amount should be shown in box 12 of your Form W-2 with code P
2015/f1040sd: Box A
8814/p4/line10 instructions: unrecaptured section 1250 gain, section 1202 gain, ...
'''
nonformcontexts = (
'box line lines through pub part parts section to the copy copies code'.split())
def findRefs(form):
<|code_end|>
with the help of current file imports:
import re
import six
from . import ut, irs
from .ut import log, jj, pathjoin, asciiOnly
from .config import cfg
and context from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
, which may contain function names, class names, or code. Output only the next line. | if 'r' not in cfg.steps: |
Continue the code snippet: <|code_start|> inserted_primary_key)
elif len(allmatches) == 1:
insertedpk, = allmatches[0]
else:
msg = 'too many [%d] rows in table [%s]' \
' match allegedly unique values [%s]' \
% (len(allmatches), table, seekfields)
raise Exception(msg)
return insertedpk
# todo switch to a memoize decorator
mem = ut.ddict(dict)
def getbycode(table, mem=mem, **kw):
def stripifstring(s):
try:
return s.strip("\" '")
except Exception:
return s
kw = dict([(k, stripifstring(v)) for k, v in kw.items()])
if kw['code'] in mem[table.name]:
i = mem[table.name][kw['code']]
else:
whereclause = (table.c.code == kw['code'])
matches = conn.execute(select([table.c.id], whereclause))
if matches.returns_rows:
i, = matches.first()
else:
<|code_end|>
. Use current file imports:
import os
import six
import sys
import doctest
from itertools import chain
from sqlalchemy import (
MetaData, create_engine, select, UniqueConstraint)
from . import ut, config
from .ut import log
from .config import cfg
and context (classes, functions, or code) from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
. Output only the next line. | log.debug(kw) |
Based on the snippet: <|code_start|>
engine, metadata, conn = None, None, None
def unicodify(dic):
for k, v in dic.items():
if isinstance(v, six.binary_type):
dic[k] = six.text_type(v.decode('utf-8'))
return dic
def connect(appname, **kw):
# default values
user = pw = 'user'
dbname = appname.lower()
# optionally override defaults
user = os.environ.get(appname.upper() + '_DBUSER', user)
pw = os.environ.get(appname.upper() + '_DBPASS', pw)
dbname = os.environ.get(appname.upper() + '_DBNAME', dbname)
global conn
conn, engine, metadata, md = connect_(user=user, pw=pw, db=dbname, **kw)
return conn, engine, metadata, md
def connect_(**kw):
# consumes keys from kw: user pw db
global conn, engine, metadata
config.setup(**kw)
if 'dirName' in kw:
del kw['dirName']
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import six
import sys
import doctest
from itertools import chain
from sqlalchemy import (
MetaData, create_engine, select, UniqueConstraint)
from . import ut, config
from .ut import log
from .config import cfg
and context (classes, functions, sometimes code) from other files:
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
#
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
. Output only the next line. | usepostgres = kw.get('postgres', False) if cfg is None else cfg.postgres |
Based on the snippet: <|code_start|> for dep in field['deps']
if (unit is None or unit == dep['unit']))
if '-' in signs:
result = ',"' + signs + '"'
elif requiredarg:
result = ',""'
else:
result = ''
if result:
jdb('<getSigns', result)
return result
def writeEmptyHtmlPages(form):
# generate html form fields overlaid on image of the form
if 'h' not in cfg.steps:
return
formName = form.formName
dirName = cfg.dirName
prefix = form.prefix
pageinfo = form.pageinfo
formrefs = form.refs
npages = len(pageinfo)
template = ut.Resource('opentaxforms', 'template/form.html').content()
# todo maybe use a real templating lib like jinja2
template = template.decode('utf8')
emptyHtml = (template.replace('{', '{{')
.replace('}', '}}')
.replace('[=[', '{')
.replace(']=]', '}'))
<|code_end|>
, predict the immediate next line with the help of imports:
import os.path
import re
import traceback
import doctest
from os import remove as removeFile
from itertools import chain
from . import ut
from .config import cfg, setup
from .irs import computeTitle, computeFormId, sortableFieldname
from .ut import log, jdb, Qnty, NL, pathjoin
from PIL import Image
and context (classes, functions, sometimes code) from other files:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/irs.py
# def computeTitle(prefix, form = None):
# '''
# >>> computeTitle('f1040')
# 'Form 1040'
# >>> computeTitle('f1040se')
# 'Form 1040 Schedule E'
# >>> computeTitle('f1040ez')
# 'Form 1040EZ'
# '''
# m = re.match(r'(\w)(\d+)([^s].*)?(?:s(\w))?$', prefix)
# if m:
# typ, num, suffix, sched = m.groups()
# suffix = suffix or ''
# if typ == 'f':
# doctype = 'Form'
# else:
# # todo temporary fallback for CRA forms
# #raise Exception('unknown doctype [%s]' % (typ, ))
# return form.docinfo['titl']
# if suffix:
# num += suffix.upper() # .upper for eg 1040EZ
# titleEls = [doctype, num]
# if sched:
# titleEls.extend(['Schedule', sched.upper()])
# title = ' '.join(el for el in titleEls if el)
# else:
# title = prefix.capitalize()
# return title
#
# def computeFormId(formName):
# '''
# >>> computeFormId('1040')
# '1040'
# >>> computeFormId(('1040','SE'))
# '1040sSE'
# '''
# try:
# form, sched = formName
# if sched is None:
# idd = form
# else:
# idd = form + 's' + sched
# except ValueError:
# idd = formName
# return idd
#
# def sortableFieldname(fieldname):
# '''
# to avoid lexicographic malordering: f1_19,f1_2,f1_20
# >>> sortableFieldname('f1_43_L0T')
# ['f', 1, '_', 43, '_L', 0, 'T']
# '''
#
# def intify(s):
# try:
# return int(s)
# except ValueError:
# return s
# try:
# segs = re.findall('(\D+|\d+)', fieldname)
# segs = [intify(seg) for seg in segs]
# return segs
# except Exception:
# excclass, exc, tb = sys.exc_info()
# new_exc = Exception(
# 'sortableFieldname: new exception: fieldname= ' + fieldname)
# raise new_exc.__class__(new_exc).with_traceback(tb)
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
. Output only the next line. | titlebase = computeTitle(prefix, form) |
Predict the next line for this snippet: <|code_start|> except Exception:
pass
fname = 'f' + form.lower().replace('-', '')
return fname
def computeFormTitle(form, parentForm=None):
try:
form, sched = form
if form == parentForm[1:]: # strip 'f' from parentForm
form = 'Sched %s' % (sched, )
elif sched is None:
form = 'Form %s' % (form, )
else:
form = 'Form %s Sched %s' % (form, sched, )
except ValueError:
form = 'Form ' + form
return form
def computePageTitle(titlebase, npage, npages):
if npages == 1:
title = titlebase
else:
title = titlebase + ' page {}'.format(npage)
return title
def createSvgFile(inputDirName, outputDirName, prefix, npage):
ipage = npage - 1
<|code_end|>
with the help of current file imports:
import os.path
import re
import traceback
import doctest
from os import remove as removeFile
from itertools import chain
from . import ut
from .config import cfg, setup
from .irs import computeTitle, computeFormId, sortableFieldname
from .ut import log, jdb, Qnty, NL, pathjoin
from PIL import Image
and context from other files:
# Path: opentaxforms/config.py
# def parseCmdline():
# def getFileList(dirName):
# def setLogname(rootForms,cfg):
# def copyStaticDir(appname):
# def setupStaticDir(dirName):
# def setup(**overrideArgs):
# def unsetup():
#
# Path: opentaxforms/irs.py
# def computeTitle(prefix, form = None):
# '''
# >>> computeTitle('f1040')
# 'Form 1040'
# >>> computeTitle('f1040se')
# 'Form 1040 Schedule E'
# >>> computeTitle('f1040ez')
# 'Form 1040EZ'
# '''
# m = re.match(r'(\w)(\d+)([^s].*)?(?:s(\w))?$', prefix)
# if m:
# typ, num, suffix, sched = m.groups()
# suffix = suffix or ''
# if typ == 'f':
# doctype = 'Form'
# else:
# # todo temporary fallback for CRA forms
# #raise Exception('unknown doctype [%s]' % (typ, ))
# return form.docinfo['titl']
# if suffix:
# num += suffix.upper() # .upper for eg 1040EZ
# titleEls = [doctype, num]
# if sched:
# titleEls.extend(['Schedule', sched.upper()])
# title = ' '.join(el for el in titleEls if el)
# else:
# title = prefix.capitalize()
# return title
#
# def computeFormId(formName):
# '''
# >>> computeFormId('1040')
# '1040'
# >>> computeFormId(('1040','SE'))
# '1040sSE'
# '''
# try:
# form, sched = formName
# if sched is None:
# idd = form
# else:
# idd = form + 's' + sched
# except ValueError:
# idd = formName
# return idd
#
# def sortableFieldname(fieldname):
# '''
# to avoid lexicographic malordering: f1_19,f1_2,f1_20
# >>> sortableFieldname('f1_43_L0T')
# ['f', 1, '_', 43, '_L', 0, 'T']
# '''
#
# def intify(s):
# try:
# return int(s)
# except ValueError:
# return s
# try:
# segs = re.findall('(\D+|\d+)', fieldname)
# segs = [intify(seg) for seg in segs]
# return segs
# except Exception:
# excclass, exc, tb = sys.exc_info()
# new_exc = Exception(
# 'sortableFieldname: new exception: fieldname= ' + fieldname)
# raise new_exc.__class__(new_exc).with_traceback(tb)
#
# Path: opentaxforms/ut.py
# NL = '\n'
# TAB = '\t'
# def merge(bb1, bb2):
# def numerify(s):
# def compactify(multilineRegex):
# def crunch(seg):
# def pickle(data, pickleFilePrefix):
# def unpickle(pickleFilePrefix, default=None):
# def flattened(l):
# def hasdups(l, key=None):
# def uniqify(l):
# def uniqify2(l):
# def setupLogging(loggerId, args=None):
# def unsetupLogging():
# def logg(msg, outputs=None):
# def jj(*args, **kw):
# def jdb(*args, **kw):
# def run0(cmd):
# def run(cmd, logprefix='run', loglevel='INFO'):
# def __init__(self, pkgname, fpath=None):
# def path(self):
# def content(self):
# def keys(cls):
# def vals(cls):
# def items(cls):
# def __init__(self):
# def __call__(self, **kw):
# def __init__(self, *maps, **kw):
# def _getGetitems(self, mapp):
# def __getitem__(self, key):
# def __setitem__(self, key, val):
# def __len__(self):
# def __call__(self, *keys):
# def clear(self):
# def update(self, *maps):
# def __add__(self, *maps):
# def __iadd__(self, *maps):
# def __iter__(self):
# def iterkeys(self):
# def keys(self):
# def values(self):
# def items(self):
# def iteritems(self):
# def get(self, key, dflt=None):
# def __str__(self):
# def __repr__(self):
# def notequalpatch(self, o):
# def fromstring(cls, s):
# def __hash__(self):
# def playQnty():
# def nth(n):
# def skip(s, substr):
# def until(s, substr):
# def ensure_dir(folder):
# def now(format=None):
# def readImgSize(fname, dirName):
# def asciiOnly(s):
# class NoSuchPickle(Exception):
# class PickleException(Exception):
# class Resource(object):
# class CharEnum(object):
# class ChainablyUpdatableOrderedDict(odict):
# class Bag(object):
# class Qnty(qq):
, which may contain function names, class names, or code. Output only the next line. | infpath = pathjoin(inputDirName,'{}.pdf'.format(prefix)) |
Using the snippet: <|code_start|>
logging.disable(logging.CRITICAL)
USER_DICT = dict(username='tester', email='foo@bar.com')
def get_user_callback(request):
<|code_end|>
, determine the next line of code. You have imports:
import json
import logging
import pytest
import responses
from wh_habitica import default
from wh_habitica.forms import AuthForm
from wunderhabit.factories import UserFactory
from wh_habitica.tests import utils
and context (class names, function names, or code) available:
# Path: wh_habitica/default.py
# AUTH_URL = 'url'
# AUTH_HEADER_CLIENT = 'x-api-user'
# AUTH_HEADER_TOKEN = 'x-api-key'
# API_HOST = 'https://habitica.com'
# API_BASE_URL = API_HOST + '/api/v3'
# GET_STATUS = API_BASE_URL + '/status'
# GET_USER = API_BASE_URL + '/user'
# GET_USER_ANONYMIZED = API_BASE_URL + '/user/anonymized'
# POST_TASK = API_BASE_URL + '/tasks/user/{id}/{direction}'
# JSON_STATUS = 'status'
# JSON_UP = 'up'
# JSON_DOWN = 'down'
# JSON_ID = 'id'
# JSON_DELTA = 'delta'
# JSON_BUFFS = 'buffs'
# JSON_AUTH = 'auth'
# JSON_LOCAL = 'local'
# JSON_FACEBOOK = 'facebook'
# JSON_GOOGLE = 'google'
# JSON_GOOGLE_EMAILS = 'emails'
# JSON_DISPLAY_NAME = 'displayName'
# JSON_FORMAT_JSON = '_json'
# JSON_USERNAME = 'username'
# JSON_EMAIL = 'email'
# JSON_NAME = 'name'
# JSON_VALUE = 'value'
# JSON_TYPE = 'type'
#
# Path: wh_habitica/forms.py
# class AuthForm(forms.ModelForm):
# """
# Form to enter and validate the Habitica authentication credentials.
# """
#
# AUTH_ERROR = _('Could not authenticate to Habitica. Please check the User '
# 'ID and the API Token.')
# HABITICA_ERROR = _('Something went wrong while loading Habitica user data.'
# ' Please try again or contact the admin.')
#
# class Meta:
# model = Habitica
# fields = ['user_id', 'api_token']
#
# def clean(self):
# cleaned_data = super(AuthForm, self).clean()
#
# api = HabiticaApi(cleaned_data['user_id'], cleaned_data['api_token'])
# user_details = api.get_user_details()
#
# # Validate authentication
# if not user_details or cleaned_data['user_id'] != user_details[default.JSON_ID]:
# raise forms.ValidationError(self.AUTH_ERROR)
#
# # Get optional user details
# try:
# self.instance.name = user_details[default.JSON_NAME]
# self.instance.email = user_details[default.JSON_EMAIL]
# except (ValueError, KeyError):
# logger.exception('Could not get user details: %s', str(user_details))
#
# return cleaned_data
#
# Path: wunderhabit/factories.py
# class UserFactory(factory.django.DjangoModelFactory):
# class Meta:
# model = get_user_model()
#
# username = factory.Sequence(lambda n: "user_%d" % n)
# first_name = 'John'
# last_name = 'Doe'
#
# Path: wh_habitica/tests/utils.py
# API_STATUS_UP = {default.JSON_STATUS: default.JSON_UP}
# LOCAL_NAME = 'John Local'
# FACEBOOK_NAME = 'John Facebook'
# FACEBOOK_ID = '1337'
# GOOGLE_NAME = 'John Google'
# USER_EMAIL = 'john@doe.com'
# API_USER = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_FACEBOOK: {},
# default.JSON_GOOGLE: {},
# default.JSON_LOCAL: {default.JSON_USERNAME: LOCAL_NAME, default.JSON_EMAIL: USER_EMAIL},
# }
# }
# API_USER_FACEBOOK = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_LOCAL: {},
# default.JSON_GOOGLE: {},
# default.JSON_FACEBOOK: {
# default.JSON_DISPLAY_NAME: FACEBOOK_NAME,
# default.JSON_ID: FACEBOOK_ID,
# }
# }
# }
# API_USER_GOOGLE = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_LOCAL: {},
# default.JSON_FACEBOOK: {},
# default.JSON_GOOGLE: {
# default.JSON_DISPLAY_NAME: GOOGLE_NAME,
# default.JSON_GOOGLE_EMAILS: [{default.JSON_TYPE: u'account', default.JSON_VALUE: USER_EMAIL}],
# }
# }
# }
# API_USER_INVALID = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_FACEBOOK: {},
# default.JSON_GOOGLE: {},
# default.JSON_LOCAL: {},
# 'Invalid_Provider': {default.JSON_USERNAME: LOCAL_NAME, default.JSON_EMAIL: USER_EMAIL},
# }
# }
# API_TASK = {default.JSON_DELTA: 1}
# def mock_habitica_api(monkeypatch):
# def mock_habitica_api_facebook(monkeypatch):
# def mock_habitica_api_google(monkeypatch):
# def mock_habitica_api_invalid_provider(monkeypatch):
. Output only the next line. | if request.headers.get(default.AUTH_HEADER_TOKEN) != 'correct': |
Using the snippet: <|code_start|>
else:
# Local auth provider
data = utils.API_USER
data[default.JSON_ID] = request.headers.get(default.AUTH_HEADER_CLIENT)
return 200, {}, json.dumps({'data': data})
@pytest.fixture
def get_user():
responses.add_callback(
responses.GET, default.GET_USER,
callback=get_user_callback,
content_type='application/json',
)
@pytest.fixture
def api_error():
responses.add(
responses.GET, default.GET_USER,
body=Exception('Some Habitica API Error occurred!'),
content_type='application/json',
)
@responses.activate
def test_auth_error(get_user):
# bad api auth result
<|code_end|>
, determine the next line of code. You have imports:
import json
import logging
import pytest
import responses
from wh_habitica import default
from wh_habitica.forms import AuthForm
from wunderhabit.factories import UserFactory
from wh_habitica.tests import utils
and context (class names, function names, or code) available:
# Path: wh_habitica/default.py
# AUTH_URL = 'url'
# AUTH_HEADER_CLIENT = 'x-api-user'
# AUTH_HEADER_TOKEN = 'x-api-key'
# API_HOST = 'https://habitica.com'
# API_BASE_URL = API_HOST + '/api/v3'
# GET_STATUS = API_BASE_URL + '/status'
# GET_USER = API_BASE_URL + '/user'
# GET_USER_ANONYMIZED = API_BASE_URL + '/user/anonymized'
# POST_TASK = API_BASE_URL + '/tasks/user/{id}/{direction}'
# JSON_STATUS = 'status'
# JSON_UP = 'up'
# JSON_DOWN = 'down'
# JSON_ID = 'id'
# JSON_DELTA = 'delta'
# JSON_BUFFS = 'buffs'
# JSON_AUTH = 'auth'
# JSON_LOCAL = 'local'
# JSON_FACEBOOK = 'facebook'
# JSON_GOOGLE = 'google'
# JSON_GOOGLE_EMAILS = 'emails'
# JSON_DISPLAY_NAME = 'displayName'
# JSON_FORMAT_JSON = '_json'
# JSON_USERNAME = 'username'
# JSON_EMAIL = 'email'
# JSON_NAME = 'name'
# JSON_VALUE = 'value'
# JSON_TYPE = 'type'
#
# Path: wh_habitica/forms.py
# class AuthForm(forms.ModelForm):
# """
# Form to enter and validate the Habitica authentication credentials.
# """
#
# AUTH_ERROR = _('Could not authenticate to Habitica. Please check the User '
# 'ID and the API Token.')
# HABITICA_ERROR = _('Something went wrong while loading Habitica user data.'
# ' Please try again or contact the admin.')
#
# class Meta:
# model = Habitica
# fields = ['user_id', 'api_token']
#
# def clean(self):
# cleaned_data = super(AuthForm, self).clean()
#
# api = HabiticaApi(cleaned_data['user_id'], cleaned_data['api_token'])
# user_details = api.get_user_details()
#
# # Validate authentication
# if not user_details or cleaned_data['user_id'] != user_details[default.JSON_ID]:
# raise forms.ValidationError(self.AUTH_ERROR)
#
# # Get optional user details
# try:
# self.instance.name = user_details[default.JSON_NAME]
# self.instance.email = user_details[default.JSON_EMAIL]
# except (ValueError, KeyError):
# logger.exception('Could not get user details: %s', str(user_details))
#
# return cleaned_data
#
# Path: wunderhabit/factories.py
# class UserFactory(factory.django.DjangoModelFactory):
# class Meta:
# model = get_user_model()
#
# username = factory.Sequence(lambda n: "user_%d" % n)
# first_name = 'John'
# last_name = 'Doe'
#
# Path: wh_habitica/tests/utils.py
# API_STATUS_UP = {default.JSON_STATUS: default.JSON_UP}
# LOCAL_NAME = 'John Local'
# FACEBOOK_NAME = 'John Facebook'
# FACEBOOK_ID = '1337'
# GOOGLE_NAME = 'John Google'
# USER_EMAIL = 'john@doe.com'
# API_USER = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_FACEBOOK: {},
# default.JSON_GOOGLE: {},
# default.JSON_LOCAL: {default.JSON_USERNAME: LOCAL_NAME, default.JSON_EMAIL: USER_EMAIL},
# }
# }
# API_USER_FACEBOOK = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_LOCAL: {},
# default.JSON_GOOGLE: {},
# default.JSON_FACEBOOK: {
# default.JSON_DISPLAY_NAME: FACEBOOK_NAME,
# default.JSON_ID: FACEBOOK_ID,
# }
# }
# }
# API_USER_GOOGLE = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_LOCAL: {},
# default.JSON_FACEBOOK: {},
# default.JSON_GOOGLE: {
# default.JSON_DISPLAY_NAME: GOOGLE_NAME,
# default.JSON_GOOGLE_EMAILS: [{default.JSON_TYPE: u'account', default.JSON_VALUE: USER_EMAIL}],
# }
# }
# }
# API_USER_INVALID = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_FACEBOOK: {},
# default.JSON_GOOGLE: {},
# default.JSON_LOCAL: {},
# 'Invalid_Provider': {default.JSON_USERNAME: LOCAL_NAME, default.JSON_EMAIL: USER_EMAIL},
# }
# }
# API_TASK = {default.JSON_DELTA: 1}
# def mock_habitica_api(monkeypatch):
# def mock_habitica_api_facebook(monkeypatch):
# def mock_habitica_api_google(monkeypatch):
# def mock_habitica_api_invalid_provider(monkeypatch):
. Output only the next line. | res = AuthForm(data=dict(user_id='foo', api_token='baz')) |
Next line prediction: <|code_start|>
logging.disable(logging.CRITICAL)
USER_DICT = dict(username='tester', email='foo@bar.com')
def get_user_callback(request):
if request.headers.get(default.AUTH_HEADER_TOKEN) != 'correct':
return (401, {}, {
"err": "No user found."
})
if request.headers.get(default.AUTH_HEADER_CLIENT) == 'facebook_auth':
<|code_end|>
. Use current file imports:
(import json
import logging
import pytest
import responses
from wh_habitica import default
from wh_habitica.forms import AuthForm
from wunderhabit.factories import UserFactory
from wh_habitica.tests import utils)
and context including class names, function names, or small code snippets from other files:
# Path: wh_habitica/default.py
# AUTH_URL = 'url'
# AUTH_HEADER_CLIENT = 'x-api-user'
# AUTH_HEADER_TOKEN = 'x-api-key'
# API_HOST = 'https://habitica.com'
# API_BASE_URL = API_HOST + '/api/v3'
# GET_STATUS = API_BASE_URL + '/status'
# GET_USER = API_BASE_URL + '/user'
# GET_USER_ANONYMIZED = API_BASE_URL + '/user/anonymized'
# POST_TASK = API_BASE_URL + '/tasks/user/{id}/{direction}'
# JSON_STATUS = 'status'
# JSON_UP = 'up'
# JSON_DOWN = 'down'
# JSON_ID = 'id'
# JSON_DELTA = 'delta'
# JSON_BUFFS = 'buffs'
# JSON_AUTH = 'auth'
# JSON_LOCAL = 'local'
# JSON_FACEBOOK = 'facebook'
# JSON_GOOGLE = 'google'
# JSON_GOOGLE_EMAILS = 'emails'
# JSON_DISPLAY_NAME = 'displayName'
# JSON_FORMAT_JSON = '_json'
# JSON_USERNAME = 'username'
# JSON_EMAIL = 'email'
# JSON_NAME = 'name'
# JSON_VALUE = 'value'
# JSON_TYPE = 'type'
#
# Path: wh_habitica/forms.py
# class AuthForm(forms.ModelForm):
# """
# Form to enter and validate the Habitica authentication credentials.
# """
#
# AUTH_ERROR = _('Could not authenticate to Habitica. Please check the User '
# 'ID and the API Token.')
# HABITICA_ERROR = _('Something went wrong while loading Habitica user data.'
# ' Please try again or contact the admin.')
#
# class Meta:
# model = Habitica
# fields = ['user_id', 'api_token']
#
# def clean(self):
# cleaned_data = super(AuthForm, self).clean()
#
# api = HabiticaApi(cleaned_data['user_id'], cleaned_data['api_token'])
# user_details = api.get_user_details()
#
# # Validate authentication
# if not user_details or cleaned_data['user_id'] != user_details[default.JSON_ID]:
# raise forms.ValidationError(self.AUTH_ERROR)
#
# # Get optional user details
# try:
# self.instance.name = user_details[default.JSON_NAME]
# self.instance.email = user_details[default.JSON_EMAIL]
# except (ValueError, KeyError):
# logger.exception('Could not get user details: %s', str(user_details))
#
# return cleaned_data
#
# Path: wunderhabit/factories.py
# class UserFactory(factory.django.DjangoModelFactory):
# class Meta:
# model = get_user_model()
#
# username = factory.Sequence(lambda n: "user_%d" % n)
# first_name = 'John'
# last_name = 'Doe'
#
# Path: wh_habitica/tests/utils.py
# API_STATUS_UP = {default.JSON_STATUS: default.JSON_UP}
# LOCAL_NAME = 'John Local'
# FACEBOOK_NAME = 'John Facebook'
# FACEBOOK_ID = '1337'
# GOOGLE_NAME = 'John Google'
# USER_EMAIL = 'john@doe.com'
# API_USER = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_FACEBOOK: {},
# default.JSON_GOOGLE: {},
# default.JSON_LOCAL: {default.JSON_USERNAME: LOCAL_NAME, default.JSON_EMAIL: USER_EMAIL},
# }
# }
# API_USER_FACEBOOK = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_LOCAL: {},
# default.JSON_GOOGLE: {},
# default.JSON_FACEBOOK: {
# default.JSON_DISPLAY_NAME: FACEBOOK_NAME,
# default.JSON_ID: FACEBOOK_ID,
# }
# }
# }
# API_USER_GOOGLE = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_LOCAL: {},
# default.JSON_FACEBOOK: {},
# default.JSON_GOOGLE: {
# default.JSON_DISPLAY_NAME: GOOGLE_NAME,
# default.JSON_GOOGLE_EMAILS: [{default.JSON_TYPE: u'account', default.JSON_VALUE: USER_EMAIL}],
# }
# }
# }
# API_USER_INVALID = {
# default.JSON_ID: 42,
# default.JSON_AUTH: {
# default.JSON_FACEBOOK: {},
# default.JSON_GOOGLE: {},
# default.JSON_LOCAL: {},
# 'Invalid_Provider': {default.JSON_USERNAME: LOCAL_NAME, default.JSON_EMAIL: USER_EMAIL},
# }
# }
# API_TASK = {default.JSON_DELTA: 1}
# def mock_habitica_api(monkeypatch):
# def mock_habitica_api_facebook(monkeypatch):
# def mock_habitica_api_google(monkeypatch):
# def mock_habitica_api_invalid_provider(monkeypatch):
. Output only the next line. | data = utils.API_USER_FACEBOOK |
Next line prediction: <|code_start|>
def get_user():
"""
Returns a user which is connected with wunderlist and habitica.
"""
wunderlist = WunderlistFactory.create()
<|code_end|>
. Use current file imports:
(import pytest
from wunderlist.factories import WunderlistFactory
from wh_habitica.factories import HabiticaFactory)
and context including class names, function names, or small code snippets from other files:
# Path: wunderlist/factories.py
# class WunderlistFactory(factory.django.DjangoModelFactory):
# class Meta:
# model = Wunderlist
#
# user_id = factory.Sequence(lambda n: "%d" % n)
# name = 'John'
# email = 'john@doe.com'
# api_token = get_random_string(32)
# owner = factory.SubFactory(UserFactory)
#
# Path: wh_habitica/factories.py
# class HabiticaFactory(factory.django.DjangoModelFactory):
# class Meta:
# model = Habitica
#
# user_id = factory.Sequence(lambda n: "%d" % n)
# name = 'John'
# email = 'john@doe.com'
# api_token = get_random_string(32)
# owner = factory.SubFactory(UserFactory)
. Output only the next line. | habitica = HabiticaFactory.create() |
Based on the snippet: <|code_start|>
USER_ID = 42
USER_NAME = 'John Wunderlist'
USER_EMAIL = 'john@doe.com'
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from wunderlist import default
and context (classes, functions, sometimes code) from other files:
# Path: wunderlist/default.py
# AUTH_URL = 'https://www.wunderlist.com/oauth/authorize?client_id={client_id}&redirect_uri={url}&state={state}'
# AUTH_POST = 'https://www.wunderlist.com/oauth/access_token'
# AUTH_POST_CLIENT = 'client_id'
# AUTH_POST_SECRET = 'client_secret'
# AUTH_POST_CODE = 'code'
# AUTH_POST_TOKEN = 'access_token'
# AUTH_HEADER_CLIENT = 'X-Client-ID'
# AUTH_HEADER_TOKEN = 'X-Access-Token'
# GET_USER = 'https://a.wunderlist.com/api/v1/user'
# GET_LISTS = 'https://a.wunderlist.com/api/v1/lists'
# GET_WEBHOOKS = 'https://a.wunderlist.com/api/v1/webhooks?list_id={list_id}'
# POST_WEBHOOK = 'https://a.wunderlist.com/api/v1/webhooks'
# DELETE_WEBHOOK = 'https://a.wunderlist.com/api/v1/webhooks/{id}'
# JSON_ID = 'id'
# JSON_USER_ID = 'user_id'
# JSON_NAME = 'name'
# JSON_EMAIL = 'email'
# JSON_CREATED_AT = 'created_at'
# JSON_REVISION = 'revision'
# JSON_LIST_ID = 'list_id'
# JSON_URL = 'url'
# JSON_PROCESSOR_TYPE = 'processor_type'
# JSON_CONFIGURATION = 'configuration'
# JSON_SUBJECT = 'subject'
# JSON_TYPE = 'type'
# JSON_OPERATION = 'operation'
# JSON_BEFORE = 'before'
# JSON_AFTER = 'after'
# JSON_COMPLETED = 'completed'
# JSON_TITLE = 'title'
# OPERATION_CREATE = 'create'
# OPERATION_UPDATE = 'update'
# SUBJECT_TASK = 'task'
# SUBJECT_SUBTASK = 'subtask'
# SUBJECT_LIST = 'list'
. Output only the next line. | API_USER = {default.JSON_ID: USER_ID, default.JSON_NAME: USER_NAME, default.JSON_EMAIL: USER_EMAIL} |
Using the snippet: <|code_start|>
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
url(r'^contact/$', TemplateView.as_view(template_name='wunderhabit/contact.html'), name='contact'),
url(r'^privacy/$', TemplateView.as_view(template_name='wunderhabit/privacy.html'), name='privacy'),
url(r'^dashboard/$', views.dashboard, name='dashboard'),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^account/delete/$', views.delete_account, name='delete_account'),
url(r'^account/test/$', views.test_authentication, name='test_authentication'),
url(r'^add/$', views.add_connection, name='add'),
url(r'^delete/(?P<connection_id>\d+)/$', views.delete_connection, name='delete'),
url(r'^habitica/', include(habitica_urls, namespace='habitica')),
<|code_end|>
, determine the next line of code. You have imports:
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from . import views
from wunderlist import urls as wunderlist_urls
from wh_habitica import urls as habitica_urls
and context (class names, function names, or code) available:
# Path: wunderlist/urls.py
#
# Path: wh_habitica/urls.py
. Output only the next line. | url(r'^wunderlist/', include(wunderlist_urls, namespace='wunderlist')), |
Next line prediction: <|code_start|>
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
url(r'^contact/$', TemplateView.as_view(template_name='wunderhabit/contact.html'), name='contact'),
url(r'^privacy/$', TemplateView.as_view(template_name='wunderhabit/privacy.html'), name='privacy'),
url(r'^dashboard/$', views.dashboard, name='dashboard'),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^account/delete/$', views.delete_account, name='delete_account'),
url(r'^account/test/$', views.test_authentication, name='test_authentication'),
url(r'^add/$', views.add_connection, name='add'),
url(r'^delete/(?P<connection_id>\d+)/$', views.delete_connection, name='delete'),
<|code_end|>
. Use current file imports:
(from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from . import views
from wunderlist import urls as wunderlist_urls
from wh_habitica import urls as habitica_urls)
and context including class names, function names, or small code snippets from other files:
# Path: wunderlist/urls.py
#
# Path: wh_habitica/urls.py
. Output only the next line. | url(r'^habitica/', include(habitica_urls, namespace='habitica')), |
Using the snippet: <|code_start|>
class HabiticaFactory(factory.django.DjangoModelFactory):
class Meta:
model = Habitica
user_id = factory.Sequence(lambda n: "%d" % n)
name = 'John'
email = 'john@doe.com'
api_token = get_random_string(32)
<|code_end|>
, determine the next line of code. You have imports:
import factory
from django.utils.crypto import get_random_string
from .models import Habitica
from wunderhabit.factories import UserFactory
and context (class names, function names, or code) available:
# Path: wh_habitica/models.py
# class Habitica(models.Model):
# """
# Habitica API Endpoint
# """
#
# user_id = models.CharField(_('User ID'), max_length=255, blank=True)
# name = models.CharField(_('Name'), max_length=255, blank=True)
# email = models.EmailField(_('Email'), blank=True)
# api_token = models.CharField(_('API Token'), max_length=255, blank=True)
# owner = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='habitica')
#
# created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
# modified_at = models.DateTimeField(_("Modified at"), auto_now=True)
#
# def get_api(self):
# """
# Returns the Habitica API object.
# """
#
# return HabiticaApi(self.user_id, self.api_token)
#
# class Meta:
# db_table = 'habitica_habitica'
#
# Path: wunderhabit/factories.py
# class UserFactory(factory.django.DjangoModelFactory):
# class Meta:
# model = get_user_model()
#
# username = factory.Sequence(lambda n: "user_%d" % n)
# first_name = 'John'
# last_name = 'Doe'
. Output only the next line. | owner = factory.SubFactory(UserFactory) |
Predict the next line after this snippet: <|code_start|>
class HabiticaAdmin(admin.ModelAdmin):
fieldsets = [
(
'Wunderlist',
{'fields': ['user_id', 'name', 'email', 'api_token', 'owner', 'created_at', 'modified_at']}
)
]
readonly_fields = ['created_at', 'modified_at']
list_display = ['user_id', 'name', 'email', 'owner', 'created_at', 'modified_at']
<|code_end|>
using the current file's imports:
from django.contrib import admin
from .models import Habitica
and any relevant context from other files:
# Path: wh_habitica/models.py
# class Habitica(models.Model):
# """
# Habitica API Endpoint
# """
#
# user_id = models.CharField(_('User ID'), max_length=255, blank=True)
# name = models.CharField(_('Name'), max_length=255, blank=True)
# email = models.EmailField(_('Email'), blank=True)
# api_token = models.CharField(_('API Token'), max_length=255, blank=True)
# owner = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='habitica')
#
# created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
# modified_at = models.DateTimeField(_("Modified at"), auto_now=True)
#
# def get_api(self):
# """
# Returns the Habitica API object.
# """
#
# return HabiticaApi(self.user_id, self.api_token)
#
# class Meta:
# db_table = 'habitica_habitica'
. Output only the next line. | admin.site.register(Habitica, HabiticaAdmin) |
Continue the code snippet: <|code_start|>
logger = logging.getLogger('wunderhabit')
class AuthForm(forms.ModelForm):
"""
Form to enter and validate the Habitica authentication credentials.
"""
AUTH_ERROR = _('Could not authenticate to Habitica. Please check the User '
'ID and the API Token.')
HABITICA_ERROR = _('Something went wrong while loading Habitica user data.'
' Please try again or contact the admin.')
class Meta:
<|code_end|>
. Use current file imports:
import logging
from django import forms
from django.utils.translation import ugettext_lazy as _
from . import default
from .models import Habitica
from .api import HabiticaApi
and context (classes, functions, or code) from other files:
# Path: wh_habitica/models.py
# class Habitica(models.Model):
# """
# Habitica API Endpoint
# """
#
# user_id = models.CharField(_('User ID'), max_length=255, blank=True)
# name = models.CharField(_('Name'), max_length=255, blank=True)
# email = models.EmailField(_('Email'), blank=True)
# api_token = models.CharField(_('API Token'), max_length=255, blank=True)
# owner = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='habitica')
#
# created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
# modified_at = models.DateTimeField(_("Modified at"), auto_now=True)
#
# def get_api(self):
# """
# Returns the Habitica API object.
# """
#
# return HabiticaApi(self.user_id, self.api_token)
#
# class Meta:
# db_table = 'habitica_habitica'
#
# Path: wh_habitica/api.py
# class HabiticaApi(hlib.api.Habitica):
# def __init__(self, user_id, api_token):
# headers = {
# default.AUTH_URL: default.API_HOST,
# default.AUTH_HEADER_CLIENT: user_id,
# default.AUTH_HEADER_TOKEN: api_token
# }
# super(HabiticaApi, self).__init__(auth=headers)
#
# def get_status(self):
# """
# Returns the Habitica server status.
# """
#
# try:
# return self.status()[default.JSON_STATUS] == default.JSON_UP
# except Exception:
# logger.exception('Could not get Habitica status.')
# return False
#
# def get_user(self):
# """
# Returns the full user object.
# """
#
# try:
# user = self.user()
# except Exception as e:
# logger.exception('Could not load Habitica user: ' + str(e))
# return None
#
# return user
#
# def get_user_details(self):
# """
# Parses the user details like the username or email from the Habitica user object.
# """
#
# user = self.get_user()
# if not user or default.JSON_AUTH not in user:
# return None
#
# auth_details = user[default.JSON_AUTH]
# user_details = dict()
#
# # Parse user id
# try:
# user_details[default.JSON_ID] = user[default.JSON_ID]
# except Exception:
# logger.exception('Could not find Habitica user id: ' + str(user))
#
# # Parse user details
# if default.JSON_LOCAL in auth_details and auth_details[default.JSON_LOCAL]:
# # User is authenticated with Habitica account
# try:
# auth_local = auth_details[default.JSON_LOCAL]
# user_details[default.JSON_EMAIL] = auth_local[default.JSON_EMAIL]
# user_details[default.JSON_NAME] = auth_local[default.JSON_USERNAME]
# except Exception:
# logger.exception('Could not parse Habitica user with local auth: ' + str(user))
#
# elif default.JSON_FACEBOOK in auth_details and auth_details[default.JSON_FACEBOOK]:
# # User is authenticated with facebook
# try:
# auth_facebook = auth_details[default.JSON_FACEBOOK]
# user_details[default.JSON_NAME] = auth_facebook[default.JSON_DISPLAY_NAME]
# except Exception:
# logger.exception('Could not parse Habitica user with Facebook auth: ' + str(user))
#
# elif default.JSON_GOOGLE in auth_details and auth_details[default.JSON_GOOGLE]:
# # User is authenticated with google
# try:
# auth_google = auth_details[default.JSON_GOOGLE]
# user_details[default.JSON_NAME] = auth_google[default.JSON_DISPLAY_NAME]
# user_details[default.JSON_EMAIL] = auth_google[default.JSON_GOOGLE_EMAILS][0][default.JSON_VALUE]
# except Exception:
# logger.exception('Could not parse Habitica user with Google auth: ' + str(user))
#
# else:
# # No valid authentication provider found
# logger.error('No valid Habitica auth provider found: ' + str(user))
#
# return user_details
#
# def get_habits(self):
# return self.user.tasks(type='habits')
#
# def get_habits_list_choices(self):
# """
# Returns a tuple with the available habits (possibly empty) and a boolean indicating the success of the api request.
# """
#
# habits = self.get_habits()
#
# if habits is None:
# return [], False
#
# choices = [(l['id'], l['text']) for l in habits]
# choices = sorted(choices, key=lambda x: x[1])
# return choices, True
#
# def post_task(self, task_id, up=True):
# """
# Up- or down-scores a task specified by the task_id.
# """
#
# if up:
# score = default.JSON_UP
# else:
# score = default.JSON_DOWN
#
# return self.user.tasks(_id=task_id, _direction=score, _method='post')
#
# def test_auth(self):
# """
# Tests whether the authentication credentials work or not.
# """
#
# user = self.get_user()
# if user:
# return True
# else:
# return False
. Output only the next line. | model = Habitica |
Next line prediction: <|code_start|>
logger = logging.getLogger('wunderhabit')
class AuthForm(forms.ModelForm):
"""
Form to enter and validate the Habitica authentication credentials.
"""
AUTH_ERROR = _('Could not authenticate to Habitica. Please check the User '
'ID and the API Token.')
HABITICA_ERROR = _('Something went wrong while loading Habitica user data.'
' Please try again or contact the admin.')
class Meta:
model = Habitica
fields = ['user_id', 'api_token']
def clean(self):
cleaned_data = super(AuthForm, self).clean()
<|code_end|>
. Use current file imports:
(import logging
from django import forms
from django.utils.translation import ugettext_lazy as _
from . import default
from .models import Habitica
from .api import HabiticaApi)
and context including class names, function names, or small code snippets from other files:
# Path: wh_habitica/models.py
# class Habitica(models.Model):
# """
# Habitica API Endpoint
# """
#
# user_id = models.CharField(_('User ID'), max_length=255, blank=True)
# name = models.CharField(_('Name'), max_length=255, blank=True)
# email = models.EmailField(_('Email'), blank=True)
# api_token = models.CharField(_('API Token'), max_length=255, blank=True)
# owner = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='habitica')
#
# created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
# modified_at = models.DateTimeField(_("Modified at"), auto_now=True)
#
# def get_api(self):
# """
# Returns the Habitica API object.
# """
#
# return HabiticaApi(self.user_id, self.api_token)
#
# class Meta:
# db_table = 'habitica_habitica'
#
# Path: wh_habitica/api.py
# class HabiticaApi(hlib.api.Habitica):
# def __init__(self, user_id, api_token):
# headers = {
# default.AUTH_URL: default.API_HOST,
# default.AUTH_HEADER_CLIENT: user_id,
# default.AUTH_HEADER_TOKEN: api_token
# }
# super(HabiticaApi, self).__init__(auth=headers)
#
# def get_status(self):
# """
# Returns the Habitica server status.
# """
#
# try:
# return self.status()[default.JSON_STATUS] == default.JSON_UP
# except Exception:
# logger.exception('Could not get Habitica status.')
# return False
#
# def get_user(self):
# """
# Returns the full user object.
# """
#
# try:
# user = self.user()
# except Exception as e:
# logger.exception('Could not load Habitica user: ' + str(e))
# return None
#
# return user
#
# def get_user_details(self):
# """
# Parses the user details like the username or email from the Habitica user object.
# """
#
# user = self.get_user()
# if not user or default.JSON_AUTH not in user:
# return None
#
# auth_details = user[default.JSON_AUTH]
# user_details = dict()
#
# # Parse user id
# try:
# user_details[default.JSON_ID] = user[default.JSON_ID]
# except Exception:
# logger.exception('Could not find Habitica user id: ' + str(user))
#
# # Parse user details
# if default.JSON_LOCAL in auth_details and auth_details[default.JSON_LOCAL]:
# # User is authenticated with Habitica account
# try:
# auth_local = auth_details[default.JSON_LOCAL]
# user_details[default.JSON_EMAIL] = auth_local[default.JSON_EMAIL]
# user_details[default.JSON_NAME] = auth_local[default.JSON_USERNAME]
# except Exception:
# logger.exception('Could not parse Habitica user with local auth: ' + str(user))
#
# elif default.JSON_FACEBOOK in auth_details and auth_details[default.JSON_FACEBOOK]:
# # User is authenticated with facebook
# try:
# auth_facebook = auth_details[default.JSON_FACEBOOK]
# user_details[default.JSON_NAME] = auth_facebook[default.JSON_DISPLAY_NAME]
# except Exception:
# logger.exception('Could not parse Habitica user with Facebook auth: ' + str(user))
#
# elif default.JSON_GOOGLE in auth_details and auth_details[default.JSON_GOOGLE]:
# # User is authenticated with google
# try:
# auth_google = auth_details[default.JSON_GOOGLE]
# user_details[default.JSON_NAME] = auth_google[default.JSON_DISPLAY_NAME]
# user_details[default.JSON_EMAIL] = auth_google[default.JSON_GOOGLE_EMAILS][0][default.JSON_VALUE]
# except Exception:
# logger.exception('Could not parse Habitica user with Google auth: ' + str(user))
#
# else:
# # No valid authentication provider found
# logger.error('No valid Habitica auth provider found: ' + str(user))
#
# return user_details
#
# def get_habits(self):
# return self.user.tasks(type='habits')
#
# def get_habits_list_choices(self):
# """
# Returns a tuple with the available habits (possibly empty) and a boolean indicating the success of the api request.
# """
#
# habits = self.get_habits()
#
# if habits is None:
# return [], False
#
# choices = [(l['id'], l['text']) for l in habits]
# choices = sorted(choices, key=lambda x: x[1])
# return choices, True
#
# def post_task(self, task_id, up=True):
# """
# Up- or down-scores a task specified by the task_id.
# """
#
# if up:
# score = default.JSON_UP
# else:
# score = default.JSON_DOWN
#
# return self.user.tasks(_id=task_id, _direction=score, _method='post')
#
# def test_auth(self):
# """
# Tests whether the authentication credentials work or not.
# """
#
# user = self.get_user()
# if user:
# return True
# else:
# return False
. Output only the next line. | api = HabiticaApi(cleaned_data['user_id'], cleaned_data['api_token']) |
Continue the code snippet: <|code_start|>
USER_DICT = dict(username='tester', email='foo@bar.com')
INVALID_HOOK_TOKEN = '0000aUZ01eJYBhsIIVZotvc0dY9h0000'
def get_invalid_webhook_body():
return {
<|code_end|>
. Use current file imports:
import json
import pytest
from django.core.urlresolvers import reverse
from django.http import Http404
from wunderhabit.tests.utils import get_user
from wunderlist import default
from wunderlist.factories import ConnectionFactory
from wunderlist.views import webhook
from wh_habitica.tests.utils import mock_habitica_api
and context (classes, functions, or code) from other files:
# Path: wunderhabit/tests/utils.py
# def get_user():
# """
# Returns a user which is connected with wunderlist and habitica.
# """
#
# wunderlist = WunderlistFactory.create()
# habitica = HabiticaFactory.create()
# habitica.owner = wunderlist.owner
# habitica.save()
# return wunderlist.owner
#
# Path: wunderlist/default.py
# AUTH_URL = 'https://www.wunderlist.com/oauth/authorize?client_id={client_id}&redirect_uri={url}&state={state}'
# AUTH_POST = 'https://www.wunderlist.com/oauth/access_token'
# AUTH_POST_CLIENT = 'client_id'
# AUTH_POST_SECRET = 'client_secret'
# AUTH_POST_CODE = 'code'
# AUTH_POST_TOKEN = 'access_token'
# AUTH_HEADER_CLIENT = 'X-Client-ID'
# AUTH_HEADER_TOKEN = 'X-Access-Token'
# GET_USER = 'https://a.wunderlist.com/api/v1/user'
# GET_LISTS = 'https://a.wunderlist.com/api/v1/lists'
# GET_WEBHOOKS = 'https://a.wunderlist.com/api/v1/webhooks?list_id={list_id}'
# POST_WEBHOOK = 'https://a.wunderlist.com/api/v1/webhooks'
# DELETE_WEBHOOK = 'https://a.wunderlist.com/api/v1/webhooks/{id}'
# JSON_ID = 'id'
# JSON_USER_ID = 'user_id'
# JSON_NAME = 'name'
# JSON_EMAIL = 'email'
# JSON_CREATED_AT = 'created_at'
# JSON_REVISION = 'revision'
# JSON_LIST_ID = 'list_id'
# JSON_URL = 'url'
# JSON_PROCESSOR_TYPE = 'processor_type'
# JSON_CONFIGURATION = 'configuration'
# JSON_SUBJECT = 'subject'
# JSON_TYPE = 'type'
# JSON_OPERATION = 'operation'
# JSON_BEFORE = 'before'
# JSON_AFTER = 'after'
# JSON_COMPLETED = 'completed'
# JSON_TITLE = 'title'
# OPERATION_CREATE = 'create'
# OPERATION_UPDATE = 'update'
# SUBJECT_TASK = 'task'
# SUBJECT_SUBTASK = 'subtask'
# SUBJECT_LIST = 'list'
#
# Path: wunderlist/factories.py
# class ConnectionFactory(factory.django.DjangoModelFactory):
# class Meta:
# model = Connection
#
# list_id = 42
# token = get_random_string(32)
#
# Path: wunderlist/views.py
# @csrf_exempt
# def webhook(request, hook_id):
# connection = get_object_or_404(Connection, token=hook_id)
#
# if request.method != 'POST':
# return HttpResponse(status=400)
#
# try:
# data = json.loads(request.body)
# operation = data.get(default.JSON_OPERATION)
# user_id = int(data.get(default.JSON_USER_ID))
# subject = data.get(default.JSON_SUBJECT)
# subject_type = subject.get(default.JSON_TYPE)
# except Exception:
# return HttpResponse(status=400)
#
# # Check if connection is active
# if not connection.is_active:
# return HttpResponse(status=410)
#
# # Find Wunderlist user
# try:
# wunderlist = Wunderlist.objects.get(user_id=user_id)
# except ObjectDoesNotExist:
# return HttpResponse(status=401)
#
# # Validate user
# user = connection.owner
# if not user or not wunderlist or user != wunderlist.owner:
# return HttpResponse(status=401)
#
# # Check if user is active
# if not user.is_active:
# return HttpResponse(status=403)
#
# # Check if a task or subtask has been added to the list
# if operation == default.OPERATION_CREATE:
# # New task has been added to list
# return HttpResponse(status=200)
#
# # Check if a task has been updated (includes completion)
# if operation == default.OPERATION_UPDATE:
# try:
# before = data.get(default.JSON_BEFORE)
# before_completed = before.get(default.JSON_COMPLETED, False)
# after = data.get(default.JSON_AFTER)
# after_completed = after.get(default.JSON_COMPLETED, False)
# except Exception:
# return HttpResponse(status=400)
#
# if not before_completed and after_completed:
# if subject_type == default.SUBJECT_TASK:
# # Task has been completed
# connection.score_up()
# return HttpResponse(status=200)
#
# elif subject_type == default.SUBJECT_SUBTASK:
# # Subtask has been completed
# connection.score_up()
# return HttpResponse(status=200)
#
# return HttpResponse(status=400)
#
# Path: wh_habitica/tests/utils.py
# @pytest.fixture
# def mock_habitica_api(monkeypatch):
# monkeypatch.setattr('habitica.api.Habitica.status', lambda x: API_STATUS_UP, raising=False)
# monkeypatch.setattr('habitica.api.Habitica.user', lambda x: API_USER, raising=False)
# monkeypatch.setattr('wh_habitica.api.HabiticaApi.post_task', lambda x, task_id: API_TASK, raising=False)
. Output only the next line. | default.JSON_OPERATION: default.OPERATION_CREATE, |
Given the code snippet: <|code_start|>
class WunderlistFactory(factory.django.DjangoModelFactory):
class Meta:
model = Wunderlist
user_id = factory.Sequence(lambda n: "%d" % n)
name = 'John'
email = 'john@doe.com'
api_token = get_random_string(32)
owner = factory.SubFactory(UserFactory)
class ConnectionFactory(factory.django.DjangoModelFactory):
class Meta:
<|code_end|>
, generate the next line using the imports in this file:
import factory
from django.utils.crypto import get_random_string
from .models import Connection, Wunderlist
from wunderhabit.factories import UserFactory
and context (functions, classes, or occasionally code) from other files:
# Path: wunderlist/models.py
# class Connection(models.Model):
# """
# Connects a Wunderlist list with a Habitica habit.
# """
#
# list_id = models.IntegerField(_('List ID'))
# list_title = models.CharField(_('List Title'), max_length=255, blank=True)
# webhook_id = models.IntegerField(_('Webhook ID'), default=-1, blank=True)
# habit_id = models.CharField(_('Habit ID'), max_length=255, blank=True, null=True)
# habit_title = models.CharField(_('Habit Title'), max_length=255, blank=True, null=True)
# token = models.CharField(_('Token'), max_length=255)
# owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='connections', blank=True, null=True)
# tasks_completed = models.IntegerField(_('Tasks completed'), default=0)
# last_upscored = models.DateTimeField(_('Last Up-Scored'), default=None, blank=True, null=True)
# is_active = models.BooleanField(_('Is active'), default=True)
#
# created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
# modified_at = models.DateTimeField(_('Modified at'), auto_now=True)
#
# @property
# def webhook_url(self):
# """
# Returns the absolute URL which is callable by a webhook.
# """
#
# return settings.WEBHOOK_BASE_URL + reverse('wunderlist:webhook', kwargs={'hook_id': self.token})
#
# def score_up(self):
# api = HabiticaApi(self.owner.habitica.user_id, self.owner.habitica.api_token)
# result = api.post_task(self.habit_id)
# self.tasks_completed += 1
# self.last_upscored = timezone.now()
# self.save()
# return result
#
# def create_webhook(self):
# """
# Sends a request to Wunderlist to create a webhook and stores its id.
# """
#
# api = WunderlistApi(self.owner.wunderlist.api_token)
# webhook = api.create_webhook(self.list_id, self.webhook_url)
# if webhook:
# self.webhook_id = webhook[default.JSON_ID]
# self.save()
# return webhook
#
# def delete_webhook(self):
# """
# Sends a request to Wunderlist to delete the webhook.
# """
#
# if self.webhook_id < 0:
# return None
#
# api = WunderlistApi(self.owner.wunderlist.api_token)
# return api.delete_webhook(self.webhook_id)
#
# def deactivate(self):
# """
# Deletes the webhook and and anonymizes the connection such that the number of completed tasks does not get lost.
# """
#
# self.delete_webhook()
# self.list_title = ''
# self.habit_id = ''
# self.habit_title = ''
# self.owner = None
# self.is_active = False
# self.save()
#
# def delete(self, *args, **kwargs):
# """
# Overrides the delete method to delete the webhook first.
# """
#
# # Delete the webhook
# print(self.delete_webhook())
#
# # Delete the connection
# super(Connection, self).delete(*args, **kwargs)
#
# class Wunderlist(models.Model):
# """
# Wunderlist API Endpoint
# """
#
# user_id = models.IntegerField(_('User ID'))
# name = models.CharField(_('Name'), max_length=255)
# email = models.EmailField(_('Email'))
# api_token = models.CharField(_('API Token'), max_length=255)
# owner = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='wunderlist')
#
# created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
# modified_at = models.DateTimeField(_("Modified at"), auto_now=True)
#
# def get_api(self):
# """
# Returns the Wunderlist API object.
# """
#
# return WunderlistApi(self.api_token)
#
# Path: wunderhabit/factories.py
# class UserFactory(factory.django.DjangoModelFactory):
# class Meta:
# model = get_user_model()
#
# username = factory.Sequence(lambda n: "user_%d" % n)
# first_name = 'John'
# last_name = 'Doe'
. Output only the next line. | model = Connection |
Here is a snippet: <|code_start|>
class WunderlistFactory(factory.django.DjangoModelFactory):
class Meta:
model = Wunderlist
user_id = factory.Sequence(lambda n: "%d" % n)
name = 'John'
email = 'john@doe.com'
api_token = get_random_string(32)
<|code_end|>
. Write the next line using the current file imports:
import factory
from django.utils.crypto import get_random_string
from .models import Connection, Wunderlist
from wunderhabit.factories import UserFactory
and context from other files:
# Path: wunderlist/models.py
# class Connection(models.Model):
# """
# Connects a Wunderlist list with a Habitica habit.
# """
#
# list_id = models.IntegerField(_('List ID'))
# list_title = models.CharField(_('List Title'), max_length=255, blank=True)
# webhook_id = models.IntegerField(_('Webhook ID'), default=-1, blank=True)
# habit_id = models.CharField(_('Habit ID'), max_length=255, blank=True, null=True)
# habit_title = models.CharField(_('Habit Title'), max_length=255, blank=True, null=True)
# token = models.CharField(_('Token'), max_length=255)
# owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='connections', blank=True, null=True)
# tasks_completed = models.IntegerField(_('Tasks completed'), default=0)
# last_upscored = models.DateTimeField(_('Last Up-Scored'), default=None, blank=True, null=True)
# is_active = models.BooleanField(_('Is active'), default=True)
#
# created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
# modified_at = models.DateTimeField(_('Modified at'), auto_now=True)
#
# @property
# def webhook_url(self):
# """
# Returns the absolute URL which is callable by a webhook.
# """
#
# return settings.WEBHOOK_BASE_URL + reverse('wunderlist:webhook', kwargs={'hook_id': self.token})
#
# def score_up(self):
# api = HabiticaApi(self.owner.habitica.user_id, self.owner.habitica.api_token)
# result = api.post_task(self.habit_id)
# self.tasks_completed += 1
# self.last_upscored = timezone.now()
# self.save()
# return result
#
# def create_webhook(self):
# """
# Sends a request to Wunderlist to create a webhook and stores its id.
# """
#
# api = WunderlistApi(self.owner.wunderlist.api_token)
# webhook = api.create_webhook(self.list_id, self.webhook_url)
# if webhook:
# self.webhook_id = webhook[default.JSON_ID]
# self.save()
# return webhook
#
# def delete_webhook(self):
# """
# Sends a request to Wunderlist to delete the webhook.
# """
#
# if self.webhook_id < 0:
# return None
#
# api = WunderlistApi(self.owner.wunderlist.api_token)
# return api.delete_webhook(self.webhook_id)
#
# def deactivate(self):
# """
# Deletes the webhook and and anonymizes the connection such that the number of completed tasks does not get lost.
# """
#
# self.delete_webhook()
# self.list_title = ''
# self.habit_id = ''
# self.habit_title = ''
# self.owner = None
# self.is_active = False
# self.save()
#
# def delete(self, *args, **kwargs):
# """
# Overrides the delete method to delete the webhook first.
# """
#
# # Delete the webhook
# print(self.delete_webhook())
#
# # Delete the connection
# super(Connection, self).delete(*args, **kwargs)
#
# class Wunderlist(models.Model):
# """
# Wunderlist API Endpoint
# """
#
# user_id = models.IntegerField(_('User ID'))
# name = models.CharField(_('Name'), max_length=255)
# email = models.EmailField(_('Email'))
# api_token = models.CharField(_('API Token'), max_length=255)
# owner = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='wunderlist')
#
# created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
# modified_at = models.DateTimeField(_("Modified at"), auto_now=True)
#
# def get_api(self):
# """
# Returns the Wunderlist API object.
# """
#
# return WunderlistApi(self.api_token)
#
# Path: wunderhabit/factories.py
# class UserFactory(factory.django.DjangoModelFactory):
# class Meta:
# model = get_user_model()
#
# username = factory.Sequence(lambda n: "user_%d" % n)
# first_name = 'John'
# last_name = 'Doe'
, which may include functions, classes, or code. Output only the next line. | owner = factory.SubFactory(UserFactory) |
Using the snippet: <|code_start|>
@pytest.mark.usefixtures('mock_messages', 'mock_wunderlist_api', 'mock_habitica_api')
@pytest.mark.django_db
def test_successfully_authenticated(rf):
request = rf.get(reverse('test_authentication'))
request.user = get_user()
<|code_end|>
, determine the next line of code. You have imports:
import pytest
from django.core.urlresolvers import reverse
from wunderhabit import views
from wunderhabit import default
from .utils import get_user
from .utils import mock_messages
from wunderlist.tests.utils import mock_wunderlist_api
from wh_habitica.tests.utils import mock_habitica_api
and context (class names, function names, or code) available:
# Path: wunderhabit/views.py
# def index(request):
# def dashboard(request):
# def logout_view(request):
# def delete_account(request):
# def add_connection(request):
# def delete_connection(request, connection_id):
# def test_authentication(request):
#
# Path: wunderhabit/default.py
# MESSAGE_CREATED_CONNECTION = _('Created new Connection!')
# MESSAGE_CREATE_CONNECTION_ERROR = _('Could not create Connection.')
# MESSAGE_DELETED_CONNECTION = _('Deleted the Connection!')
# MESSAGE_LOGGED_OUT = _('Successfully logged out.')
# MESSAGE_AUTH_SUCCESS = _('Successfully connected with Wunderlist and Habitica.')
# MESSAGE_DELETED_ACCOUNT = _('Your account has successfully been deleted.')
#
# Path: wunderhabit/tests/utils.py
# def get_user():
# """
# Returns a user which is connected with wunderlist and habitica.
# """
#
# wunderlist = WunderlistFactory.create()
# habitica = HabiticaFactory.create()
# habitica.owner = wunderlist.owner
# habitica.save()
# return wunderlist.owner
#
# Path: wunderhabit/tests/utils.py
# @pytest.fixture(autouse=True)
# def mock_messages(monkeypatch):
# monkeypatch.setattr('django.http.HttpRequest._messages', MockMessages(), raising=False)
#
# Path: wunderlist/tests/utils.py
# @pytest.fixture(autouse=True)
# def mock_wunderlist_api(monkeypatch):
# monkeypatch.setattr('wunderlist.api.WunderlistApi.get_user', lambda x: API_USER, raising=False)
# monkeypatch.setattr('wunderlist.api.WunderlistApi.get_lists', lambda x: API_LISTS, raising=False)
# monkeypatch.setattr('wunderlist.api.WunderlistApi.test_auth', lambda x: True, raising=False)
#
# Path: wh_habitica/tests/utils.py
# @pytest.fixture
# def mock_habitica_api(monkeypatch):
# monkeypatch.setattr('habitica.api.Habitica.status', lambda x: API_STATUS_UP, raising=False)
# monkeypatch.setattr('habitica.api.Habitica.user', lambda x: API_USER, raising=False)
# monkeypatch.setattr('wh_habitica.api.HabiticaApi.post_task', lambda x, task_id: API_TASK, raising=False)
. Output only the next line. | response = views.test_authentication(request) |
Predict the next line after this snippet: <|code_start|>
@pytest.mark.usefixtures('mock_messages', 'mock_wunderlist_api', 'mock_habitica_api')
@pytest.mark.django_db
def test_successfully_authenticated(rf):
request = rf.get(reverse('test_authentication'))
request.user = get_user()
response = views.test_authentication(request)
<|code_end|>
using the current file's imports:
import pytest
from django.core.urlresolvers import reverse
from wunderhabit import views
from wunderhabit import default
from .utils import get_user
from .utils import mock_messages
from wunderlist.tests.utils import mock_wunderlist_api
from wh_habitica.tests.utils import mock_habitica_api
and any relevant context from other files:
# Path: wunderhabit/views.py
# def index(request):
# def dashboard(request):
# def logout_view(request):
# def delete_account(request):
# def add_connection(request):
# def delete_connection(request, connection_id):
# def test_authentication(request):
#
# Path: wunderhabit/default.py
# MESSAGE_CREATED_CONNECTION = _('Created new Connection!')
# MESSAGE_CREATE_CONNECTION_ERROR = _('Could not create Connection.')
# MESSAGE_DELETED_CONNECTION = _('Deleted the Connection!')
# MESSAGE_LOGGED_OUT = _('Successfully logged out.')
# MESSAGE_AUTH_SUCCESS = _('Successfully connected with Wunderlist and Habitica.')
# MESSAGE_DELETED_ACCOUNT = _('Your account has successfully been deleted.')
#
# Path: wunderhabit/tests/utils.py
# def get_user():
# """
# Returns a user which is connected with wunderlist and habitica.
# """
#
# wunderlist = WunderlistFactory.create()
# habitica = HabiticaFactory.create()
# habitica.owner = wunderlist.owner
# habitica.save()
# return wunderlist.owner
#
# Path: wunderhabit/tests/utils.py
# @pytest.fixture(autouse=True)
# def mock_messages(monkeypatch):
# monkeypatch.setattr('django.http.HttpRequest._messages', MockMessages(), raising=False)
#
# Path: wunderlist/tests/utils.py
# @pytest.fixture(autouse=True)
# def mock_wunderlist_api(monkeypatch):
# monkeypatch.setattr('wunderlist.api.WunderlistApi.get_user', lambda x: API_USER, raising=False)
# monkeypatch.setattr('wunderlist.api.WunderlistApi.get_lists', lambda x: API_LISTS, raising=False)
# monkeypatch.setattr('wunderlist.api.WunderlistApi.test_auth', lambda x: True, raising=False)
#
# Path: wh_habitica/tests/utils.py
# @pytest.fixture
# def mock_habitica_api(monkeypatch):
# monkeypatch.setattr('habitica.api.Habitica.status', lambda x: API_STATUS_UP, raising=False)
# monkeypatch.setattr('habitica.api.Habitica.user', lambda x: API_USER, raising=False)
# monkeypatch.setattr('wh_habitica.api.HabiticaApi.post_task', lambda x, task_id: API_TASK, raising=False)
. Output only the next line. | assert request._messages.messages[0] == default.MESSAGE_AUTH_SUCCESS |
Next line prediction: <|code_start|>
@pytest.mark.usefixtures('mock_messages', 'mock_wunderlist_api', 'mock_habitica_api')
@pytest.mark.django_db
def test_successfully_authenticated(rf):
request = rf.get(reverse('test_authentication'))
<|code_end|>
. Use current file imports:
(import pytest
from django.core.urlresolvers import reverse
from wunderhabit import views
from wunderhabit import default
from .utils import get_user
from .utils import mock_messages
from wunderlist.tests.utils import mock_wunderlist_api
from wh_habitica.tests.utils import mock_habitica_api)
and context including class names, function names, or small code snippets from other files:
# Path: wunderhabit/views.py
# def index(request):
# def dashboard(request):
# def logout_view(request):
# def delete_account(request):
# def add_connection(request):
# def delete_connection(request, connection_id):
# def test_authentication(request):
#
# Path: wunderhabit/default.py
# MESSAGE_CREATED_CONNECTION = _('Created new Connection!')
# MESSAGE_CREATE_CONNECTION_ERROR = _('Could not create Connection.')
# MESSAGE_DELETED_CONNECTION = _('Deleted the Connection!')
# MESSAGE_LOGGED_OUT = _('Successfully logged out.')
# MESSAGE_AUTH_SUCCESS = _('Successfully connected with Wunderlist and Habitica.')
# MESSAGE_DELETED_ACCOUNT = _('Your account has successfully been deleted.')
#
# Path: wunderhabit/tests/utils.py
# def get_user():
# """
# Returns a user which is connected with wunderlist and habitica.
# """
#
# wunderlist = WunderlistFactory.create()
# habitica = HabiticaFactory.create()
# habitica.owner = wunderlist.owner
# habitica.save()
# return wunderlist.owner
#
# Path: wunderhabit/tests/utils.py
# @pytest.fixture(autouse=True)
# def mock_messages(monkeypatch):
# monkeypatch.setattr('django.http.HttpRequest._messages', MockMessages(), raising=False)
#
# Path: wunderlist/tests/utils.py
# @pytest.fixture(autouse=True)
# def mock_wunderlist_api(monkeypatch):
# monkeypatch.setattr('wunderlist.api.WunderlistApi.get_user', lambda x: API_USER, raising=False)
# monkeypatch.setattr('wunderlist.api.WunderlistApi.get_lists', lambda x: API_LISTS, raising=False)
# monkeypatch.setattr('wunderlist.api.WunderlistApi.test_auth', lambda x: True, raising=False)
#
# Path: wh_habitica/tests/utils.py
# @pytest.fixture
# def mock_habitica_api(monkeypatch):
# monkeypatch.setattr('habitica.api.Habitica.status', lambda x: API_STATUS_UP, raising=False)
# monkeypatch.setattr('habitica.api.Habitica.user', lambda x: API_USER, raising=False)
# monkeypatch.setattr('wh_habitica.api.HabiticaApi.post_task', lambda x, task_id: API_TASK, raising=False)
. Output only the next line. | request.user = get_user() |
Continue the code snippet: <|code_start|>
@login_required
@has_wunderlist
def auth(request):
"""
Authenticates the user to Habitica.
"""
user = request.user
if hasattr(user, 'habitica'):
return redirect('index')
<|code_end|>
. Use current file imports:
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from .forms import AuthForm
from .decorators import has_habitica
from wunderlist.decorators import has_wunderlist
and context (classes, functions, or code) from other files:
# Path: wh_habitica/forms.py
# class AuthForm(forms.ModelForm):
# """
# Form to enter and validate the Habitica authentication credentials.
# """
#
# AUTH_ERROR = _('Could not authenticate to Habitica. Please check the User '
# 'ID and the API Token.')
# HABITICA_ERROR = _('Something went wrong while loading Habitica user data.'
# ' Please try again or contact the admin.')
#
# class Meta:
# model = Habitica
# fields = ['user_id', 'api_token']
#
# def clean(self):
# cleaned_data = super(AuthForm, self).clean()
#
# api = HabiticaApi(cleaned_data['user_id'], cleaned_data['api_token'])
# user_details = api.get_user_details()
#
# # Validate authentication
# if not user_details or cleaned_data['user_id'] != user_details[default.JSON_ID]:
# raise forms.ValidationError(self.AUTH_ERROR)
#
# # Get optional user details
# try:
# self.instance.name = user_details[default.JSON_NAME]
# self.instance.email = user_details[default.JSON_EMAIL]
# except (ValueError, KeyError):
# logger.exception('Could not get user details: %s', str(user_details))
#
# return cleaned_data
#
# Path: wh_habitica/decorators.py
# def has_habitica(view_func):
# """
# Checks if the current user is connected to Habitica.
# If not, the user is redirected to the Habitica connect page.
# """
#
# def _wrapped_view_func(request, *args, **kwargs):
# user = request.user
# if not hasattr(user, 'habitica'):
# return redirect('habitica:index')
# if not user.habitica.api_token:
# return redirect('habitica:index')
# if not user.habitica.user_id:
# return redirect('habitica:index')
# return view_func(request, *args, **kwargs)
# return _wrapped_view_func
#
# Path: wunderlist/decorators.py
# def has_wunderlist(view_func):
# """
# Checks if the current user is connected to Wunderlist.
# If not, the user is redirected to the Wunderlist connect page.
# """
#
# def _wrapped_view_func(request, *args, **kwargs):
# user = request.user
# if not hasattr(user, 'wunderlist'):
# return redirect('index')
# if not user.wunderlist.api_token:
# return redirect('index')
# if not user.wunderlist.user_id:
# return redirect('index')
# if not user.wunderlist.email:
# return redirect('index')
# return view_func(request, *args, **kwargs)
# return _wrapped_view_func
. Output only the next line. | form = AuthForm() |
Given the code snippet: <|code_start|>
@login_required
@has_wunderlist
def auth(request):
"""
Authenticates the user to Habitica.
"""
user = request.user
if hasattr(user, 'habitica'):
return redirect('index')
form = AuthForm()
if request.method == 'POST':
form = AuthForm(request.POST)
if form.is_valid():
habitica = form.save(commit=False)
habitica.owner = user
habitica.save()
messages.success(request, _('Successfully connected with Habitica.'))
return redirect('index')
return render(request, 'habitica/index.html', {'form': form})
@login_required
@has_wunderlist
<|code_end|>
, generate the next line using the imports in this file:
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from .forms import AuthForm
from .decorators import has_habitica
from wunderlist.decorators import has_wunderlist
and context (functions, classes, or occasionally code) from other files:
# Path: wh_habitica/forms.py
# class AuthForm(forms.ModelForm):
# """
# Form to enter and validate the Habitica authentication credentials.
# """
#
# AUTH_ERROR = _('Could not authenticate to Habitica. Please check the User '
# 'ID and the API Token.')
# HABITICA_ERROR = _('Something went wrong while loading Habitica user data.'
# ' Please try again or contact the admin.')
#
# class Meta:
# model = Habitica
# fields = ['user_id', 'api_token']
#
# def clean(self):
# cleaned_data = super(AuthForm, self).clean()
#
# api = HabiticaApi(cleaned_data['user_id'], cleaned_data['api_token'])
# user_details = api.get_user_details()
#
# # Validate authentication
# if not user_details or cleaned_data['user_id'] != user_details[default.JSON_ID]:
# raise forms.ValidationError(self.AUTH_ERROR)
#
# # Get optional user details
# try:
# self.instance.name = user_details[default.JSON_NAME]
# self.instance.email = user_details[default.JSON_EMAIL]
# except (ValueError, KeyError):
# logger.exception('Could not get user details: %s', str(user_details))
#
# return cleaned_data
#
# Path: wh_habitica/decorators.py
# def has_habitica(view_func):
# """
# Checks if the current user is connected to Habitica.
# If not, the user is redirected to the Habitica connect page.
# """
#
# def _wrapped_view_func(request, *args, **kwargs):
# user = request.user
# if not hasattr(user, 'habitica'):
# return redirect('habitica:index')
# if not user.habitica.api_token:
# return redirect('habitica:index')
# if not user.habitica.user_id:
# return redirect('habitica:index')
# return view_func(request, *args, **kwargs)
# return _wrapped_view_func
#
# Path: wunderlist/decorators.py
# def has_wunderlist(view_func):
# """
# Checks if the current user is connected to Wunderlist.
# If not, the user is redirected to the Wunderlist connect page.
# """
#
# def _wrapped_view_func(request, *args, **kwargs):
# user = request.user
# if not hasattr(user, 'wunderlist'):
# return redirect('index')
# if not user.wunderlist.api_token:
# return redirect('index')
# if not user.wunderlist.user_id:
# return redirect('index')
# if not user.wunderlist.email:
# return redirect('index')
# return view_func(request, *args, **kwargs)
# return _wrapped_view_func
. Output only the next line. | @has_habitica |
Predict the next line for this snippet: <|code_start|>
class WunderlistAdmin(admin.ModelAdmin):
fieldsets = [
(
'Wunderlist',
{'fields': ['user_id', 'name', 'email', 'api_token', 'owner', 'created_at', 'modified_at']}
)
]
readonly_fields = ['created_at', 'modified_at']
list_display = ['user_id', 'name', 'email', 'owner', 'created_at', 'modified_at']
class ConnectionAdmin(admin.ModelAdmin):
fieldsets = [
(
'Connection',
{'fields': ['list_id', 'list_title', 'habit_id', 'habit_title', 'token', 'webhook_id', 'owner']}
),
(
'Activity',
{'fields': ['is_active', 'tasks_completed', 'last_upscored', 'created_at', 'modified_at']}
)
]
readonly_fields = ['list_id', 'list_title', 'token', 'webhook_id', 'created_at', 'modified_at']
list_display = ['list_id', 'list_title', 'habit_id', 'habit_title', 'token', 'is_active', 'owner', 'tasks_completed', 'created_at', 'modified_at']
<|code_end|>
with the help of current file imports:
from django.contrib import admin
from .models import Wunderlist, Connection
and context from other files:
# Path: wunderlist/models.py
# class Wunderlist(models.Model):
# """
# Wunderlist API Endpoint
# """
#
# user_id = models.IntegerField(_('User ID'))
# name = models.CharField(_('Name'), max_length=255)
# email = models.EmailField(_('Email'))
# api_token = models.CharField(_('API Token'), max_length=255)
# owner = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='wunderlist')
#
# created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
# modified_at = models.DateTimeField(_("Modified at"), auto_now=True)
#
# def get_api(self):
# """
# Returns the Wunderlist API object.
# """
#
# return WunderlistApi(self.api_token)
#
# class Connection(models.Model):
# """
# Connects a Wunderlist list with a Habitica habit.
# """
#
# list_id = models.IntegerField(_('List ID'))
# list_title = models.CharField(_('List Title'), max_length=255, blank=True)
# webhook_id = models.IntegerField(_('Webhook ID'), default=-1, blank=True)
# habit_id = models.CharField(_('Habit ID'), max_length=255, blank=True, null=True)
# habit_title = models.CharField(_('Habit Title'), max_length=255, blank=True, null=True)
# token = models.CharField(_('Token'), max_length=255)
# owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='connections', blank=True, null=True)
# tasks_completed = models.IntegerField(_('Tasks completed'), default=0)
# last_upscored = models.DateTimeField(_('Last Up-Scored'), default=None, blank=True, null=True)
# is_active = models.BooleanField(_('Is active'), default=True)
#
# created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
# modified_at = models.DateTimeField(_('Modified at'), auto_now=True)
#
# @property
# def webhook_url(self):
# """
# Returns the absolute URL which is callable by a webhook.
# """
#
# return settings.WEBHOOK_BASE_URL + reverse('wunderlist:webhook', kwargs={'hook_id': self.token})
#
# def score_up(self):
# api = HabiticaApi(self.owner.habitica.user_id, self.owner.habitica.api_token)
# result = api.post_task(self.habit_id)
# self.tasks_completed += 1
# self.last_upscored = timezone.now()
# self.save()
# return result
#
# def create_webhook(self):
# """
# Sends a request to Wunderlist to create a webhook and stores its id.
# """
#
# api = WunderlistApi(self.owner.wunderlist.api_token)
# webhook = api.create_webhook(self.list_id, self.webhook_url)
# if webhook:
# self.webhook_id = webhook[default.JSON_ID]
# self.save()
# return webhook
#
# def delete_webhook(self):
# """
# Sends a request to Wunderlist to delete the webhook.
# """
#
# if self.webhook_id < 0:
# return None
#
# api = WunderlistApi(self.owner.wunderlist.api_token)
# return api.delete_webhook(self.webhook_id)
#
# def deactivate(self):
# """
# Deletes the webhook and and anonymizes the connection such that the number of completed tasks does not get lost.
# """
#
# self.delete_webhook()
# self.list_title = ''
# self.habit_id = ''
# self.habit_title = ''
# self.owner = None
# self.is_active = False
# self.save()
#
# def delete(self, *args, **kwargs):
# """
# Overrides the delete method to delete the webhook first.
# """
#
# # Delete the webhook
# print(self.delete_webhook())
#
# # Delete the connection
# super(Connection, self).delete(*args, **kwargs)
, which may contain function names, class names, or code. Output only the next line. | admin.site.register(Wunderlist, WunderlistAdmin) |
Based on the snippet: <|code_start|>
class WunderlistAdmin(admin.ModelAdmin):
fieldsets = [
(
'Wunderlist',
{'fields': ['user_id', 'name', 'email', 'api_token', 'owner', 'created_at', 'modified_at']}
)
]
readonly_fields = ['created_at', 'modified_at']
list_display = ['user_id', 'name', 'email', 'owner', 'created_at', 'modified_at']
class ConnectionAdmin(admin.ModelAdmin):
fieldsets = [
(
'Connection',
{'fields': ['list_id', 'list_title', 'habit_id', 'habit_title', 'token', 'webhook_id', 'owner']}
),
(
'Activity',
{'fields': ['is_active', 'tasks_completed', 'last_upscored', 'created_at', 'modified_at']}
)
]
readonly_fields = ['list_id', 'list_title', 'token', 'webhook_id', 'created_at', 'modified_at']
list_display = ['list_id', 'list_title', 'habit_id', 'habit_title', 'token', 'is_active', 'owner', 'tasks_completed', 'created_at', 'modified_at']
admin.site.register(Wunderlist, WunderlistAdmin)
<|code_end|>
, predict the immediate next line with the help of imports:
from django.contrib import admin
from .models import Wunderlist, Connection
and context (classes, functions, sometimes code) from other files:
# Path: wunderlist/models.py
# class Wunderlist(models.Model):
# """
# Wunderlist API Endpoint
# """
#
# user_id = models.IntegerField(_('User ID'))
# name = models.CharField(_('Name'), max_length=255)
# email = models.EmailField(_('Email'))
# api_token = models.CharField(_('API Token'), max_length=255)
# owner = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='wunderlist')
#
# created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
# modified_at = models.DateTimeField(_("Modified at"), auto_now=True)
#
# def get_api(self):
# """
# Returns the Wunderlist API object.
# """
#
# return WunderlistApi(self.api_token)
#
# class Connection(models.Model):
# """
# Connects a Wunderlist list with a Habitica habit.
# """
#
# list_id = models.IntegerField(_('List ID'))
# list_title = models.CharField(_('List Title'), max_length=255, blank=True)
# webhook_id = models.IntegerField(_('Webhook ID'), default=-1, blank=True)
# habit_id = models.CharField(_('Habit ID'), max_length=255, blank=True, null=True)
# habit_title = models.CharField(_('Habit Title'), max_length=255, blank=True, null=True)
# token = models.CharField(_('Token'), max_length=255)
# owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='connections', blank=True, null=True)
# tasks_completed = models.IntegerField(_('Tasks completed'), default=0)
# last_upscored = models.DateTimeField(_('Last Up-Scored'), default=None, blank=True, null=True)
# is_active = models.BooleanField(_('Is active'), default=True)
#
# created_at = models.DateTimeField(_('Created at'), auto_now_add=True)
# modified_at = models.DateTimeField(_('Modified at'), auto_now=True)
#
# @property
# def webhook_url(self):
# """
# Returns the absolute URL which is callable by a webhook.
# """
#
# return settings.WEBHOOK_BASE_URL + reverse('wunderlist:webhook', kwargs={'hook_id': self.token})
#
# def score_up(self):
# api = HabiticaApi(self.owner.habitica.user_id, self.owner.habitica.api_token)
# result = api.post_task(self.habit_id)
# self.tasks_completed += 1
# self.last_upscored = timezone.now()
# self.save()
# return result
#
# def create_webhook(self):
# """
# Sends a request to Wunderlist to create a webhook and stores its id.
# """
#
# api = WunderlistApi(self.owner.wunderlist.api_token)
# webhook = api.create_webhook(self.list_id, self.webhook_url)
# if webhook:
# self.webhook_id = webhook[default.JSON_ID]
# self.save()
# return webhook
#
# def delete_webhook(self):
# """
# Sends a request to Wunderlist to delete the webhook.
# """
#
# if self.webhook_id < 0:
# return None
#
# api = WunderlistApi(self.owner.wunderlist.api_token)
# return api.delete_webhook(self.webhook_id)
#
# def deactivate(self):
# """
#         Deletes the webhook and anonymizes the connection such that the number of completed tasks does not get lost.
# """
#
# self.delete_webhook()
# self.list_title = ''
# self.habit_id = ''
# self.habit_title = ''
# self.owner = None
# self.is_active = False
# self.save()
#
# def delete(self, *args, **kwargs):
# """
# Overrides the delete method to delete the webhook first.
# """
#
# # Delete the webhook
# print(self.delete_webhook())
#
# # Delete the connection
# super(Connection, self).delete(*args, **kwargs)
. Output only the next line. | admin.site.register(Connection, ConnectionAdmin) |
Using the snippet: <|code_start|>
class Habitica(models.Model):
"""
Habitica API Endpoint
"""
user_id = models.CharField(_('User ID'), max_length=255, blank=True)
name = models.CharField(_('Name'), max_length=255, blank=True)
email = models.EmailField(_('Email'), blank=True)
api_token = models.CharField(_('API Token'), max_length=255, blank=True)
owner = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='habitica')
created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
modified_at = models.DateTimeField(_("Modified at"), auto_now=True)
def get_api(self):
"""
Returns the Habitica API object.
"""
<|code_end|>
, determine the next line of code. You have imports:
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .api import HabiticaApi
and context (class names, function names, or code) available:
# Path: wh_habitica/api.py
# class HabiticaApi(hlib.api.Habitica):
# def __init__(self, user_id, api_token):
# headers = {
# default.AUTH_URL: default.API_HOST,
# default.AUTH_HEADER_CLIENT: user_id,
# default.AUTH_HEADER_TOKEN: api_token
# }
# super(HabiticaApi, self).__init__(auth=headers)
#
# def get_status(self):
# """
# Returns the Habitica server status.
# """
#
# try:
# return self.status()[default.JSON_STATUS] == default.JSON_UP
# except Exception:
# logger.exception('Could not get Habitica status.')
# return False
#
# def get_user(self):
# """
# Returns the full user object.
# """
#
# try:
# user = self.user()
# except Exception as e:
# logger.exception('Could not load Habitica user: ' + str(e))
# return None
#
# return user
#
# def get_user_details(self):
# """
# Parses the user details like the username or email from the Habitica user object.
# """
#
# user = self.get_user()
# if not user or default.JSON_AUTH not in user:
# return None
#
# auth_details = user[default.JSON_AUTH]
# user_details = dict()
#
# # Parse user id
# try:
# user_details[default.JSON_ID] = user[default.JSON_ID]
# except Exception:
# logger.exception('Could not find Habitica user id: ' + str(user))
#
# # Parse user details
# if default.JSON_LOCAL in auth_details and auth_details[default.JSON_LOCAL]:
# # User is authenticated with Habitica account
# try:
# auth_local = auth_details[default.JSON_LOCAL]
# user_details[default.JSON_EMAIL] = auth_local[default.JSON_EMAIL]
# user_details[default.JSON_NAME] = auth_local[default.JSON_USERNAME]
# except Exception:
# logger.exception('Could not parse Habitica user with local auth: ' + str(user))
#
# elif default.JSON_FACEBOOK in auth_details and auth_details[default.JSON_FACEBOOK]:
# # User is authenticated with facebook
# try:
# auth_facebook = auth_details[default.JSON_FACEBOOK]
# user_details[default.JSON_NAME] = auth_facebook[default.JSON_DISPLAY_NAME]
# except Exception:
# logger.exception('Could not parse Habitica user with Facebook auth: ' + str(user))
#
# elif default.JSON_GOOGLE in auth_details and auth_details[default.JSON_GOOGLE]:
# # User is authenticated with google
# try:
# auth_google = auth_details[default.JSON_GOOGLE]
# user_details[default.JSON_NAME] = auth_google[default.JSON_DISPLAY_NAME]
# user_details[default.JSON_EMAIL] = auth_google[default.JSON_GOOGLE_EMAILS][0][default.JSON_VALUE]
# except Exception:
# logger.exception('Could not parse Habitica user with Google auth: ' + str(user))
#
# else:
# # No valid authentication provider found
# logger.error('No valid Habitica auth provider found: ' + str(user))
#
# return user_details
#
# def get_habits(self):
# return self.user.tasks(type='habits')
#
# def get_habits_list_choices(self):
# """
# Returns a tuple with the available habits (possibly empty) and a boolean indicating the success of the api request.
# """
#
# habits = self.get_habits()
#
# if habits is None:
# return [], False
#
# choices = [(l['id'], l['text']) for l in habits]
# choices = sorted(choices, key=lambda x: x[1])
# return choices, True
#
# def post_task(self, task_id, up=True):
# """
# Up- or down-scores a task specified by the task_id.
# """
#
# if up:
# score = default.JSON_UP
# else:
# score = default.JSON_DOWN
#
# return self.user.tasks(_id=task_id, _direction=score, _method='post')
#
# def test_auth(self):
# """
# Tests whether the authentication credentials work or not.
# """
#
# user = self.get_user()
# if user:
# return True
# else:
# return False
. Output only the next line. | return HabiticaApi(self.user_id, self.api_token) |
Predict the next line after this snippet: <|code_start|>
admin.autodiscover()
admin.site.site_title = 'PlanB'
admin.site.site_header = 'PlanB management'
urlpatterns = [
#################
# Admin interface
#################
# Use / as the admin path (only if this is the only app in the project)
# (point people to the right url.. fails to work if STATIC_URL is '/')
url(r'^admin(/.*)$', RedirectView.as_view(url='/', permanent=False)),
url(r'^planb/fileset/(?P<fileset_id>\d+)/enqueue/$',
<|code_end|>
using the current file's imports:
from django.contrib import admin
from django.urls import re_path as url
from django.views.generic.base import RedirectView
from .views import EnqueueJob
and any relevant context from other files:
# Path: planb/views.py
# class EnqueueJob(View):
# def post(self, request, fileset_id):
# if not request.user.has_perm('planb.add_backuprun'):
# raise PermissionDenied()
# try:
# # Allow enqueuing disabled filesets for cases where periodic
# # backups are not desired or possible.
# fileset = Fileset.objects.get(id=fileset_id)
# except Fileset.DoesNotExist:
# raise PermissionDenied()
#
# self.enqueue(fileset)
#
# return HttpResponseRedirect(
# # Our URL is /bla/bla/123/enqueue/.
# # Drop the "enqueue/".
# # FIXME: Should use proper reverse() instead!
# self.request.path_info.rsplit('/', 2)[0] + '/')
#
# def enqueue(self, fileset):
# if fileset.is_queued or fileset.is_running:
# messages.add_message(
# self.request, messages.ERROR,
# 'Job was already queued/running!')
# return False
#
# # Spawn a single run.
# Fileset.objects.filter(pk=fileset.pk).update(is_queued=True)
# task_id = async_backup_job(fileset)
# messages.add_message(
# self.request, messages.INFO,
# 'Spawned job %s as requested.' % (task_id,))
. Output only the next line. | EnqueueJob.as_view(), name='enqueue'), |
Here is a snippet: <|code_start|> """
if value is None:
return None
value = str(value)
lines = [line.rstrip() for line in value.split('\n')]
lines = [' ' + line if line else '' for line in lines]
return '\n'.join(lines)
@register.filter(is_safe=False)
def replaceany(value, token):
"""
Replace any/all characters with the supplied token.
"""
if value is None:
return None
value = str(value)
return str(token) * len(value)
@register.filter(is_safe=False)
def formatseconds(value):
"""
Format seconds as hours/minutes/seconds.
"""
if value is None:
return None
<|code_end|>
. Write the next line using the current file imports:
from django.template.library import Library
from planb.common import human
and context from other files:
# Path: planb/common/human.py
# BYTE_UNITS = (
# ('{:.1f} KB', 1 << 10),
# ('{:.1f} MB', 1 << 20),
# ('{:.1f} GB', 1 << 30),
# ('{:.1f} TB', 1 << 40),
# ('{:.1f} PB', 1 << 50),
# )
# def bytes(bytes_):
# def seconds(seconds_):
, which may include functions, classes, or code. Output only the next line. | return human.seconds(value) |
Given snippet: <|code_start|>
class ConfigAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': (
'fileset', 'transport_command',
)}),
('Advanced options', {'fields': (
'can_create_snapshot', 'can_rotate_snapshot',
)}),
)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.contrib import admin
from django.forms import modelform_factory
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from planb.forms import FilesetRefForm
from .models import Config
and context:
# Path: planb/forms.py
# class FilesetRefForm(forms.ModelForm):
# """
# Generate FilesetRefForm tailored to the supplied class; so sorting
# of the Filesets works.
#
# Use in your admin class. For example:
#
# from django.forms import modelform_factory
# from planb.forms import FilesetRefForm
#
# class MyModel(models.Model):
# fileset = models.OneToOneField(Fileset)
#
# class MyModelAdmin(admin.ModelAdmin):
# form = modelform_factory(MyModel, form=FilesetRefForm)
# """
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
#
# if 'fileset' in self.fields:
# # Order.
# self.fields['fileset'].queryset = (
# self.fields['fileset'].queryset.order_by('friendly_name'))
#
# # Get IDs of used filesets.
# ids = set()
# for transport_class_name in settings.PLANB_TRANSPORTS:
# transport_class = apps.get_model(transport_class_name)
# ids.update(transport_class.objects.values_list(
# 'fileset', flat=True))
#
# # Don't list used filesets.
# # NOTE: This is not a fool-proof way to avoid
# # MultipleObjectsReturned. But it will provide a better
# # interface.
# self.fields['fileset'].queryset = (
# self.fields['fileset'].queryset.exclude(id__in=ids))
#
# class Meta:
# fields = '__all__'
#
# Path: planb/transport_exec/models.py
# class Config(AbstractTransport):
# transport_command = CommandField(help_text=_( # FIXME: add env docs
# 'Program to run to do the transport (data import). It is '
# 'split by spaces and fed to execve(). '
# 'Useful variables are available in the environment.'))
#
# class Meta:
# db_table = TABLE_PREFIX # or '{}_config'.format(TABLE_PREFIX)
#
# def __str__(self):
# return 'exec transport {}'.format(
# self.transport_command.replace(' \\\n', ' '))
#
# def get_change_url(self):
# return reverse('admin:transport_exec_config_change', args=(self.pk,))
#
# def generate_cmd(self):
# # shlex.split() keeps linefeeds in backslash-linefeed combo's.
# # We don't want those. Remove any '\\\n' before we proceed.
# # 'abc \\\n def' => ['abc', '\n', 'def']
# # 'abc\\\ndef' => ['abc', '\ndef']
# cmd = self.transport_command
# no_backslash_cmd = re.sub('([^\\\\])\\\\\n', r'\1', cmd)
# lexed = shlex.split(no_backslash_cmd)
# return lexed
#
# def generate_env(self):
# env = {}
#
# # Don't blindly keep all env. We don't want e.g. PYTHONPATH because it
# # might be some virtual-envy python that has no access to where we want
# # to be.
# keep_env = (
# # Mandatory:
# 'PATH',
# # Nice to have for shell apps:
# 'HOME', 'PWD', 'SHELL', 'USER',
# # #'LANG', 'TZ',
# # Systemd/logging stuff:
# # #'JOURNAL_STREAM', 'LOGNAME', 'INVOCATION_ID',
# )
# for key in keep_env:
# if key in os.environ:
# env[key] = os.environ[key]
#
# # Add our own env.
# env['planb_guid'] = settings.PLANB_GUID
# env['planb_fileset_id'] = str(self.fileset.id)
# env['planb_fileset_friendly_name'] = self.fileset.friendly_name
# env['planb_snapshot_target'] = (
# self.fileset.get_next_snapshot_name())
# env['planb_storage_name'] = (
# self.fileset.get_dataset().name) # XXX! zfs? how do we know?
# env['planb_storage_destination'] = (
# self.fileset.get_dataset().get_data_path())
#
# return env
#
# def run_transport(self):
# # FIXME: duplicate code with transport_rsync.Config.run_transport()
# cmd = self.generate_cmd()
# env = self.generate_env()
# logger.info(
# 'Running %s: %s', self.fileset.friendly_name, argsjoin(cmd))
#
# # Close all DB connections before continuing with the rsync
# # command. Since it may take a while, the connection could get
# # dropped and we'd have issues later on.
# connections.close_all()
#
# stderr = []
# with suspended_signals(SIGHUP, SIGINT, SIGQUIT, SIGTERM):
# try:
# # FIXME: do we want timeout handling here?
# output = check_output(
# cmd, env=env, return_stderr=stderr, preexec_fn=(
# # Disable suspended_signals from parent:
# lambda: sigprocmask(SIG_SETMASK, SIGSET(), 0))
# ).decode('utf-8')
# except CalledProcessError as e:
# logger.warning(
# 'Failure during exec %r: %s', argsjoin(cmd), str(e))
# raise
#
# logger.info(
# 'Exec success for %s transport:\n\n(stdout)\n\n%s\n(stderr)\n\n%s',
# self.fileset.friendly_name, output,
# b'\n'.join(stderr).decode('utf-8', 'replace'))
which might include code, classes, or functions. Output only the next line. | form = modelform_factory(Config, form=FilesetRefForm) |
Given the following code snippet before the placeholder: <|code_start|>
class ConfigAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': (
'fileset', 'transport_command',
)}),
('Advanced options', {'fields': (
'can_create_snapshot', 'can_rotate_snapshot',
)}),
)
<|code_end|>
, predict the next line using imports from the current file:
from django.contrib import admin
from django.forms import modelform_factory
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from planb.forms import FilesetRefForm
from .models import Config
and context including class names, function names, and sometimes code from other files:
# Path: planb/forms.py
# class FilesetRefForm(forms.ModelForm):
# """
# Generate FilesetRefForm tailored to the supplied class; so sorting
# of the Filesets works.
#
# Use in your admin class. For example:
#
# from django.forms import modelform_factory
# from planb.forms import FilesetRefForm
#
# class MyModel(models.Model):
# fileset = models.OneToOneField(Fileset)
#
# class MyModelAdmin(admin.ModelAdmin):
# form = modelform_factory(MyModel, form=FilesetRefForm)
# """
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
#
# if 'fileset' in self.fields:
# # Order.
# self.fields['fileset'].queryset = (
# self.fields['fileset'].queryset.order_by('friendly_name'))
#
# # Get IDs of used filesets.
# ids = set()
# for transport_class_name in settings.PLANB_TRANSPORTS:
# transport_class = apps.get_model(transport_class_name)
# ids.update(transport_class.objects.values_list(
# 'fileset', flat=True))
#
# # Don't list used filesets.
# # NOTE: This is not a fool-proof way to avoid
# # MultipleObjectsReturned. But it will provide a better
# # interface.
# self.fields['fileset'].queryset = (
# self.fields['fileset'].queryset.exclude(id__in=ids))
#
# class Meta:
# fields = '__all__'
#
# Path: planb/transport_exec/models.py
# class Config(AbstractTransport):
# transport_command = CommandField(help_text=_( # FIXME: add env docs
# 'Program to run to do the transport (data import). It is '
# 'split by spaces and fed to execve(). '
# 'Useful variables are available in the environment.'))
#
# class Meta:
# db_table = TABLE_PREFIX # or '{}_config'.format(TABLE_PREFIX)
#
# def __str__(self):
# return 'exec transport {}'.format(
# self.transport_command.replace(' \\\n', ' '))
#
# def get_change_url(self):
# return reverse('admin:transport_exec_config_change', args=(self.pk,))
#
# def generate_cmd(self):
# # shlex.split() keeps linefeeds in backslash-linefeed combo's.
# # We don't want those. Remove any '\\\n' before we proceed.
# # 'abc \\\n def' => ['abc', '\n', 'def']
# # 'abc\\\ndef' => ['abc', '\ndef']
# cmd = self.transport_command
# no_backslash_cmd = re.sub('([^\\\\])\\\\\n', r'\1', cmd)
# lexed = shlex.split(no_backslash_cmd)
# return lexed
#
# def generate_env(self):
# env = {}
#
# # Don't blindly keep all env. We don't want e.g. PYTHONPATH because it
# # might be some virtual-envy python that has no access to where we want
# # to be.
# keep_env = (
# # Mandatory:
# 'PATH',
# # Nice to have for shell apps:
# 'HOME', 'PWD', 'SHELL', 'USER',
# # #'LANG', 'TZ',
# # Systemd/logging stuff:
# # #'JOURNAL_STREAM', 'LOGNAME', 'INVOCATION_ID',
# )
# for key in keep_env:
# if key in os.environ:
# env[key] = os.environ[key]
#
# # Add our own env.
# env['planb_guid'] = settings.PLANB_GUID
# env['planb_fileset_id'] = str(self.fileset.id)
# env['planb_fileset_friendly_name'] = self.fileset.friendly_name
# env['planb_snapshot_target'] = (
# self.fileset.get_next_snapshot_name())
# env['planb_storage_name'] = (
# self.fileset.get_dataset().name) # XXX! zfs? how do we know?
# env['planb_storage_destination'] = (
# self.fileset.get_dataset().get_data_path())
#
# return env
#
# def run_transport(self):
# # FIXME: duplicate code with transport_rsync.Config.run_transport()
# cmd = self.generate_cmd()
# env = self.generate_env()
# logger.info(
# 'Running %s: %s', self.fileset.friendly_name, argsjoin(cmd))
#
# # Close all DB connections before continuing with the rsync
# # command. Since it may take a while, the connection could get
# # dropped and we'd have issues later on.
# connections.close_all()
#
# stderr = []
# with suspended_signals(SIGHUP, SIGINT, SIGQUIT, SIGTERM):
# try:
# # FIXME: do we want timeout handling here?
# output = check_output(
# cmd, env=env, return_stderr=stderr, preexec_fn=(
# # Disable suspended_signals from parent:
# lambda: sigprocmask(SIG_SETMASK, SIGSET(), 0))
# ).decode('utf-8')
# except CalledProcessError as e:
# logger.warning(
# 'Failure during exec %r: %s', argsjoin(cmd), str(e))
# raise
#
# logger.info(
# 'Exec success for %s transport:\n\n(stdout)\n\n%s\n(stderr)\n\n%s',
# self.fileset.friendly_name, output,
# b'\n'.join(stderr).decode('utf-8', 'replace'))
. Output only the next line. | form = modelform_factory(Config, form=FilesetRefForm) |
Continue the code snippet: <|code_start|>SERVER_EMAIL = 'planb@example.com'
EMAIL_SUBJECT_PREFIX = '[PlanB] '
COMPANY_NAME = 'Example Company'
COMPANY_EMAIL = 'support@example.com'
# MySQL config example:
#
# SQL> set names utf8;
# SQL> create database planb;
# SQL> grant all on planb.* to planb identified by 'FIXMEFIXMEFIXME';
DATABASES = {
'default': {
# Choose 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'ENGINE': 'django.db.backends.mysql',
'NAME': 'FIXME', # Or path to database file if using sqlite3.
'USER': 'FIXME', # Not used with sqlite3.
'PASSWORD': 'FIXMEFIXMEFIXME', # Not used with sqlite3.
'HOST': '', # Empty for localhost. Not used with sqlite3.
'PORT': '', # Empty for default. Not used with sqlite3.
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
}
}
# If you want to log to a local directory instead of the default
# /var/log/planb/ then enable this:
if False:
<|code_end|>
. Use current file imports:
from planb.default_settings import * # noqa
from planb.default_settings import LOGGING # fix flake warning
and context (classes, functions, or code) from other files:
# Path: planb/default_settings.py
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'filters': {
# 'require_debug_false': {
# '()': 'django.utils.log.RequireDebugFalse',
# },
# 'require_debug_true': {
# '()': 'django.utils.log.RequireDebugTrue',
# },
# },
# 'formatters': {
# 'simple': {
# 'format': (
# '%(asctime)s [planb/%(process)5d] '
# '[%(levelname)-3.3s] %(message)s (%(name)s)'),
# },
# 'notime': {
# 'format': '%(name)s - %(levelname)s/%(process)s - %(message)s',
# },
# },
# 'handlers': {
# 'null': {
# 'level': 'DEBUG',
# 'class': 'logging.NullHandler',
# },
# 'mail_admins_err': {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'class': 'planb.common.log2.AdminEmailHandler' # django.utils.log
# },
# 'mail_admins_warn': {
# 'level': 'WARNING',
# 'filters': ['require_debug_false'],
# 'class': 'planb.common.log2.AdminEmailHandler' # django.utils.log
# },
# 'console': {
# 'level': 'DEBUG',
# 'class': 'logging.StreamHandler',
# 'formatter': 'simple',
# 'filters': ['require_debug_true'],
# },
# # 'gelf': {
# # 'class': 'graypy.GELFHandler',
# # 'host': '10.x.x.x',
# # 'port': 12221,
# # },
# 'logfile': {
# 'level': 'INFO',
# 'class': 'logging.handlers.WatchedFileHandler',
# 'formatter': 'simple',
# 'filename': '/var/log/planb/core.log',
# # Delay, so management commands don't try to open these
# # unless they have to.
# 'delay': True,
# },
# 'djangoqlogfile': {
# 'level': 'DEBUG',
# 'class': 'logging.handlers.WatchedFileHandler',
# 'formatter': 'simple',
# 'filename': '/var/log/planb/queue.log',
# # Delay, so management commands don't try to open these
# # unless they have to.
# 'delay': True,
# },
# },
# 'loggers': {
# 'planb': {
# 'handlers': ['console', 'logfile', 'mail_admins_err'],
# 'level': 'DEBUG',
# 'propagate': False,
# },
# '': {
# 'handlers': ['mail_admins_warn'],
# 'level': 'WARNING',
# },
# # Let all other handlers below propagate on to here so we can send mail
# # for all WARNINGs.
# 'django-q': {
# 'handlers': ['djangoqlogfile'],
# 'level': 'DEBUG',
# },
# 'django': {
# 'handlers': ['console'],
# },
# 'py.warnings': {
# 'handlers': ['console'],
# },
# }
# }
. Output only the next line. | for key, handler in LOGGING['handlers'].items(): |
Given the following code snippet before the placeholder: <|code_start|>
class InterfaceTestCase(PlanbTestCase):
def test_admin_model(self):
user = UserFactory(is_staff=True, is_superuser=True)
self.client.force_login(user)
hostgroup = HostGroupFactory()
fileset = FilesetFactory(hostgroup=hostgroup)
<|code_end|>
, predict the next line using imports from the current file:
from django.template import Context, Template
from django.utils import timezone
from planb.factories import (
BackupRunFactory, FilesetFactory, HostGroupFactory, UserFactory)
from planb.models import BOGODATE
from planb.tests.base import PlanbTestCase
and context including class names, function names, and sometimes code from other files:
# Path: planb/factories.py
# class BackupRunFactory(DjangoModelFactory):
# fileset = factory.SubFactory(FilesetFactory)
#
# duration = factory.Faker('pyint')
# success = factory.Faker('pybool')
# total_size_mb = factory.Faker('pyint')
#
# @factory.lazy_attribute
# def snapshot_size_mb(self):
# return random.randint(0, self.total_size_mb)
#
# attributes = 'do_snapshot_size_listing: false'
# snapshot_size_listing = ''
#
# class Meta:
# model = 'planb.BackupRun'
#
# class FilesetFactory(DjangoModelFactory):
# host_prefix = factory.Faker('hostname', levels=0)
# host_suffix = factory.Faker('domain_word')
# tld = factory.Faker('tld')
#
# @factory.lazy_attribute
# def friendly_name(self):
# # Set friendly name as the full hostname within the hostgroup domain.
# return '.'.join((
# self.host_prefix, self.host_suffix, self.hostgroup.name, self.tld))
#
# storage_alias = 'dummy'
# hostgroup = factory.SubFactory(HostGroupFactory)
#
# class Meta:
# model = 'planb.Fileset'
# exclude = ['host_prefix', 'host_suffix', 'tld']
#
# class HostGroupFactory(DjangoModelFactory):
# name = factory.Faker('domain_word')
#
# class Meta:
# model = 'planb.HostGroup'
# django_get_or_create = ('name',)
#
# class UserFactory(DjangoModelFactory):
# class Meta:
# model = 'auth.User'
# inline_args = ('username', 'email', 'password')
#
# username = factory.Faker('user_name')
# email = factory.Faker('email')
# password = factory.Faker('password')
# is_active = True
#
# @classmethod
# def _create(cls, model_class, username, email, password, **kwargs):
# instance = model_class.objects._create_user(
# username, email, password, **kwargs)
# instance.raw_password = password
# return instance
#
# Path: planb/models.py
# BOGODATE = datetime(1970, 1, 2, tzinfo=timezone.utc)
#
# Path: planb/tests/base.py
# class PlanbTestCase(TestCase):
# def setUp(self):
# super().setUp()
#
# # Reset storage_pools, otherwise we might get stale data from
# # previous dummy's. This is a dict, that everyone has loaded
# # already. Flush the contents of the dict.
# for storage in storage_pools.values():
# storage.close()
# storage_pools.clear()
# storage_pools.update(load_storage_pools())
. Output only the next line. | backuprun = BackupRunFactory(fileset=fileset) |
Predict the next line after this snippet: <|code_start|>
class InterfaceTestCase(PlanbTestCase):
def test_admin_model(self):
user = UserFactory(is_staff=True, is_superuser=True)
self.client.force_login(user)
hostgroup = HostGroupFactory()
<|code_end|>
using the current file's imports:
from django.template import Context, Template
from django.utils import timezone
from planb.factories import (
BackupRunFactory, FilesetFactory, HostGroupFactory, UserFactory)
from planb.models import BOGODATE
from planb.tests.base import PlanbTestCase
and any relevant context from other files:
# Path: planb/factories.py
# class BackupRunFactory(DjangoModelFactory):
# fileset = factory.SubFactory(FilesetFactory)
#
# duration = factory.Faker('pyint')
# success = factory.Faker('pybool')
# total_size_mb = factory.Faker('pyint')
#
# @factory.lazy_attribute
# def snapshot_size_mb(self):
# return random.randint(0, self.total_size_mb)
#
# attributes = 'do_snapshot_size_listing: false'
# snapshot_size_listing = ''
#
# class Meta:
# model = 'planb.BackupRun'
#
# class FilesetFactory(DjangoModelFactory):
# host_prefix = factory.Faker('hostname', levels=0)
# host_suffix = factory.Faker('domain_word')
# tld = factory.Faker('tld')
#
# @factory.lazy_attribute
# def friendly_name(self):
# # Set friendly name as the full hostname within the hostgroup domain.
# return '.'.join((
# self.host_prefix, self.host_suffix, self.hostgroup.name, self.tld))
#
# storage_alias = 'dummy'
# hostgroup = factory.SubFactory(HostGroupFactory)
#
# class Meta:
# model = 'planb.Fileset'
# exclude = ['host_prefix', 'host_suffix', 'tld']
#
# class HostGroupFactory(DjangoModelFactory):
# name = factory.Faker('domain_word')
#
# class Meta:
# model = 'planb.HostGroup'
# django_get_or_create = ('name',)
#
# class UserFactory(DjangoModelFactory):
# class Meta:
# model = 'auth.User'
# inline_args = ('username', 'email', 'password')
#
# username = factory.Faker('user_name')
# email = factory.Faker('email')
# password = factory.Faker('password')
# is_active = True
#
# @classmethod
# def _create(cls, model_class, username, email, password, **kwargs):
# instance = model_class.objects._create_user(
# username, email, password, **kwargs)
# instance.raw_password = password
# return instance
#
# Path: planb/models.py
# BOGODATE = datetime(1970, 1, 2, tzinfo=timezone.utc)
#
# Path: planb/tests/base.py
# class PlanbTestCase(TestCase):
# def setUp(self):
# super().setUp()
#
# # Reset storage_pools, otherwise we might get stale data from
# # previous dummy's. This is a dict, that everyone has loaded
# # already. Flush the contents of the dict.
# for storage in storage_pools.values():
# storage.close()
# storage_pools.clear()
# storage_pools.update(load_storage_pools())
. Output only the next line. | fileset = FilesetFactory(hostgroup=hostgroup) |
Using the snippet: <|code_start|>
class InterfaceTestCase(PlanbTestCase):
def test_admin_model(self):
user = UserFactory(is_staff=True, is_superuser=True)
self.client.force_login(user)
<|code_end|>
, determine the next line of code. You have imports:
from django.template import Context, Template
from django.utils import timezone
from planb.factories import (
BackupRunFactory, FilesetFactory, HostGroupFactory, UserFactory)
from planb.models import BOGODATE
from planb.tests.base import PlanbTestCase
and context (class names, function names, or code) available:
# Path: planb/factories.py
# class BackupRunFactory(DjangoModelFactory):
# fileset = factory.SubFactory(FilesetFactory)
#
# duration = factory.Faker('pyint')
# success = factory.Faker('pybool')
# total_size_mb = factory.Faker('pyint')
#
# @factory.lazy_attribute
# def snapshot_size_mb(self):
# return random.randint(0, self.total_size_mb)
#
# attributes = 'do_snapshot_size_listing: false'
# snapshot_size_listing = ''
#
# class Meta:
# model = 'planb.BackupRun'
#
# class FilesetFactory(DjangoModelFactory):
# host_prefix = factory.Faker('hostname', levels=0)
# host_suffix = factory.Faker('domain_word')
# tld = factory.Faker('tld')
#
# @factory.lazy_attribute
# def friendly_name(self):
# # Set friendly name as the full hostname within the hostgroup domain.
# return '.'.join((
# self.host_prefix, self.host_suffix, self.hostgroup.name, self.tld))
#
# storage_alias = 'dummy'
# hostgroup = factory.SubFactory(HostGroupFactory)
#
# class Meta:
# model = 'planb.Fileset'
# exclude = ['host_prefix', 'host_suffix', 'tld']
#
# class HostGroupFactory(DjangoModelFactory):
# name = factory.Faker('domain_word')
#
# class Meta:
# model = 'planb.HostGroup'
# django_get_or_create = ('name',)
#
# class UserFactory(DjangoModelFactory):
# class Meta:
# model = 'auth.User'
# inline_args = ('username', 'email', 'password')
#
# username = factory.Faker('user_name')
# email = factory.Faker('email')
# password = factory.Faker('password')
# is_active = True
#
# @classmethod
# def _create(cls, model_class, username, email, password, **kwargs):
# instance = model_class.objects._create_user(
# username, email, password, **kwargs)
# instance.raw_password = password
# return instance
#
# Path: planb/models.py
# BOGODATE = datetime(1970, 1, 2, tzinfo=timezone.utc)
#
# Path: planb/tests/base.py
# class PlanbTestCase(TestCase):
# def setUp(self):
# super().setUp()
#
# # Reset storage_pools, otherwise we might get stale data from
# # previous dummy's. This is a dict, that everyone has loaded
# # already. Flush the contents of the dict.
# for storage in storage_pools.values():
# storage.close()
# storage_pools.clear()
# storage_pools.update(load_storage_pools())
. Output only the next line. | hostgroup = HostGroupFactory() |
Next line prediction: <|code_start|>
# Test rename task spawn after hostgroup name change.
data = {
'name': 'my-group',
'_save': 'Save',
}
response = self.client.post(
'/planb/hostgroup/{}/change/'.format(hostgroup.pk), data,
follow=True)
self.assertContains(
response,
'A rename task has been queued for all filesets in the hostgroup')
# Test rename task spawn after fileset name change.
data = {
'friendly_name': 'my-host',
'hostgroup': hostgroup.pk,
}
response = self.client.post(
'/planb/fileset/{}/change/'.format(fileset.pk), data, follow=True)
self.assertContains(
response, 'A rename task has been queued for the fileset')
def test_global_messages_templatetag(self):
context = Context()
template = Template('{% load planb %}{% global_messages %}')
self.assertEqual(template.render(context), '')
# Hack to trigger email updates doesn't show messages.
<|code_end|>
. Use current file imports:
(from django.template import Context, Template
from django.utils import timezone
from planb.factories import (
BackupRunFactory, FilesetFactory, HostGroupFactory, UserFactory)
from planb.models import BOGODATE
from planb.tests.base import PlanbTestCase)
and context including class names, function names, or small code snippets from other files:
# Path: planb/factories.py
# class BackupRunFactory(DjangoModelFactory):
# fileset = factory.SubFactory(FilesetFactory)
#
# duration = factory.Faker('pyint')
# success = factory.Faker('pybool')
# total_size_mb = factory.Faker('pyint')
#
# @factory.lazy_attribute
# def snapshot_size_mb(self):
# return random.randint(0, self.total_size_mb)
#
# attributes = 'do_snapshot_size_listing: false'
# snapshot_size_listing = ''
#
# class Meta:
# model = 'planb.BackupRun'
#
# class FilesetFactory(DjangoModelFactory):
# host_prefix = factory.Faker('hostname', levels=0)
# host_suffix = factory.Faker('domain_word')
# tld = factory.Faker('tld')
#
# @factory.lazy_attribute
# def friendly_name(self):
# # Set friendly name as the full hostname within the hostgroup domain.
# return '.'.join((
# self.host_prefix, self.host_suffix, self.hostgroup.name, self.tld))
#
# storage_alias = 'dummy'
# hostgroup = factory.SubFactory(HostGroupFactory)
#
# class Meta:
# model = 'planb.Fileset'
# exclude = ['host_prefix', 'host_suffix', 'tld']
#
# class HostGroupFactory(DjangoModelFactory):
# name = factory.Faker('domain_word')
#
# class Meta:
# model = 'planb.HostGroup'
# django_get_or_create = ('name',)
#
# class UserFactory(DjangoModelFactory):
# class Meta:
# model = 'auth.User'
# inline_args = ('username', 'email', 'password')
#
# username = factory.Faker('user_name')
# email = factory.Faker('email')
# password = factory.Faker('password')
# is_active = True
#
# @classmethod
# def _create(cls, model_class, username, email, password, **kwargs):
# instance = model_class.objects._create_user(
# username, email, password, **kwargs)
# instance.raw_password = password
# return instance
#
# Path: planb/models.py
# BOGODATE = datetime(1970, 1, 2, tzinfo=timezone.utc)
#
# Path: planb/tests/base.py
# class PlanbTestCase(TestCase):
# def setUp(self):
# super().setUp()
#
# # Reset storage_pools, otherwise we might get stale data from
# # previous dummy's. This is a dict, that everyone has loaded
# # already. Flush the contents of the dict.
# for storage in storage_pools.values():
# storage.close()
# storage_pools.clear()
# storage_pools.update(load_storage_pools())
. Output only the next line. | FilesetFactory(first_fail=BOGODATE) |
Using the snippet: <|code_start|>
_planb_settings_checks = []
_is_planb_settings_check = (lambda x: _planb_settings_checks.append(x) or x)
def check_planb_settings(app_configs, **kwargs):
errors = []
for checkfunc in _planb_settings_checks:
errors.extend(checkfunc())
return errors
@_is_planb_settings_check
def _settings__planb_blacklist_hours():
if settings.PLANB_BLACKLIST_HOURS:
try:
<|code_end|>
, determine the next line of code. You have imports:
import re
from django.conf import settings
from django.core.checks import Critical
from django.core.exceptions import ValidationError
from .models import validate_blacklist_hours, validate_retention
and context (class names, function names, or code) available:
# Path: planb/models.py
# BOGODATE = datetime(1970, 1, 2, tzinfo=timezone.utc)
# class _DecoratedSnapshot:
# class HostGroup(models.Model):
# class Meta:
# class FilesetLock(object):
# class Fileset(models.Model):
# class Meta:
# class BackupRun(models.Model):
# def iterator(cls, sorted_snapshots):
# def __init__(self, name, prev=None):
# def diff(self):
# def rdiff(self):
# def _human_time(self, secs):
# def __str__(self):
# def get_blacklist_hours(self):
# def get_retention(self):
# def __str__(self):
# def __init__(self, fileset_id, timeout=86400):
# def lock(self):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def is_acquired(self):
# def acquire(self, blocking=None):
# def release(self):
# def __str__(self):
# def unique_name(self):
# def with_lock(fileset_id):
# def get_transport(self):
# def storage(self):
# def get_blacklist_hours(self):
# def is_in_blacklist_hours(self):
# def get_retention(self):
# def retention_map(self):
# def hourly_retention(self):
# def daily_retention(self):
# def weekly_retention(self):
# def monthly_retention(self):
# def yearly_retention(self):
# def retention_display(self):
# def total_size(self):
# def snapshot_size(self):
# def snapshot_count(self):
# def snapshot_efficiency(self):
# def last_backuprun(self):
# def last_successful_backuprun(self):
# def get_dataset(self):
# def rename_dataset(self, new_dataset_name):
# def clone(self, **override):
# def should_backup(self):
# def _has_recent_backup(self):
# def snapshot_rotate(self):
# def snapshot_list(self):
# def snapshot_list_display(self):
# def has_child_datasets(self):
# def get_next_snapshot_name(self):
# def snapshot_create(self):
# def signal_done(self, success):
# def save(self, *args, **kwargs):
# def total_size(self):
# def snapshot_size(self):
# def snapshot_size_listing_as_list(self):
# def __str__(self):
# def create_dataset(sender, instance, created, *args, **kwargs):
. Output only the next line. | validate_blacklist_hours(settings.PLANB_BLACKLIST_HOURS) |
Given the following code snippet before the placeholder: <|code_start|>
_planb_settings_checks = []
_is_planb_settings_check = (lambda x: _planb_settings_checks.append(x) or x)
def check_planb_settings(app_configs, **kwargs):
errors = []
for checkfunc in _planb_settings_checks:
errors.extend(checkfunc())
return errors
@_is_planb_settings_check
def _settings__planb_blacklist_hours():
if settings.PLANB_BLACKLIST_HOURS:
try:
validate_blacklist_hours(settings.PLANB_BLACKLIST_HOURS)
except ValidationError as e:
return [Critical(
'settings.PLANB_BLACKLIST_HOURS is invalid', hint=e.message,
id='planb.E001')]
return []
@_is_planb_settings_check
def _settings__planb_retention():
if settings.PLANB_RETENTION:
try:
<|code_end|>
, predict the next line using imports from the current file:
import re
from django.conf import settings
from django.core.checks import Critical
from django.core.exceptions import ValidationError
from .models import validate_blacklist_hours, validate_retention
and context including class names, function names, and sometimes code from other files:
# Path: planb/models.py
# BOGODATE = datetime(1970, 1, 2, tzinfo=timezone.utc)
# class _DecoratedSnapshot:
# class HostGroup(models.Model):
# class Meta:
# class FilesetLock(object):
# class Fileset(models.Model):
# class Meta:
# class BackupRun(models.Model):
# def iterator(cls, sorted_snapshots):
# def __init__(self, name, prev=None):
# def diff(self):
# def rdiff(self):
# def _human_time(self, secs):
# def __str__(self):
# def get_blacklist_hours(self):
# def get_retention(self):
# def __str__(self):
# def __init__(self, fileset_id, timeout=86400):
# def lock(self):
# def __enter__(self):
# def __exit__(self, type, value, traceback):
# def is_acquired(self):
# def acquire(self, blocking=None):
# def release(self):
# def __str__(self):
# def unique_name(self):
# def with_lock(fileset_id):
# def get_transport(self):
# def storage(self):
# def get_blacklist_hours(self):
# def is_in_blacklist_hours(self):
# def get_retention(self):
# def retention_map(self):
# def hourly_retention(self):
# def daily_retention(self):
# def weekly_retention(self):
# def monthly_retention(self):
# def yearly_retention(self):
# def retention_display(self):
# def total_size(self):
# def snapshot_size(self):
# def snapshot_count(self):
# def snapshot_efficiency(self):
# def last_backuprun(self):
# def last_successful_backuprun(self):
# def get_dataset(self):
# def rename_dataset(self, new_dataset_name):
# def clone(self, **override):
# def should_backup(self):
# def _has_recent_backup(self):
# def snapshot_rotate(self):
# def snapshot_list(self):
# def snapshot_list_display(self):
# def has_child_datasets(self):
# def get_next_snapshot_name(self):
# def snapshot_create(self):
# def signal_done(self, success):
# def save(self, *args, **kwargs):
# def total_size(self):
# def snapshot_size(self):
# def snapshot_size_listing_as_list(self):
# def __str__(self):
# def create_dataset(sender, instance, created, *args, **kwargs):
. Output only the next line. | validate_retention(settings.PLANB_RETENTION) |
Based on the snippet: <|code_start|>
class TransportExecTestCase(TestCase):
def test_shlex_with_backslash(self):
self.assertEqual(
shlex.split(
'/usr/local/bin/planb-zfssync --qlz1 root@10.1.2.3 \\\n'
' tank/mysql/log \\\n tank/mysql/data'),
['/usr/local/bin/planb-zfssync', '--qlz1', 'root@10.1.2.3',
'\n', 'tank/mysql/log', '\n', 'tank/mysql/data'])
def test_transport_command_with_backslash(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import shlex
from django.test import TestCase
from .models import Config
and context (classes, functions, sometimes code) from other files:
# Path: planb/transport_exec/models.py
# class Config(AbstractTransport):
# transport_command = CommandField(help_text=_( # FIXME: add env docs
# 'Program to run to do the transport (data import). It is '
# 'split by spaces and fed to execve(). '
# 'Useful variables are available in the environment.'))
#
# class Meta:
# db_table = TABLE_PREFIX # or '{}_config'.format(TABLE_PREFIX)
#
# def __str__(self):
# return 'exec transport {}'.format(
# self.transport_command.replace(' \\\n', ' '))
#
# def get_change_url(self):
# return reverse('admin:transport_exec_config_change', args=(self.pk,))
#
# def generate_cmd(self):
# # shlex.split() keeps linefeeds in backslash-linefeed combo's.
# # We don't want those. Remove any '\\\n' before we proceed.
# # 'abc \\\n def' => ['abc', '\n', 'def']
# # 'abc\\\ndef' => ['abc', '\ndef']
# cmd = self.transport_command
# no_backslash_cmd = re.sub('([^\\\\])\\\\\n', r'\1', cmd)
# lexed = shlex.split(no_backslash_cmd)
# return lexed
#
# def generate_env(self):
# env = {}
#
# # Don't blindly keep all env. We don't want e.g. PYTHONPATH because it
# # might be some virtual-envy python that has no access to where we want
# # to be.
# keep_env = (
# # Mandatory:
# 'PATH',
# # Nice to have for shell apps:
# 'HOME', 'PWD', 'SHELL', 'USER',
# # #'LANG', 'TZ',
# # Systemd/logging stuff:
# # #'JOURNAL_STREAM', 'LOGNAME', 'INVOCATION_ID',
# )
# for key in keep_env:
# if key in os.environ:
# env[key] = os.environ[key]
#
# # Add our own env.
# env['planb_guid'] = settings.PLANB_GUID
# env['planb_fileset_id'] = str(self.fileset.id)
# env['planb_fileset_friendly_name'] = self.fileset.friendly_name
# env['planb_snapshot_target'] = (
# self.fileset.get_next_snapshot_name())
# env['planb_storage_name'] = (
# self.fileset.get_dataset().name) # XXX! zfs? how do we know?
# env['planb_storage_destination'] = (
# self.fileset.get_dataset().get_data_path())
#
# return env
#
# def run_transport(self):
# # FIXME: duplicate code with transport_rsync.Config.run_transport()
# cmd = self.generate_cmd()
# env = self.generate_env()
# logger.info(
# 'Running %s: %s', self.fileset.friendly_name, argsjoin(cmd))
#
# # Close all DB connections before continuing with the rsync
# # command. Since it may take a while, the connection could get
# # dropped and we'd have issues later on.
# connections.close_all()
#
# stderr = []
# with suspended_signals(SIGHUP, SIGINT, SIGQUIT, SIGTERM):
# try:
# # FIXME: do we want timeout handling here?
# output = check_output(
# cmd, env=env, return_stderr=stderr, preexec_fn=(
# # Disable suspended_signals from parent:
# lambda: sigprocmask(SIG_SETMASK, SIGSET(), 0))
# ).decode('utf-8')
# except CalledProcessError as e:
# logger.warning(
# 'Failure during exec %r: %s', argsjoin(cmd), str(e))
# raise
#
# logger.info(
# 'Exec success for %s transport:\n\n(stdout)\n\n%s\n(stderr)\n\n%s',
# self.fileset.friendly_name, output,
# b'\n'.join(stderr).decode('utf-8', 'replace'))
. Output only the next line. | transport = Config(transport_command=( |
Continue the code snippet: <|code_start|> self.handle_error(d, e)
# Post the items that did not fail validation.
if remaining:
self.post(remaining)
else:
self.handle_error(data, error)
def handle_error(self, data, error):
if error.get('non_field_errors', []) == self.UNIQUE_ERROR:
logger.warning('%r returned error %r', data, error)
else:
logger.error('%r returned error %r', data, error)
def format_data(self, hostgroup, fileset, report_date):
return {
'relation_code': hostgroup.name,
'item_code': fileset.friendly_name,
'service_code': 'backup-size-gibibyte',
'date': report_date.strftime('%Y-%m-%d'),
'value': round(fileset.total_size_mb / 1024, 5), # MiB to GiB
'unit': 'GiB',
}
def daily_hostgroup_report(data_poster, hostgroup_qs=None):
"""
This could be run daily to report to REMOTE how many data each
hostgroup has backed up.
"""
if hostgroup_qs is None:
<|code_end|>
. Use current file imports:
import logging
import requests
from datetime import date
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from planb.models import HostGroup
and context (classes, functions, or code) from other files:
# Path: planb/models.py
# class HostGroup(models.Model):
# name = models.CharField(max_length=63, unique=True)
# notify_email = MultiEmailField(
# blank=True, null=True,
# help_text=_('Use a newline per emailaddress'))
# last_monthly_report = models.DateTimeField(blank=True, null=True)
# notes = models.TextField(blank=True, help_text=_(
# 'Description, guidelines and agreements for the hostgroup.'))
#
# blacklist_hours = models.CharField(
# _('Blacklist hours'), max_length=31, blank=True,
# validators=[validate_blacklist_hours], help_text=_(
# 'Specify hours during which backups are disabled using notation '
# 'h,h-h or none to disable blacklist hours. When left empty the '
# 'system blacklist hours are used.'))
# retention = models.CharField(
# max_length=31, blank=True, validators=[validate_retention],
# help_text=_(
# 'The backup retention period using notation <n><period> separated '
# 'by comma: 1y,6m,3w,15d. When left empty the system retention '
# 'periods are used.'))
#
# def get_blacklist_hours(self):
# if self.blacklist_hours:
# return self.blacklist_hours
# return settings.PLANB_BLACKLIST_HOURS
# get_blacklist_hours.short_description = _('Blacklist hours')
#
# def get_retention(self):
# if self.retention:
# return self.retention
# return settings.PLANB_RETENTION
# get_retention.short_description = _('Retention')
#
# def __str__(self):
# return self.name
#
# class Meta:
# ordering = ('name',)
. Output only the next line. | hostgroup_qs = HostGroup.objects.all() |
Next line prediction: <|code_start|>
class Subprocess2Test(TestCase):
def test_calledprocesserror_stderr_on_first_line(self):
try:
check_call(
['/bin/ls', '/surely/this/dir/does/not/exist'],
env={'LC_ALL': 'C'})
<|code_end|>
. Use current file imports:
(from unittest import TestCase
from .subprocess2 import CalledProcessError, check_call, check_output)
and context including class names, function names, or small code snippets from other files:
# Path: planb/common/subprocess2.py
# class CalledProcessError(OrigCalledProcessError):
# """
# Version of subprocess.CalledProcessError that also shows the stdout
# and stderr data if available.
# """
# _anychar_re = re_compile(br'[A-Za-z]') # bytestring-re
#
# def __init__(self, returncode, cmd, stdout, stderr):
# super().__init__(returncode=returncode, cmd=cmd, output=stdout)
# self.errput = stderr
#
# def _quote(self, bintext):
# text = bintext.decode('ascii', 'replace')
# text = text.replace('\r', '')
# if not text:
# return ''
#
# # You should not rely on this output to be complete. And when
# # you're getting this via e-mail, you don't want big mega-byte
# # blobs. Trim it if it's too large:
# if len(text) >= 256 * 1024:
# text = (
# text[0:(128 * 1024)]
# + '\n[... truncated ...]\n'
# + text[-(128 * 1024):])
#
# if text.endswith('\n'):
# text = text[0:-1]
# else:
# text += '[noeol]'
#
# return '> ' + '\n> '.join(text.split('\n'))
#
# @property
# def _short_stderr(self):
# # Take first non-empty, meaningful line. For example:
# # > @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# # > @ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
# # > @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# # Here we'd return the second line.
# #
# # >>> timeit.timeit((lambda: any(
# # ... i in string for i in (
# # ... 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# # ... 'abcdefghijklmnopqrstuvwxyz'))))
# # 3.488983060999999
# # >>> timeit.timeit((lambda: anychar.search(string)))
# # 0.49033315299993774
# #
# for line in self.errput.splitlines(): # use iterator instead?
# if self._anychar_re.search(line):
# return line.decode('ascii', 'replace').strip()
# return '?'
#
# def __str__(self):
# stdout = self._quote(self.output)
# stderr = self._quote(self.errput)
#
# # Take entire command if string, or first item if tuple.
# short_cmd = self.cmd if isinstance(self.cmd, str) else self.cmd[0]
#
# # Make a meaningful first line.
# ret = ['{cmd}: "{stderr}" (exit {code})'.format(
# cmd=short_cmd, stderr=self._short_stderr.replace('"', '""'),
# code=self.returncode)]
#
# if stderr:
# ret.append('STDERR:\n{}'.format(stderr))
# if stdout:
# ret.append('STDOUT:\n{}'.format(stdout))
#
# if not isinstance(self.cmd, str):
# ret.append('COMMAND: {}'.format(argsjoin(self.cmd)))
#
# ret.append('')
# return '\n\n'.join(ret)
#
# def check_call(cmd, *, env=None, preexec_fn=None, shell=False, timeout=None):
# """
# Same as check_output, but discards output.
#
# Note that stdout/stderr are still captured so we have more
# informative exceptions.
# """
# check_output(
# cmd, env=env, preexec_fn=preexec_fn, shell=shell, timeout=timeout)
#
# def check_output(cmd, *, env=None, preexec_fn=None, return_stderr=None,
# shell=False, timeout=None):
# """
# Run command with arguments and return its output.
#
# Behaves as regular subprocess.check_output but raises the improved
# CalledProcessError on error.
#
# You'll need to decode stdout from binary encoding yourself.
#
# If return_stderr is a list, stderr will be added to it, if it's non-empty.
# """
# assert isinstance(return_stderr, list) or return_stderr is None
# assert timeout is None, 'Timeout is not supported for now'
#
# fp, ret, stdout, stderr = None, -1, '', ''
# try:
# fp = Popen(
# cmd, stdin=None, stdout=PIPE, stderr=PIPE, env=env,
# preexec_fn=preexec_fn, shell=shell)
# stdout, stderr = fp.communicate()
# ret = fp.wait()
# fp = None
# if ret != 0:
# raise CalledProcessError(ret, cmd, stdout, stderr)
# finally:
# if fp:
# fp.kill()
#
# if stderr and return_stderr is not None:
# return_stderr.append(stderr)
# return stdout
. Output only the next line. | except CalledProcessError as e: |
Predict the next line for this snippet: <|code_start|>
class Subprocess2Test(TestCase):
def test_calledprocesserror_stderr_on_first_line(self):
try:
<|code_end|>
with the help of current file imports:
from unittest import TestCase
from .subprocess2 import CalledProcessError, check_call, check_output
and context from other files:
# Path: planb/common/subprocess2.py
# class CalledProcessError(OrigCalledProcessError):
# """
# Version of subprocess.CalledProcessError that also shows the stdout
# and stderr data if available.
# """
# _anychar_re = re_compile(br'[A-Za-z]') # bytestring-re
#
# def __init__(self, returncode, cmd, stdout, stderr):
# super().__init__(returncode=returncode, cmd=cmd, output=stdout)
# self.errput = stderr
#
# def _quote(self, bintext):
# text = bintext.decode('ascii', 'replace')
# text = text.replace('\r', '')
# if not text:
# return ''
#
# # You should not rely on this output to be complete. And when
# # you're getting this via e-mail, you don't want big mega-byte
# # blobs. Trim it if it's too large:
# if len(text) >= 256 * 1024:
# text = (
# text[0:(128 * 1024)]
# + '\n[... truncated ...]\n'
# + text[-(128 * 1024):])
#
# if text.endswith('\n'):
# text = text[0:-1]
# else:
# text += '[noeol]'
#
# return '> ' + '\n> '.join(text.split('\n'))
#
# @property
# def _short_stderr(self):
# # Take first non-empty, meaningful line. For example:
# # > @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# # > @ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
# # > @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# # Here we'd return the second line.
# #
# # >>> timeit.timeit((lambda: any(
# # ... i in string for i in (
# # ... 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# # ... 'abcdefghijklmnopqrstuvwxyz'))))
# # 3.488983060999999
# # >>> timeit.timeit((lambda: anychar.search(string)))
# # 0.49033315299993774
# #
# for line in self.errput.splitlines(): # use iterator instead?
# if self._anychar_re.search(line):
# return line.decode('ascii', 'replace').strip()
# return '?'
#
# def __str__(self):
# stdout = self._quote(self.output)
# stderr = self._quote(self.errput)
#
# # Take entire command if string, or first item if tuple.
# short_cmd = self.cmd if isinstance(self.cmd, str) else self.cmd[0]
#
# # Make a meaningful first line.
# ret = ['{cmd}: "{stderr}" (exit {code})'.format(
# cmd=short_cmd, stderr=self._short_stderr.replace('"', '""'),
# code=self.returncode)]
#
# if stderr:
# ret.append('STDERR:\n{}'.format(stderr))
# if stdout:
# ret.append('STDOUT:\n{}'.format(stdout))
#
# if not isinstance(self.cmd, str):
# ret.append('COMMAND: {}'.format(argsjoin(self.cmd)))
#
# ret.append('')
# return '\n\n'.join(ret)
#
# def check_call(cmd, *, env=None, preexec_fn=None, shell=False, timeout=None):
# """
# Same as check_output, but discards output.
#
# Note that stdout/stderr are still captured so we have more
# informative exceptions.
# """
# check_output(
# cmd, env=env, preexec_fn=preexec_fn, shell=shell, timeout=timeout)
#
# def check_output(cmd, *, env=None, preexec_fn=None, return_stderr=None,
# shell=False, timeout=None):
# """
# Run command with arguments and return its output.
#
# Behaves as regular subprocess.check_output but raises the improved
# CalledProcessError on error.
#
# You'll need to decode stdout from binary encoding yourself.
#
# If return_stderr is a list, stderr will be added to it, if it's non-empty.
# """
# assert isinstance(return_stderr, list) or return_stderr is None
# assert timeout is None, 'Timeout is not supported for now'
#
# fp, ret, stdout, stderr = None, -1, '', ''
# try:
# fp = Popen(
# cmd, stdin=None, stdout=PIPE, stderr=PIPE, env=env,
# preexec_fn=preexec_fn, shell=shell)
# stdout, stderr = fp.communicate()
# ret = fp.wait()
# fp = None
# if ret != 0:
# raise CalledProcessError(ret, cmd, stdout, stderr)
# finally:
# if fp:
# fp.kill()
#
# if stderr and return_stderr is not None:
# return_stderr.append(stderr)
# return stdout
, which may contain function names, class names, or code. Output only the next line. | check_call( |
Predict the next line for this snippet: <|code_start|>
class Subprocess2Test(TestCase):
def test_calledprocesserror_stderr_on_first_line(self):
try:
check_call(
['/bin/ls', '/surely/this/dir/does/not/exist'],
env={'LC_ALL': 'C'})
except CalledProcessError as e:
# /bin/ls: "/bin/ls: cannot access
# '/surely/this/dir/does/not/exist': No such file or
# directory" (exit 2)
line1 = str(e).split('\n', 1)[0].rstrip()
self.assertIn('/bin/ls:', line1)
self.assertIn('No such file or directory', line1)
else:
self.assertFalse(True, 'Surely the dir does not exist?')
def test_return_stderr(self):
stderr = []
<|code_end|>
with the help of current file imports:
from unittest import TestCase
from .subprocess2 import CalledProcessError, check_call, check_output
and context from other files:
# Path: planb/common/subprocess2.py
# class CalledProcessError(OrigCalledProcessError):
# """
# Version of subprocess.CalledProcessError that also shows the stdout
# and stderr data if available.
# """
# _anychar_re = re_compile(br'[A-Za-z]') # bytestring-re
#
# def __init__(self, returncode, cmd, stdout, stderr):
# super().__init__(returncode=returncode, cmd=cmd, output=stdout)
# self.errput = stderr
#
# def _quote(self, bintext):
# text = bintext.decode('ascii', 'replace')
# text = text.replace('\r', '')
# if not text:
# return ''
#
# # You should not rely on this output to be complete. And when
# # you're getting this via e-mail, you don't want big mega-byte
# # blobs. Trim it if it's too large:
# if len(text) >= 256 * 1024:
# text = (
# text[0:(128 * 1024)]
# + '\n[... truncated ...]\n'
# + text[-(128 * 1024):])
#
# if text.endswith('\n'):
# text = text[0:-1]
# else:
# text += '[noeol]'
#
# return '> ' + '\n> '.join(text.split('\n'))
#
# @property
# def _short_stderr(self):
# # Take first non-empty, meaningful line. For example:
# # > @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# # > @ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
# # > @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# # Here we'd return the second line.
# #
# # >>> timeit.timeit((lambda: any(
# # ... i in string for i in (
# # ... 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# # ... 'abcdefghijklmnopqrstuvwxyz'))))
# # 3.488983060999999
# # >>> timeit.timeit((lambda: anychar.search(string)))
# # 0.49033315299993774
# #
# for line in self.errput.splitlines(): # use iterator instead?
# if self._anychar_re.search(line):
# return line.decode('ascii', 'replace').strip()
# return '?'
#
# def __str__(self):
# stdout = self._quote(self.output)
# stderr = self._quote(self.errput)
#
# # Take entire command if string, or first item if tuple.
# short_cmd = self.cmd if isinstance(self.cmd, str) else self.cmd[0]
#
# # Make a meaningful first line.
# ret = ['{cmd}: "{stderr}" (exit {code})'.format(
# cmd=short_cmd, stderr=self._short_stderr.replace('"', '""'),
# code=self.returncode)]
#
# if stderr:
# ret.append('STDERR:\n{}'.format(stderr))
# if stdout:
# ret.append('STDOUT:\n{}'.format(stdout))
#
# if not isinstance(self.cmd, str):
# ret.append('COMMAND: {}'.format(argsjoin(self.cmd)))
#
# ret.append('')
# return '\n\n'.join(ret)
#
# def check_call(cmd, *, env=None, preexec_fn=None, shell=False, timeout=None):
# """
# Same as check_output, but discards output.
#
# Note that stdout/stderr are still captured so we have more
# informative exceptions.
# """
# check_output(
# cmd, env=env, preexec_fn=preexec_fn, shell=shell, timeout=timeout)
#
# def check_output(cmd, *, env=None, preexec_fn=None, return_stderr=None,
# shell=False, timeout=None):
# """
# Run command with arguments and return its output.
#
# Behaves as regular subprocess.check_output but raises the improved
# CalledProcessError on error.
#
# You'll need to decode stdout from binary encoding yourself.
#
# If return_stderr is a list, stderr will be added to it, if it's non-empty.
# """
# assert isinstance(return_stderr, list) or return_stderr is None
# assert timeout is None, 'Timeout is not supported for now'
#
# fp, ret, stdout, stderr = None, -1, '', ''
# try:
# fp = Popen(
# cmd, stdin=None, stdout=PIPE, stderr=PIPE, env=env,
# preexec_fn=preexec_fn, shell=shell)
# stdout, stderr = fp.communicate()
# ret = fp.wait()
# fp = None
# if ret != 0:
# raise CalledProcessError(ret, cmd, stdout, stderr)
# finally:
# if fp:
# fp.kill()
#
# if stderr and return_stderr is not None:
# return_stderr.append(stderr)
# return stdout
, which may contain function names, class names, or code. Output only the next line. | stdout = check_output( |
Given the following code snippet before the placeholder: <|code_start|>MANAGERS = ADMINS = (
# ('My Name', 'myname@example.com'),
)
DEFAULT_FROM_EMAIL = 'support@example.com'
SERVER_EMAIL = 'planb@example.com'
EMAIL_SUBJECT_PREFIX = '[PlanB] '
COMPANY_NAME = 'Example Company'
COMPANY_EMAIL = 'support@example.com'
# MySQL config example:
#
# SQL> set names utf8;
# SQL> create database planb;
# SQL> grant all on planb.* to planb identified by 'FIXMEFIXMEFIXME';
DATABASES = {
'default': {
# Choose 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Empty for localhost. Not used with sqlite3.
'PORT': '', # Empty for default. Not used with sqlite3.
'OPTIONS': {},
}
}
# Replace file logging with output to stderr.
<|code_end|>
, predict the next line using imports from the current file:
from planb.default_settings import * # noqa
from planb.default_settings import LOGGING, Q_CLUSTER
and context including class names, function names, and sometimes code from other files:
# Path: planb/default_settings.py
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'filters': {
# 'require_debug_false': {
# '()': 'django.utils.log.RequireDebugFalse',
# },
# 'require_debug_true': {
# '()': 'django.utils.log.RequireDebugTrue',
# },
# },
# 'formatters': {
# 'simple': {
# 'format': (
# '%(asctime)s [planb/%(process)5d] '
# '[%(levelname)-3.3s] %(message)s (%(name)s)'),
# },
# 'notime': {
# 'format': '%(name)s - %(levelname)s/%(process)s - %(message)s',
# },
# },
# 'handlers': {
# 'null': {
# 'level': 'DEBUG',
# 'class': 'logging.NullHandler',
# },
# 'mail_admins_err': {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'class': 'planb.common.log2.AdminEmailHandler' # django.utils.log
# },
# 'mail_admins_warn': {
# 'level': 'WARNING',
# 'filters': ['require_debug_false'],
# 'class': 'planb.common.log2.AdminEmailHandler' # django.utils.log
# },
# 'console': {
# 'level': 'DEBUG',
# 'class': 'logging.StreamHandler',
# 'formatter': 'simple',
# 'filters': ['require_debug_true'],
# },
# # 'gelf': {
# # 'class': 'graypy.GELFHandler',
# # 'host': '10.x.x.x',
# # 'port': 12221,
# # },
# 'logfile': {
# 'level': 'INFO',
# 'class': 'logging.handlers.WatchedFileHandler',
# 'formatter': 'simple',
# 'filename': '/var/log/planb/core.log',
# # Delay, so management commands don't try to open these
# # unless they have to.
# 'delay': True,
# },
# 'djangoqlogfile': {
# 'level': 'DEBUG',
# 'class': 'logging.handlers.WatchedFileHandler',
# 'formatter': 'simple',
# 'filename': '/var/log/planb/queue.log',
# # Delay, so management commands don't try to open these
# # unless they have to.
# 'delay': True,
# },
# },
# 'loggers': {
# 'planb': {
# 'handlers': ['console', 'logfile', 'mail_admins_err'],
# 'level': 'DEBUG',
# 'propagate': False,
# },
# '': {
# 'handlers': ['mail_admins_warn'],
# 'level': 'WARNING',
# },
# # Let all other handlers below propagate on to here so we can send mail
# # for all WARNINGs.
# 'django-q': {
# 'handlers': ['djangoqlogfile'],
# 'level': 'DEBUG',
# },
# 'django': {
# 'handlers': ['console'],
# },
# 'py.warnings': {
# 'handlers': ['console'],
# },
# }
# }
#
# Q_CLUSTER = {
# 'name': 'planb', # redis prefix AND default broker (yuck!)
# 'workers': 15, # how many workers to process tasks simultaneously
# 'timeout': 86300, # almost a day
# 'retry': 86400, # an entire day (needed??)
# 'catch_up': False, # no catching up of missed scheduled tasks
# 'compress': False, # don't care about payload size
# # The save limit must exceed the amount of enabled filesets * 2 + a little.
# # If the task result cannot be saved the hook will not trigger and
# # this will cause the backup_done signal to be skipped.
# 'save_limit': 1000, # store 1000 successful jobs, drop older
# 'label': 'Task Queue', # seen in Django Admin
# 'scheduler': True, # Schedule on default queue
# 'redis': {
# 'host': '127.0.0.1',
# 'port': 6379,
# 'db': 0,
# },
# }
. Output only the next line. | for key, handler in LOGGING['handlers'].items(): |
Based on the snippet: <|code_start|> return {'nodes': str(nd), 'edges' : str(ed), 'services': str(services) }
def import_kalkati_data(filename, network_name = "Public Transport"):
return import_gtfs_data(filename, network_name)
def import_freq(self, line_name, nodesf, linesf):
return import_gtfs_data(line_name)
#Loads a bike service API ( from already formatted URL ). Insert bike stations in database and enables schedulded re-check.
def import_bike_service( url, name ):
engine = create_engine(db_type + ":///" + db_params)
metadata = MetaData(bind = engine)
mumoro_metadata = Table('metadata', metadata, autoload = True)
s = mumoro_metadata.select((mumoro_metadata.c.origin == url) & (mumoro_metadata.c.node_or_edge == 'bike_stations'))
rs = s.execute()
for row in rs:
bt = row[0]
bike_stations_array.append( {'url_api': url,'table': str(bt)} )
return {'url_api': url,'table': str(bt)}
#Loads data from previous inserted data and creates a layer used in multi-modal graph
def street_layer( data, name, color, mode ):
if not data or not name:
raise NameError('One or more parameters are missing')
if not is_color_valid( color ):
raise NameError('Color for the layer is invalid')
if mode != mumoro.Foot and mode != mumoro.Bike and mode != mumoro.Car and mode != None:
raise NameError('Wrong layer mode paramater')
engine = create_engine(db_type + ":///" + db_params)
metadata = MetaData(bind = engine)
<|code_end|>
, predict the immediate next line with the help of imports:
from lib.core import mumoro
from lib.core.mumoro import Bike, Car, Foot, PublicTransport, cost, co2, dist, elevation, line_change, mode_change, Costs
from lib import layer
from lib import bikestations as bikestations
from web import shorturl
from sqlalchemy import *
from sqlalchemy.orm import mapper, sessionmaker, clear_mappers
from cherrypy import request
from genshi.template import TemplateLoader
import cherrypy
import sys
import simplejson as json
import os
import time
import urllib
import httplib
import hashlib
import datetime
and context (classes, functions, sometimes code) from other files:
# Path: lib/layer.py
# def layer(self, node):
# for l in self.node_to_layer:
# if int(node) < l[0]:
# return l[1]
# print "Unable to find the right layer for node {0}".format(node)
# print self.node_to_layer
#
# Path: lib/bikestations.py
# def get_text(node):
# def get_int(node):
# def get_float(node):
# def __init__(self, url, name, metadata):
# def import_data(self):
# def update_from_db(self):
# def to_string(self):
# class BikeStationImporter():
#
# Path: web/shorturl.py
# class shortURL:
# def __init__(self,metadata):
# def addRouteToDatabase(self,lonMap,latMap,zoom,lonStart,latStart,lonDest,latDest,addressStart,addressDest, time):
# def getDataFromHash(self,value):
. Output only the next line. | res = layer.Layer(name, mode, data, metadata) |
Based on the snippet: <|code_start|> geometry['coordinates'] = coordinates
feature['geometry'] = geometry
feature['properties'] = {'layer': last_coord[3]}
features.append(feature)
p_str['features'] = features
ret['paths'].append(p_str)
return json.dumps(ret)
@cherrypy.expose
def bikes(self):
if len( self.bike_stations ) > 0:
if time.time() > self.timestamp + 60 * 5:
print "Updating bikestations"
for i in self.bike_stations:
i.import_data()
print "Done !"
for i in self.bike_stations:
i.update_from_db()
res = 'lat\tlon\ttitle\tdescription\ticon\ticonSize\ticonOffset\n'
for i in self.bike_stations:
res += i.to_string()
print "Got string"
return res;
else:
print "No bike stations imported so no string available to generate"
return None
@cherrypy.expose
def addhash(self,mlon,mlat,zoom,slon,slat,dlon,dlat,saddress,daddress,time):
cherrypy.response.headers['Content-Type']= 'application/json'
<|code_end|>
, predict the immediate next line with the help of imports:
from lib.core import mumoro
from lib.core.mumoro import Bike, Car, Foot, PublicTransport, cost, co2, dist, elevation, line_change, mode_change, Costs
from lib import layer
from lib import bikestations as bikestations
from web import shorturl
from sqlalchemy import *
from sqlalchemy.orm import mapper, sessionmaker, clear_mappers
from cherrypy import request
from genshi.template import TemplateLoader
import cherrypy
import sys
import simplejson as json
import os
import time
import urllib
import httplib
import hashlib
import datetime
and context (classes, functions, sometimes code) from other files:
# Path: lib/layer.py
# def layer(self, node):
# for l in self.node_to_layer:
# if int(node) < l[0]:
# return l[1]
# print "Unable to find the right layer for node {0}".format(node)
# print self.node_to_layer
#
# Path: lib/bikestations.py
# def get_text(node):
# def get_int(node):
# def get_float(node):
# def __init__(self, url, name, metadata):
# def import_data(self):
# def update_from_db(self):
# def to_string(self):
# class BikeStationImporter():
#
# Path: web/shorturl.py
# class shortURL:
# def __init__(self,metadata):
# def addRouteToDatabase(self,lonMap,latMap,zoom,lonStart,latStart,lonDest,latDest,addressStart,addressDest, time):
# def getDataFromHash(self,value):
. Output only the next line. | hashAdd = shorturl.shortURL(self.metadata) |
Using the snippet: <|code_start|>
def import_kalkati(self, filename, start_date, end_date, network_name = "GTFS"):
print "Adding municipal data from " + filename
print "From " + start_date + " to " + end_date + " for " + network_name + " network"
nodes2 = Metadata(network_name, "Nodes", filename)
self.session.add(nodes2)
self.session.commit()
mapper(PT_Node, create_pt_nodes_table(str(nodes2.id), self.metadata))
services = Metadata(network_name, "Services", filename)
self.session.add(services)
self.session.commit()
mapper(PT_Service, create_services_table(str(services.id), self.metadata))
edges2 = Metadata(network_name, "Edges", filename)
self.session.add(edges2)
self.session.commit()
mapper(PT_Edge, create_pt_edges_table(str(edges2.id), self.metadata, str(services.id)))
self.session.commit()
kalkati_reader.convert(filename, self.session, start_date, end_date)
self.init_mappers()
print "Done importing municipal data from " + filename + " for network '" + network_name + "'"
print "---------------------------------------------------------------------"
def import_bike(self, url, name):
print "Adding public bike service from " + url
bike_service = Metadata(name, "bike_stations", url )
self.session.add(bike_service)
self.session.commit()
<|code_end|>
, determine the next line of code. You have imports:
import sys
import osm4routing
import os.path
import datetime
import csv
from lib.core.mumoro import *
from lib.core import mumoro
from lib import bikestations, gtfs_reader, kalkati_reader
from lib.datastructures import *
from sqlalchemy import *
from sqlalchemy.orm import mapper, sessionmaker, clear_mappers
and context (class names, function names, or code) available:
# Path: lib/bikestations.py
# def get_text(node):
# def get_int(node):
# def get_float(node):
# def __init__(self, url, name, metadata):
# def import_data(self):
# def update_from_db(self):
# def to_string(self):
# class BikeStationImporter():
#
# Path: lib/gtfs_reader.py
# def distance(c1, c2):
# def convert(filename, session, start_date, end_date):
# C = math.radians(delta)
#
# Path: lib/kalkati_reader.py
# def distance(c1, c2):
# def normalize_service(start, end, services, service_start):
# def __init__(self, session, start_date, end_date):
# def startElement(self, name, attrs):
# def endElement(self, name):
# def endDocument(self):
# def convert(filename, session, start_date, end_date):
# C = math.radians(delta)
# class KalkatiHandler(ContentHandler):
. Output only the next line. | i = bikestations.BikeStationImporter( url, str(bike_service.id), self.metadata) |
Using the snippet: <|code_start|> def import_gtfs(self, filename, start_date, end_date, network_name = "GTFS"):
print "Adding municipal data from " + filename
print "From " + start_date + " to " + end_date + " for " + network_name + " network"
stop_areas = Metadata(network_name, "StopAreas", filename)
self.session.add(stop_areas)
self.session.commit()
mapper(PT_StopArea, create_pt_stop_areas_table(str(stop_areas.id), self.metadata))
nodes2 = Metadata(network_name, "Nodes", filename)
self.session.add(nodes2)
self.session.commit()
mapper(PT_Node, create_pt_nodes_table(str(nodes2.id), self.metadata, str(stop_areas.id)))
services = Metadata(network_name, "Services", filename)
self.session.add(services)
self.session.commit()
mapper(PT_Service, create_services_table(str(services.id), self.metadata))
lines = Metadata(network_name, "Lines", filename)
self.session.add(lines)
self.session.commit()
mapper(PT_Line, create_pt_lines_table(str(lines.id), self.metadata))
edges2 = Metadata(network_name, "Edges", filename)
self.session.add(edges2)
self.session.commit()
mapper(PT_Edge, create_pt_edges_table(str(edges2.id), self.metadata, str(services.id), str(lines.id)))
self.session.commit()
<|code_end|>
, determine the next line of code. You have imports:
import sys
import osm4routing
import os.path
import datetime
import csv
from lib.core.mumoro import *
from lib.core import mumoro
from lib import bikestations, gtfs_reader, kalkati_reader
from lib.datastructures import *
from sqlalchemy import *
from sqlalchemy.orm import mapper, sessionmaker, clear_mappers
and context (class names, function names, or code) available:
# Path: lib/bikestations.py
# def get_text(node):
# def get_int(node):
# def get_float(node):
# def __init__(self, url, name, metadata):
# def import_data(self):
# def update_from_db(self):
# def to_string(self):
# class BikeStationImporter():
#
# Path: lib/gtfs_reader.py
# def distance(c1, c2):
# def convert(filename, session, start_date, end_date):
# C = math.radians(delta)
#
# Path: lib/kalkati_reader.py
# def distance(c1, c2):
# def normalize_service(start, end, services, service_start):
# def __init__(self, session, start_date, end_date):
# def startElement(self, name, attrs):
# def endElement(self, name):
# def endDocument(self):
# def convert(filename, session, start_date, end_date):
# C = math.radians(delta)
# class KalkatiHandler(ContentHandler):
. Output only the next line. | gtfs_reader.convert(filename, self.session, start_date, end_date) |
Using the snippet: <|code_start|> edges2 = Metadata(network_name, "Edges", filename)
self.session.add(edges2)
self.session.commit()
mapper(PT_Edge, create_pt_edges_table(str(edges2.id), self.metadata, str(services.id), str(lines.id)))
self.session.commit()
gtfs_reader.convert(filename, self.session, start_date, end_date)
self.init_mappers()
print "Done importing municipal data from " + filename + " for network '" + network_name + "'"
print "---------------------------------------------------------------------"
def import_kalkati(self, filename, start_date, end_date, network_name = "GTFS"):
print "Adding municipal data from " + filename
print "From " + start_date + " to " + end_date + " for " + network_name + " network"
nodes2 = Metadata(network_name, "Nodes", filename)
self.session.add(nodes2)
self.session.commit()
mapper(PT_Node, create_pt_nodes_table(str(nodes2.id), self.metadata))
services = Metadata(network_name, "Services", filename)
self.session.add(services)
self.session.commit()
mapper(PT_Service, create_services_table(str(services.id), self.metadata))
edges2 = Metadata(network_name, "Edges", filename)
self.session.add(edges2)
self.session.commit()
mapper(PT_Edge, create_pt_edges_table(str(edges2.id), self.metadata, str(services.id)))
self.session.commit()
<|code_end|>
, determine the next line of code. You have imports:
import sys
import osm4routing
import os.path
import datetime
import csv
from lib.core.mumoro import *
from lib.core import mumoro
from lib import bikestations, gtfs_reader, kalkati_reader
from lib.datastructures import *
from sqlalchemy import *
from sqlalchemy.orm import mapper, sessionmaker, clear_mappers
and context (class names, function names, or code) available:
# Path: lib/bikestations.py
# def get_text(node):
# def get_int(node):
# def get_float(node):
# def __init__(self, url, name, metadata):
# def import_data(self):
# def update_from_db(self):
# def to_string(self):
# class BikeStationImporter():
#
# Path: lib/gtfs_reader.py
# def distance(c1, c2):
# def convert(filename, session, start_date, end_date):
# C = math.radians(delta)
#
# Path: lib/kalkati_reader.py
# def distance(c1, c2):
# def normalize_service(start, end, services, service_start):
# def __init__(self, session, start_date, end_date):
# def startElement(self, name, attrs):
# def endElement(self, name):
# def endDocument(self):
# def convert(filename, session, start_date, end_date):
# C = math.radians(delta)
# class KalkatiHandler(ContentHandler):
. Output only the next line. | kalkati_reader.convert(filename, self.session, start_date, end_date) |
Predict the next line after this snippet: <|code_start|>"""
Copyright (c) 2017 Eric Shook. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
@author: eshook (Eric Shook, eshook@gmail.edu)
@contributors: (Luyi Hunter, chen3461@umn.edu; Xinran Duan, duanx138@umn.edu)
@contributors: <Contribute and add your name here!>
"""
class TileEngine(Engine):
def __init__(self):
# FIXME: Need an object to describe type of engines rather than a string
super(TileEngine,self).__init__("TileEngine")
self.split_stacks = [] # List of Data stacks to maintain (only when split into tiles)
# Split (<)
def split(self):
# If already split, do nothing.
if self.is_split is True:
return
# Set split to True so engine knows that the data stack has been split
self.is_split = True
<|code_end|>
using the current file's imports:
from ..core.Engine import *
from ..core.Bob import *
from ..bobs.Bobs import *
from ..core import Config
import copy
import math
import multiprocessing
import numpy as np
import gdal
and any relevant context from other files:
# Path: forest/core/Config.py
. Output only the next line. | num_tiles = Config.n_cores # The the number of tiles to split into as the number of cores |
Predict the next line after this snippet: <|code_start|>
def get_config():
"""
Prepare and return alembic config
These configurations used to live in alembic config initialiser, but that
just tight coupling. Ideally we should move that to userspace and find a
way to pass these into alembic commands.
@todo: think about it
"""
# used for errors
map = dict(
path='MIGRATIONS_PATH',
db_url='SQLALCHEMY_DATABASE_URI',
metadata='SQLAlchemy metadata'
)
app = bootstrap.get_app()
params = dict()
params['path'] = app.config.get(map['path'], 'migrations')
params['db_url'] = app.config.get(map['db_url'])
<|code_end|>
using the current file's imports:
import click
from alembic import command as alembic_command
from alembic.util import CommandError
from boiler.cli.colors import *
from boiler.feature.orm import db
from boiler import bootstrap
from boiler.migrations.config import MigrationsConfig
and any relevant context from other files:
# Path: boiler/feature/orm.py
# def orm_feature(app):
#
# Path: boiler/bootstrap.py
# def get_config():
# def get_app():
# def test_import_name(name):
# def create_app(name, config=None, flask_params=None):
# def detect_dev_proxy():
# def add_routing(app):
# def add_mail(app):
# def add_orm(app):
# def add_logging(app):
# def add_localization(app):
. Output only the next line. | params['metadata'] = db.metadata |
Using the snippet: <|code_start|>
def get_config():
"""
Prepare and return alembic config
These configurations used to live in alembic config initialiser, but that
just tight coupling. Ideally we should move that to userspace and find a
way to pass these into alembic commands.
@todo: think about it
"""
# used for errors
map = dict(
path='MIGRATIONS_PATH',
db_url='SQLALCHEMY_DATABASE_URI',
metadata='SQLAlchemy metadata'
)
<|code_end|>
, determine the next line of code. You have imports:
import click
from alembic import command as alembic_command
from alembic.util import CommandError
from boiler.cli.colors import *
from boiler.feature.orm import db
from boiler import bootstrap
from boiler.migrations.config import MigrationsConfig
and context (class names, function names, or code) available:
# Path: boiler/feature/orm.py
# def orm_feature(app):
#
# Path: boiler/bootstrap.py
# def get_config():
# def get_app():
# def test_import_name(name):
# def create_app(name, config=None, flask_params=None):
# def detect_dev_proxy():
# def add_routing(app):
# def add_mail(app):
# def add_orm(app):
# def add_logging(app):
# def add_localization(app):
. Output only the next line. | app = bootstrap.get_app() |
Predict the next line for this snippet: <|code_start|> def log(self, message, level=None):
""" Write a message to log """
if level is None:
level = logging.INFO
current_app.logger.log(msg=message, level=level)
def is_instance(self, model):
"""
Is instance?
Checks if provided object is instance of this service's model.
:param model: object
:return: bool
"""
result = isinstance(model, self.__model__)
if result is True:
return True
err = 'Object {} is not of type {}'
raise ValueError(err.format(model, self.__model__))
def commit(self):
"""
Commit
Commits orm transaction. Used mostly for bulk operations when
flush is of to commit multiple items at once.
:return: None
"""
<|code_end|>
with the help of current file imports:
import logging
from flask import current_app
from boiler.feature.orm import db
and context from other files:
# Path: boiler/feature/orm.py
# def orm_feature(app):
, which may contain function names, class names, or code. Output only the next line. | db.session.commit() |
Based on the snippet: <|code_start|>
def route(view, endpoint=None, methods=None, defaults=None, **options):
"""
Route: a shorthand for route declaration
Import and use it in your app.urls file by calling:
url['/path/to/view'] = route('module.views.view', 'route_name')
"""
if not endpoint:
endpoint = view
if not methods:
methods = ['GET']
return dict(
<|code_end|>
, predict the immediate next line with the help of imports:
from boiler.routes.lazy_views import LazyView
and context (classes, functions, sometimes code) from other files:
# Path: boiler/routes/lazy_views.py
# class LazyView:
# """
# Lazy view
# Callable class that provides loading views on-demand as soon as they
# are hit. This reduces startup times and improves general performance.
#
# See flask docs for more:
# http://flask.pocoo.org/docs/0.10/patterns/lazyloading/
# """
#
# def __init__(self, import_name):
# self.import_name = import_name
# self.__module__,self.__name__ = import_name.rsplit('.', 1)
#
# def __call__(self, *args, **kwargs):
# """ Import and create instance of view """
#
# # important issue ahead
# # @see: https://github.com/projectshift/shift-boiler/issues/11
# try:
# result = self.view(*args, **kwargs)
# return result
# except ImportError:
# err = 'Failed to import {}. If it exists, check that it does not '
# err += 'import something non-existent itself! '
# err += 'Try to manually import it to debug.'
# raise ImportError(err.format(self.import_name))
#
# @cached_property
# def view(self):
# result = import_string(self.import_name)
#
# # do we have restfulness?
# try:
# from flask_restful import Resource
# from boiler.feature.api import api
# restful = True
# except ImportError:
# restful = False
#
# # is classy?
# if isinstance(result, type):
#
# # and also restful?
# is_restful = restful and Resource in result.__bases__
#
# if is_restful:
# result = api.output(result)
# else:
# result = result.as_view(self.import_name)
#
# return result
. Output only the next line. | view_func=LazyView(view), |
Predict the next line after this snippet: <|code_start|>
# -----------------------------------------------------------------------------
# Group setup
# -----------------------------------------------------------------------------
@click.group(help=yellow('Boiler project tools'))
def cli():
pass
# -----------------------------------------------------------------------------
# Show version number
# -----------------------------------------------------------------------------
@cli.command(name='version', help='Display current boiler version')
def version():
"""
Version
Imports and displays current boiler version.
:return:
"""
echo(green('\nshift-boiler:'))
echo(green('-' * 40))
<|code_end|>
using the current file's imports:
import click, os, sys, shutil
import os
import fileinput
import subprocess
from boiler.cli.colors import *
from click import echo
from boiler.version import version as boiler_version
from uuid import uuid1
and any relevant context from other files:
# Path: boiler/version.py
. Output only the next line. | echo(yellow('Version: ') + '{}'.format(boiler_version)) |
Based on the snippet: <|code_start|>
def logging_feature(app):
"""
Add logging
Accepts flask application and registers logging functionality within it
"""
# this is important because otherwise only log warn, err and crit
app.logger.setLevel(logging.INFO)
# enable loggers
email_exceptions = app.config.get('LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS')
if email_exceptions and not app.debug and not app.testing:
# config.debug=False
mail_handler = mail_logger(app)
app.logger.addHandler(mail_handler)
if not app.testing:
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
from boiler.log.file import file_logger
from boiler.log.mail import mail_logger
and context (classes, functions, sometimes code) from other files:
# Path: boiler/log/file.py
# def file_logger(app, level=None):
# """
# Get file logger
# Returns configured fire logger ready to be attached to app
#
# :param app: application instance
# :param level: log this level
# :return: RotatingFileHandler
# """
# path = os.path.join(os.getcwd(), 'var', 'logs', 'app.log')
#
# max_bytes = 1024 * 1024 * 2
# file_handler = RotatingFileHandler(
# filename=path,
# mode='a',
# maxBytes=max_bytes,
# backupCount=10
# )
#
# if level is None: level = logging.INFO
# file_handler.setLevel(level)
#
# log_format = '%(asctime)s %(levelname)s: %(message)s'
# log_format += ' [in %(pathname)s:%(lineno)d]'
# file_handler.setFormatter(logging.Formatter(log_format))
#
# return file_handler
#
# Path: boiler/log/mail.py
# def mail_logger(app, level = None):
# """
# Get mail logger
# Returns configured instance of mail logger ready to be attached to app.
#
# Important: app.config['DEBUG'] must be False!
#
# :param app: application instance
# :param level: mail errors of this level
# :return: SMTPHandler
# """
# credentials = None
# if app.config['MAIL_USERNAME'] and app.config['MAIL_PASSWORD']:
# credentials = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
#
# secure = None
# if app.config['MAIL_USE_TLS']:
# secure = tuple()
#
# # @todo: move to configuration
# config = dict(
# mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
# fromaddr=app.config['MAIL_DEFAULT_SENDER'],
# toaddrs=app.config['ADMINS'],
# credentials = credentials,
# subject='Application exception',
# secure = secure,
# timeout=1.0
# )
#
# mail_handler = SMTPHandler(**config)
#
# if level is None: level = logging.ERROR
# mail_handler.setLevel(level)
#
# mail_log_format = '''
# Message type: %(levelname)s
# Location: %(pathname)s:%(lineno)d
# Module: %(module)s
# Function: %(funcName)s
# Time: %(asctime)s
#
# Message:
#
# %(message)s
# '''
#
# mail_handler.setFormatter(logging.Formatter(mail_log_format))
# return mail_handler
. Output only the next line. | file_handler = file_logger(app) |
Given the following code snippet before the placeholder: <|code_start|>
def logging_feature(app):
"""
Add logging
Accepts flask application and registers logging functionality within it
"""
# this is important because otherwise only log warn, err and crit
app.logger.setLevel(logging.INFO)
# enable loggers
email_exceptions = app.config.get('LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS')
if email_exceptions and not app.debug and not app.testing:
# config.debug=False
<|code_end|>
, predict the next line using imports from the current file:
import logging
from boiler.log.file import file_logger
from boiler.log.mail import mail_logger
and context including class names, function names, and sometimes code from other files:
# Path: boiler/log/file.py
# def file_logger(app, level=None):
# """
# Get file logger
# Returns configured fire logger ready to be attached to app
#
# :param app: application instance
# :param level: log this level
# :return: RotatingFileHandler
# """
# path = os.path.join(os.getcwd(), 'var', 'logs', 'app.log')
#
# max_bytes = 1024 * 1024 * 2
# file_handler = RotatingFileHandler(
# filename=path,
# mode='a',
# maxBytes=max_bytes,
# backupCount=10
# )
#
# if level is None: level = logging.INFO
# file_handler.setLevel(level)
#
# log_format = '%(asctime)s %(levelname)s: %(message)s'
# log_format += ' [in %(pathname)s:%(lineno)d]'
# file_handler.setFormatter(logging.Formatter(log_format))
#
# return file_handler
#
# Path: boiler/log/mail.py
# def mail_logger(app, level = None):
# """
# Get mail logger
# Returns configured instance of mail logger ready to be attached to app.
#
# Important: app.config['DEBUG'] must be False!
#
# :param app: application instance
# :param level: mail errors of this level
# :return: SMTPHandler
# """
# credentials = None
# if app.config['MAIL_USERNAME'] and app.config['MAIL_PASSWORD']:
# credentials = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
#
# secure = None
# if app.config['MAIL_USE_TLS']:
# secure = tuple()
#
# # @todo: move to configuration
# config = dict(
# mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
# fromaddr=app.config['MAIL_DEFAULT_SENDER'],
# toaddrs=app.config['ADMINS'],
# credentials = credentials,
# subject='Application exception',
# secure = secure,
# timeout=1.0
# )
#
# mail_handler = SMTPHandler(**config)
#
# if level is None: level = logging.ERROR
# mail_handler.setLevel(level)
#
# mail_log_format = '''
# Message type: %(levelname)s
# Location: %(pathname)s:%(lineno)d
# Module: %(module)s
# Function: %(funcName)s
# Time: %(asctime)s
#
# Message:
#
# %(message)s
# '''
#
# mail_handler.setFormatter(logging.Formatter(mail_log_format))
# return mail_handler
. Output only the next line. | mail_handler = mail_logger(app) |
Using the snippet: <|code_start|> raise x.BootstrapException(err.format(name))
# check if imported module is a namespace
is_namespace = not imported.__file__ and type(imported.__path__) is not list
if is_namespace:
err = '\n\nProvided FLASK_APP "{}" is a namespace package.\n'
err += 'Please verify that you are importing the app from a regular '
err += 'package and not a namespace.\n\n'
err += 'For more info see:\n'
err += 'Related ticket: https://bit.ly/package-vs-namespace:\n'
err += 'Packages and namespaces in Python docs: '
err += 'https://docs.python.org/3/reference/import.html#packages\n'
raise x.BootstrapException(err.format(name))
def create_app(name, config=None, flask_params=None):
"""
Create app
Generalized way of creating a flask app. Use it in your concrete apps and
do further configuration there: add app-specific options, extensions,
listeners and other features.
Note: application name should be its fully qualified __name__, something
like project.api.app. This is how we fetch routing settings.
"""
# check import name
test_import_name(name)
# check config
if not config:
<|code_end|>
, determine the next line of code. You have imports:
import os
from os import path
from flask import Flask
from flask import g
from flask import request
from werkzeug.utils import import_string
from werkzeug.utils import ImportStringError
from jinja2 import ChoiceLoader, FileSystemLoader
from flask_wtf import CSRFProtect
from boiler.config import DefaultConfig
from boiler.timer import restart_timer
from boiler.errors import register_error_handler
from boiler.jinja import functions as jinja_functions
from boiler import exceptions as x
from boiler.feature.routing import routing_feature
from boiler.feature.mail import mail_feature
from boiler.feature.orm import orm_feature
from boiler.feature.logging import logging_feature
from boiler.feature.localization import localization_feature
and context (class names, function names, or code) available:
# Path: boiler/config.py
# class DefaultConfig(Config):
# """
# Default project configuration
# Sets up defaults used and/or overridden in environments and deployments
# """
# ENV = 'production'
#
# SERVER_NAME = None
#
# # secret key
# SECRET_KEY = os.getenv('APP_SECRET_KEY')
#
# TIME_RESTARTS = False
# TESTING = False
# DEBUG = False
# DEBUG_TB_ENABLED = False
# DEBUG_TB_PROFILER_ENABLED = False
# DEBUG_TB_INTERCEPT_REDIRECTS = False
#
# # where built-in server and url_for look for static files (None for default)
# FLASK_STATIC_URL = None
# FLASK_STATIC_PATH = None
#
# # asset helper settings (server must be capable of serving these files)
# ASSETS_VERSION = None
# ASSETS_PATH = None # None falls back to url_for('static')
#
# # do not expose our urls on 404s
# ERROR_404_HELP = False
#
# # uploads
# MAX_CONTENT_LENGTH = 1024 * 1024 * 16 # megabytes
#
# # database
# # 'mysql://user:password@server/db?charset=utf8mb4'
# # 'mysql+pymysql://user:password@server/db?charset=utf8mb4'
# # 'mysql+mysqlconnector://user:password@host:3306/database?charset=utf8mb4'
# SQLALCHEMY_ECHO = False
# SQLALCHEMY_TRACK_MODIFICATIONS = False
# MIGRATIONS_PATH = os.path.join(os.getcwd(), 'migrations')
# SQLALCHEMY_DATABASE_URI = os.getenv('APP_DATABASE_URI')
# TEST_DB_PATH = os.path.join(
# os.getcwd(), 'var', 'data', 'test-db', 'sqlite.db'
# )
#
# # mail server settings
# MAIL_DEBUG = False
# MAIL_SERVER = 'smtp.gmail.com'
# MAIL_PORT = 587
# MAIL_USE_TLS = True
# MAIL_USE_SSL = False
# MAIL_USERNAME = None
# MAIL_PASSWORD = None
# MAIL_DEFAULT_SENDER = ('Webapp Mailer', 'mygmail@gmail.com')
#
# # logging
# ADMINS = ['you@domain']
# LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS = False
#
# # localization (babel)
# DEFAULT_LOCALE = 'en_GB'
# DEFAULT_TIMEZONE = 'UTC'
#
# # csrf protection
# WTF_CSRF_ENABLED = True
#
# # recaptcha
# RECAPTCHA_PUBLIC_KEY = os.getenv('APP_RECAPTCHA_PUBLIC_KEY')
# RECAPTCHA_PRIVATE_KEY = os.getenv('APP_RECAPTCHA_PRIVATE_KEY')
#
# Path: boiler/timer/restart_timer.py
# def time_restarts(data_path):
#
# Path: boiler/errors.py
# def register_error_handler(app, handler=None):
# """
# Register error handler
# Registers an exception handler on the app instance for every type of
# exception code werkzeug is aware about.
#
# :param app: flask.Flask - flask application instance
# :param handler: function - the handler
# :return: None
# """
# if not handler:
# handler = default_error_handler
#
# for code in exceptions.default_exceptions.keys():
# app.register_error_handler(code, handler)
#
# Path: boiler/jinja/functions.py
# def asset(url=None):
# def dev_proxy():
#
# Path: boiler/exceptions.py
# class BoilerException(Exception):
# class BootstrapException(BoilerException, RuntimeError):
. Output only the next line. | config = DefaultConfig() |
Next line prediction: <|code_start|> flask_params['static_folder'] = config.get('FLASK_STATIC_PATH')
# create an app with default config
app = Flask(**flask_params)
app.config.from_object(DefaultConfig())
# apply custom config
if config:
app.config.from_object(config)
# enable csrf protection
CSRFProtect(app)
# register error handler
register_error_handler(app)
# use kernel templates
kernel_templates_path = path.realpath(path.dirname(__file__)+'/templates')
fallback_loader = FileSystemLoader([kernel_templates_path])
custom_loader = ChoiceLoader([app.jinja_loader, fallback_loader])
app.jinja_loader = custom_loader
# register custom jinja functions
app.jinja_env.globals.update(dict(
asset=jinja_functions.asset,
dev_proxy=jinja_functions.dev_proxy
))
# time restarts?
if app.config.get('TIME_RESTARTS'):
<|code_end|>
. Use current file imports:
(import os
from os import path
from flask import Flask
from flask import g
from flask import request
from werkzeug.utils import import_string
from werkzeug.utils import ImportStringError
from jinja2 import ChoiceLoader, FileSystemLoader
from flask_wtf import CSRFProtect
from boiler.config import DefaultConfig
from boiler.timer import restart_timer
from boiler.errors import register_error_handler
from boiler.jinja import functions as jinja_functions
from boiler import exceptions as x
from boiler.feature.routing import routing_feature
from boiler.feature.mail import mail_feature
from boiler.feature.orm import orm_feature
from boiler.feature.logging import logging_feature
from boiler.feature.localization import localization_feature)
and context including class names, function names, or small code snippets from other files:
# Path: boiler/config.py
# class DefaultConfig(Config):
# """
# Default project configuration
# Sets up defaults used and/or overridden in environments and deployments
# """
# ENV = 'production'
#
# SERVER_NAME = None
#
# # secret key
# SECRET_KEY = os.getenv('APP_SECRET_KEY')
#
# TIME_RESTARTS = False
# TESTING = False
# DEBUG = False
# DEBUG_TB_ENABLED = False
# DEBUG_TB_PROFILER_ENABLED = False
# DEBUG_TB_INTERCEPT_REDIRECTS = False
#
# # where built-in server and url_for look for static files (None for default)
# FLASK_STATIC_URL = None
# FLASK_STATIC_PATH = None
#
# # asset helper settings (server must be capable of serving these files)
# ASSETS_VERSION = None
# ASSETS_PATH = None # None falls back to url_for('static')
#
# # do not expose our urls on 404s
# ERROR_404_HELP = False
#
# # uploads
# MAX_CONTENT_LENGTH = 1024 * 1024 * 16 # megabytes
#
# # database
# # 'mysql://user:password@server/db?charset=utf8mb4'
# # 'mysql+pymysql://user:password@server/db?charset=utf8mb4'
# # 'mysql+mysqlconnector://user:password@host:3306/database?charset=utf8mb4'
# SQLALCHEMY_ECHO = False
# SQLALCHEMY_TRACK_MODIFICATIONS = False
# MIGRATIONS_PATH = os.path.join(os.getcwd(), 'migrations')
# SQLALCHEMY_DATABASE_URI = os.getenv('APP_DATABASE_URI')
# TEST_DB_PATH = os.path.join(
# os.getcwd(), 'var', 'data', 'test-db', 'sqlite.db'
# )
#
# # mail server settings
# MAIL_DEBUG = False
# MAIL_SERVER = 'smtp.gmail.com'
# MAIL_PORT = 587
# MAIL_USE_TLS = True
# MAIL_USE_SSL = False
# MAIL_USERNAME = None
# MAIL_PASSWORD = None
# MAIL_DEFAULT_SENDER = ('Webapp Mailer', 'mygmail@gmail.com')
#
# # logging
# ADMINS = ['you@domain']
# LOGGING_EMAIL_EXCEPTIONS_TO_ADMINS = False
#
# # localization (babel)
# DEFAULT_LOCALE = 'en_GB'
# DEFAULT_TIMEZONE = 'UTC'
#
# # csrf protection
# WTF_CSRF_ENABLED = True
#
# # recaptcha
# RECAPTCHA_PUBLIC_KEY = os.getenv('APP_RECAPTCHA_PUBLIC_KEY')
# RECAPTCHA_PRIVATE_KEY = os.getenv('APP_RECAPTCHA_PRIVATE_KEY')
#
# Path: boiler/timer/restart_timer.py
# def time_restarts(data_path):
#
# Path: boiler/errors.py
# def register_error_handler(app, handler=None):
# """
# Register error handler
# Registers an exception handler on the app instance for every type of
# exception code werkzeug is aware about.
#
# :param app: flask.Flask - flask application instance
# :param handler: function - the handler
# :return: None
# """
# if not handler:
# handler = default_error_handler
#
# for code in exceptions.default_exceptions.keys():
# app.register_error_handler(code, handler)
#
# Path: boiler/jinja/functions.py
# def asset(url=None):
# def dev_proxy():
#
# Path: boiler/exceptions.py
# class BoilerException(Exception):
# class BootstrapException(BoilerException, RuntimeError):
. Output only the next line. | restart_timer.time_restarts(os.path.join(os.getcwd(), 'var', 'data')) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.