Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given the code snippet: <|code_start|> joint_coord = joint_coord.astype(int)
cv2.circle(disp_img, tuple(joint_coord), radius=3, color=[255,0,0], thickness = 10)
if pred is not None:
if correct[i]:
color=[0,255,0]
else:
color=[0,0,255]
error = np.linalg.norm(joint_coord - pred[i,::-1],2,-1)
cv2.circle(disp_img, tuple(joint_coord), radius=int(error), color=color, thickness = 3)
cv2.line(disp_img, tuple(joint_coord), tuple(pred[i,::-1]),color , thickness = 3)
i+=1
return disp_img
def main():
smal_joint_info = SMALJointInfo()
badja_data = BADJAData(args.seqname)
data_loader = badja_data.get_loader()
print(args.testdir)
# store all the data
all_anno = []
all_mesh = []
all_cam = []
all_fr = []
all_fl = []
#import pdb; pdb.set_trace()
for anno in data_loader:
all_anno.append(anno)
rgb_img, sil_img, joints, visible, name = anno
seqname = name.split('/')[-2]
<|code_end|>
, generate the next line using the imports in this file:
import time
import sys, os
import torch
import torch.nn as nn
import ext_utils.flowlib as flowlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import pdb
import soft_renderer as sr
import argparse
import trimesh
from torch.autograd import Variable
from ext_utils.badja_data import BADJAData
from ext_utils.joint_catalog import SMALJointInfo
from nnutils.geom_utils import obj_to_cam, pinhole_cam, orthographic_cam, render_flow_soft_3
from models.VCN_exp import WarpModule, flow_reg
from models.VCN_exp import VCN
and context (functions, classes, or occasionally code) from other files:
# Path: nnutils/geom_utils.py
# def obj_to_cam(verts, Rmat, Tmat,nmesh,n_hypo,skin,tocam=True):
# """
# transform from canonical object coordinates to camera coordinates
# """
# verts = verts.clone()
# Rmat = Rmat.clone()
# Tmat = Tmat.clone()
# verts = verts.view(-1,verts.shape[1],3)
#
# bodyR = Rmat[::nmesh].clone()
# bodyT = Tmat[::nmesh].clone()
# if nmesh>1:
# vs = []
# for k in range(nmesh-1):
# partR = Rmat[k+1::nmesh].clone()
# partT = Tmat[k+1::nmesh].clone()
# vs.append( (verts.matmul(partR) + partT)[:,np.newaxis] )
# vs = torch.cat(vs,1) # N, K, Nv, 3
# vs = (vs * skin).sum(1)
# else:
# vs = verts
#
# if tocam:
# vs = vs.clone().matmul(bodyR) + bodyT
# else:
# vs = vs.clone()
# return vs
#
# def pinhole_cam(verts,pp,fl):
# n_hypo = verts.shape[0] // pp.shape[0]
# pp = pp.clone()[:,None].repeat(1,n_hypo,1).view(-1,2)
# fl = fl.clone()[:,None].view(-1,1)
# verts = verts.clone()
# verts[:,:,1] = pp[:,1:2]+verts[:, :, 1].clone()*fl/ verts[:,:,2].clone()
# verts[:,:,0] = pp[:,0:1]+verts[:, :, 0].clone()*fl/ verts[:,:,2].clone()
# return verts
#
# def orthographic_cam(verts,pp,fl):
# n_hypo = verts.shape[0] // pp.shape[0]
# pp = pp.clone()[:,None].repeat(1,n_hypo,1).view(-1,2)
# fl = fl.clone()[:,None].view(-1,1)
# verts = verts.clone()
# verts[:,:,1] = pp[:,1:2]+verts[:, :, 1].clone()*fl
# verts[:,:,0] = pp[:,0:1]+verts[:, :, 0].clone()*fl
# return verts
#
# def render_flow_soft_3(renderer_soft, verts, verts_target, faces):
# """
# Render optical flow from two frame 3D vertex locations
# """
# offset = torch.Tensor( renderer_soft.transform.transformer._eye ).cuda()[np.newaxis,np.newaxis]
# verts_pre = verts[:,:,:3]+offset; verts_pre[:,:,1] = -1*verts_pre[:,:,1]
#
# verts_pos_px = renderer_soft.render_mesh(sr.Mesh(verts_pre, faces,
# textures=verts_target[:,:,:3],texture_type='vertex')).clone()
# fgmask = verts_pos_px[:,-1]
# verts_pos_px = verts_pos_px.permute(0,2,3,1)
#
# bgmask = (verts_pos_px[:,:,:,2]<1e-9)
# verts_pos_px[bgmask]=10
#
# verts_pos0_px = torch.Tensor(np.meshgrid(range(bgmask.shape[2]), range(bgmask.shape[1]))).cuda()
# verts_pos0_px[0] = verts_pos0_px[0]*2 / (bgmask.shape[2] - 1) - 1
# verts_pos0_px[1] = verts_pos0_px[1]*2 / (bgmask.shape[1] - 1) - 1
# verts_pos0_px = verts_pos0_px.permute(1,2,0)[None]
#
# flow_fw = (verts_pos_px[:,:,:,:2] - verts_pos0_px)
# flow_fw[bgmask] = flow_fw[bgmask].detach()
# return flow_fw, bgmask, fgmask
. Output only the next line. | fr = int(name.split('/')[-1].split('.')[-2]) |
Next line prediction: <|code_start|> # upsampling
flow = torch.squeeze(flow).data.cpu().numpy()
flow = np.concatenate( [cv2.resize(flow[0],(input_size[1],input_size[0]))[:,:,np.newaxis],
cv2.resize(flow[1],(input_size[1],input_size[0]))[:,:,np.newaxis]],-1)
flow[:,:,0] *= imgL_o.shape[1] / max_w
flow[:,:,1] *= imgL_o.shape[0] / max_h
flow = np.concatenate( (flow, np.ones([flow.shape[0],flow.shape[1],1])),-1)
torch.cuda.empty_cache()
flow = torch.Tensor(flow).cuda()[None]
return flow
def preprocess_image(img,mask,imgsize):
if len(img.shape) == 2:
img = np.repeat(np.expand_dims(img, 2), 3, axis=2)
mask = mask[:,:,:1]
# crop box
indices = np.where(mask>0); xid = indices[1]; yid = indices[0]
center = ( (xid.max()+xid.min())//2, (yid.max()+yid.min())//2)
length = ( (xid.max()-xid.min())//2, (yid.max()-yid.min())//2)
maxlength = int(1.2*max(length))
length = (maxlength,maxlength)
alp = 2*length[0]/float(imgsize)
refpp = np.asarray(center)/(imgsize/2.) - 1
return alp, refpp,center,length[0]
def draw_joints_on_image(rgb_img, joints, visibility, region_colors, marker_types,pred=None,correct=None):
joints = joints[:, ::-1] # OpenCV works in (x, y) rather than (i, j)
<|code_end|>
. Use current file imports:
(import time
import sys, os
import torch
import torch.nn as nn
import ext_utils.flowlib as flowlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import pdb
import soft_renderer as sr
import argparse
import trimesh
from torch.autograd import Variable
from ext_utils.badja_data import BADJAData
from ext_utils.joint_catalog import SMALJointInfo
from nnutils.geom_utils import obj_to_cam, pinhole_cam, orthographic_cam, render_flow_soft_3
from models.VCN_exp import WarpModule, flow_reg
from models.VCN_exp import VCN)
and context including class names, function names, or small code snippets from other files:
# Path: nnutils/geom_utils.py
# def obj_to_cam(verts, Rmat, Tmat,nmesh,n_hypo,skin,tocam=True):
# """
# transform from canonical object coordinates to camera coordinates
# """
# verts = verts.clone()
# Rmat = Rmat.clone()
# Tmat = Tmat.clone()
# verts = verts.view(-1,verts.shape[1],3)
#
# bodyR = Rmat[::nmesh].clone()
# bodyT = Tmat[::nmesh].clone()
# if nmesh>1:
# vs = []
# for k in range(nmesh-1):
# partR = Rmat[k+1::nmesh].clone()
# partT = Tmat[k+1::nmesh].clone()
# vs.append( (verts.matmul(partR) + partT)[:,np.newaxis] )
# vs = torch.cat(vs,1) # N, K, Nv, 3
# vs = (vs * skin).sum(1)
# else:
# vs = verts
#
# if tocam:
# vs = vs.clone().matmul(bodyR) + bodyT
# else:
# vs = vs.clone()
# return vs
#
# def pinhole_cam(verts,pp,fl):
# n_hypo = verts.shape[0] // pp.shape[0]
# pp = pp.clone()[:,None].repeat(1,n_hypo,1).view(-1,2)
# fl = fl.clone()[:,None].view(-1,1)
# verts = verts.clone()
# verts[:,:,1] = pp[:,1:2]+verts[:, :, 1].clone()*fl/ verts[:,:,2].clone()
# verts[:,:,0] = pp[:,0:1]+verts[:, :, 0].clone()*fl/ verts[:,:,2].clone()
# return verts
#
# def orthographic_cam(verts,pp,fl):
# n_hypo = verts.shape[0] // pp.shape[0]
# pp = pp.clone()[:,None].repeat(1,n_hypo,1).view(-1,2)
# fl = fl.clone()[:,None].view(-1,1)
# verts = verts.clone()
# verts[:,:,1] = pp[:,1:2]+verts[:, :, 1].clone()*fl
# verts[:,:,0] = pp[:,0:1]+verts[:, :, 0].clone()*fl
# return verts
#
# def render_flow_soft_3(renderer_soft, verts, verts_target, faces):
# """
# Render optical flow from two frame 3D vertex locations
# """
# offset = torch.Tensor( renderer_soft.transform.transformer._eye ).cuda()[np.newaxis,np.newaxis]
# verts_pre = verts[:,:,:3]+offset; verts_pre[:,:,1] = -1*verts_pre[:,:,1]
#
# verts_pos_px = renderer_soft.render_mesh(sr.Mesh(verts_pre, faces,
# textures=verts_target[:,:,:3],texture_type='vertex')).clone()
# fgmask = verts_pos_px[:,-1]
# verts_pos_px = verts_pos_px.permute(0,2,3,1)
#
# bgmask = (verts_pos_px[:,:,:,2]<1e-9)
# verts_pos_px[bgmask]=10
#
# verts_pos0_px = torch.Tensor(np.meshgrid(range(bgmask.shape[2]), range(bgmask.shape[1]))).cuda()
# verts_pos0_px[0] = verts_pos0_px[0]*2 / (bgmask.shape[2] - 1) - 1
# verts_pos0_px[1] = verts_pos0_px[1]*2 / (bgmask.shape[1] - 1) - 1
# verts_pos0_px = verts_pos0_px.permute(1,2,0)[None]
#
# flow_fw = (verts_pos_px[:,:,:,:2] - verts_pos0_px)
# flow_fw[bgmask] = flow_fw[bgmask].detach()
# return flow_fw, bgmask, fgmask
. Output only the next line. | disp_img = rgb_img.copy() |
Given the following code snippet before the placeholder: <|code_start|> size = 150
else:
size = len(all_anno)
for i in range(size):
if args.append_render=='no':break
# render flow between mesh 1 and 2
if args.freeze:
print(i)
refimg = all_anno[0][0]
img_size = max(refimg.shape)
refmesh = all_mesh[0]
refmesh.vertices -= refmesh.vertices.mean(0)[None]
refmesh.vertices /= 1.2*np.abs(refmesh.vertices).max()
refcam = all_cam[0].copy()
refcam[:3,:3] = refcam[:3,:3].dot(cv2.Rodrigues(np.asarray([0.,-i*2*np.pi/size,0.]))[0])
refcam[:2,3] = 0 # trans xy
refcam[2,3] = 20 # depth
if args.cam_type=='perspective':
refcam[3,2] = refimg.shape[1]/2 # px py
refcam[3,3] = refimg.shape[0]/2 # px py
refcam[3,:2] = 8*img_size/2 # fl
else:
refcam[3,2] = refimg.shape[1]/2 # px py
refcam[3,3] = refimg.shape[1]/2 # px py
refcam[3,:2] =0.5 * img_size/2 # fl
else:
refimg, refsil, refkp, refvis, refname = all_anno[i]
print('%s'%(refname))
img_size = max(refimg.shape)
renderer_softflf.rasterizer.image_size = img_size
<|code_end|>
, predict the next line using imports from the current file:
import sys, os
import subprocess
import imageio
import glob
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import pdb
import soft_renderer as sr
import argparse
import trimesh
import pyrender
import configparser
from ext_utils.badja_data import BADJAData
from ext_utils.joint_catalog import SMALJointInfo
from nnutils.geom_utils import obj_to_cam, pinhole_cam
from pyrender import IntrinsicsCamera,Mesh, Node, Scene,OffscreenRenderer
and context including class names, function names, and sometimes code from other files:
# Path: nnutils/geom_utils.py
# def obj_to_cam(verts, Rmat, Tmat,nmesh,n_hypo,skin,tocam=True):
# """
# transform from canonical object coordinates to camera coordinates
# """
# verts = verts.clone()
# Rmat = Rmat.clone()
# Tmat = Tmat.clone()
# verts = verts.view(-1,verts.shape[1],3)
#
# bodyR = Rmat[::nmesh].clone()
# bodyT = Tmat[::nmesh].clone()
# if nmesh>1:
# vs = []
# for k in range(nmesh-1):
# partR = Rmat[k+1::nmesh].clone()
# partT = Tmat[k+1::nmesh].clone()
# vs.append( (verts.matmul(partR) + partT)[:,np.newaxis] )
# vs = torch.cat(vs,1) # N, K, Nv, 3
# vs = (vs * skin).sum(1)
# else:
# vs = verts
#
# if tocam:
# vs = vs.clone().matmul(bodyR) + bodyT
# else:
# vs = vs.clone()
# return vs
#
# def pinhole_cam(verts,pp,fl):
# n_hypo = verts.shape[0] // pp.shape[0]
# pp = pp.clone()[:,None].repeat(1,n_hypo,1).view(-1,2)
# fl = fl.clone()[:,None].view(-1,1)
# verts = verts.clone()
# verts[:,:,1] = pp[:,1:2]+verts[:, :, 1].clone()*fl/ verts[:,:,2].clone()
# verts[:,:,0] = pp[:,0:1]+verts[:, :, 0].clone()*fl/ verts[:,:,2].clone()
# return verts
. Output only the next line. | refmesh = all_mesh[i] |
Given snippet: <|code_start|> elif os.path.exists(file_name):
print ("BADJA SEGMENTATION file path: {0} is missing".format(seg_name))
else:
print ("BADJA IMAGE file path: {0} is missing".format(file_name))
self.animal_dict[animal_id] = (filenames, segnames, joints, visible)
print ("Loaded BADJA dataset")
def get_loader(self):
#self.animal_dict.pop(10)
#self.animal_dict.pop(2)
#for idx in range(int(1e6)):
#pdb.set_trace()
for idx in self.animal_dict.keys():
animal_id = idx
#animal_id = np.random.choice(self.animal_dict.keys())
filenames, segnames, joints, visible = self.animal_dict[animal_id]
for image_id in range(len(filenames)):
seg_file = segnames[image_id]
image_file = filenames[image_id]
jointstmp = joints[image_id].copy()
jointstmp = jointstmp[self.smal_joint_info.annotated_classes]
visibletmp = visible[image_id][self.smal_joint_info.annotated_classes]
rgb_img = cv2.imread(image_file)[:,:,::-1]
sil_img = imageio.imread(seg_file)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pdb
import numpy as np
import imageio
import json
import os
import cv2
import sys
from random import shuffle
from .joint_catalog import SMALJointInfo
and context:
# Path: third_party/ext_utils/joint_catalog.py
# class SMALJointInfo():
# def __init__(self):
# # These are the
# self.annotated_classes = np.array([
# 8, 9, 10, # upper_right
# 12, 13, 14, # upper_left
# 15, # neck
# 18, 19, 20, # lower_right
# 22, 23, 24, # lower_left
# 25, 28, 31, # tail
# 32, 33, # head
# 35, # right_ear
# 36]) # left_ear
#
# self.annotated_markers = np.array([
# cv2.MARKER_CROSS, cv2.MARKER_STAR, cv2.MARKER_TRIANGLE_DOWN,
# cv2.MARKER_CROSS, cv2.MARKER_STAR, cv2.MARKER_TRIANGLE_DOWN,
# cv2.MARKER_CROSS,
# cv2.MARKER_CROSS, cv2.MARKER_STAR, cv2.MARKER_TRIANGLE_DOWN,
# cv2.MARKER_CROSS, cv2.MARKER_STAR, cv2.MARKER_TRIANGLE_DOWN,
# cv2.MARKER_CROSS, cv2.MARKER_STAR, cv2.MARKER_TRIANGLE_DOWN,
# cv2.MARKER_CROSS, cv2.MARKER_STAR,
# cv2.MARKER_CROSS,
# cv2.MARKER_CROSS])
#
# self.joint_regions = np.array([
# 0, 0, 0, 0, 0, 0, 0,
# 1, 1, 1, 1,
# 2, 2, 2, 2,
# 3, 3,
# 4, 4, 4, 4,
# 5, 5, 5, 5,
# 6, 6, 6, 6, 6, 6, 6,
# 7, 7, 7,
# 8,
# 9])
#
# self.annotated_joint_region = self.joint_regions[self.annotated_classes]
# self.region_colors = np.array([
# [250, 190, 190], # body, light pink
# [60, 180, 75], # upper_right, green
# [230, 25, 75], # upper_left, red
# [128, 0, 0], # neck, maroon
# [0, 130, 200], # lower_right, blue
# [255, 255, 25], # lower_left, yellow
# [240, 50, 230], # tail, majenta
# [245, 130, 48], # jaw / nose / chin, orange
# [29, 98, 115], # right_ear, turquoise
# [255, 153, 204]]) # left_ear, pink
#
# self.joint_colors = np.array(self.region_colors)[self.annotated_joint_region]
which might include code, classes, or functions. Output only the next line. | rgb_h, rgb_w, _ = rgb_img.shape |
Given the code snippet: <|code_start|>#from IPython import embed
class DistModel(BaseModel):
def name(self):
return self.model_name
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
import torch
import os
import os.path as osp
import itertools
import fractions
import functools
import skimage.transform
from torch import nn
from collections import OrderedDict
from torch.autograd import Variable
from ..util import util as util
from .base_model import BaseModel
from . import networks_basic as networks
from scipy.ndimage import zoom
and context (functions, classes, or occasionally code) from other files:
# Path: third_party/PerceptualSimilarity/util/util.py
# def datetime_str():
# def read_text_file(in_path):
# def bootstrap(in_vec,num_samples=100,bootfunc=np.mean):
# def rand_flip(input1,input2):
# def l2(p0, p1, range=255.):
# def psnr(p0, p1, peak=255.):
# def dssim(p0, p1, range=255.):
# def rgb2lab(in_img,mean_cent=False):
# def normalize_blob(in_feat,eps=1e-10):
# def cos_sim_blob(in0,in1):
# def normalize_tensor(in_feat,eps=1e-10):
# def cos_sim(in0,in1):
# def tensor2np(tensor_obj):
# def np2tensor(np_obj):
# def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):
# def tensorlab2tensor(lab_tensor,return_inbnd=False):
# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
# def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
# def tensor2vec(vector_tensor):
# def diagnose_network(net, name='network'):
# def grab_patch(img_in, P, yy, xx):
# def load_image(path):
# def resize_image(img, max_size=256):
# def resize_image_zoom(img, zoom_factor=1., order=3):
# def save_image(image_numpy, image_path, ):
# def prep_display_image(img, dtype='uint8'):
# def info(object, spacing=10, collapse=1):
# def varname(p):
# def print_numpy(x, val=True, shp=False):
# def mkdirs(paths):
# def mkdir(path):
# def rgb2lab(input):
# def montage(
# imgs,
# PAD=5,
# RATIO=16 / 9.,
# EXTRA_PAD=(
# False,
# False),
# MM=-1,
# NN=-1,
# primeDir=0,
# verbose=False,
# returnGridPos=False,
# backClr=np.array(
# (0,
# 0,
# 0))):
# def __init__(self, frequency=1):
# def __call__(self, module):
# def flatten_nested_list(nested_list):
# def read_file(in_path,list_lines=False):
# def read_csv_file_as_text(in_path):
# def random_swap(obj0,obj1):
# def voc_ap(rec, prec, use_07_metric=False):
# (N,C,X,Y) = in0_norm.shape
# N = in0.size()[0]
# X = in0.size()[2]
# Y = in0.size()[3]
# [Y, X] = img.shape[:2]
# Y = imgs.shape[0]
# X = imgs.shape[1]
# M = imgs.shape[2]
# N = imgs.shape[3]
# PADS = np.array((PAD))
# PADY = PADS
# PADX = PADS
# PADY = PADS[0]
# PADX = PADS[1]
# NN = np.ceil(np.sqrt(1.0 * N * RATIO))
# MM = np.ceil(1.0 * N / NN)
# NN = np.ceil(1.0 * N / MM)
# MM = np.ceil(1.0 * N / NN)
# NN = np.ceil(1.0 * N / MM)
# EXTRA_PADY = EXTRA_PAD[0] * PADY
# EXTRA_PADX = EXTRA_PAD[0] * PADX
# class zeroClipper(object):
#
# Path: third_party/PerceptualSimilarity/models/base_model.py
# class BaseModel(nn.Module):
# def __init__(self):
# super(BaseModel, self).__init__()
# pass;
#
# def name(self):
# return 'BaseModel'
#
# def initialize(self, use_gpu=True):
# self.use_gpu = use_gpu
# self.Tensor = torch.cuda.FloatTensor if self.use_gpu else torch.Tensor
# # self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
#
# def forward(self):
# pass
#
# def get_image_paths(self):
# pass
#
# def optimize_parameters(self):
# pass
#
# def get_current_visuals(self):
# return self.input
#
# def get_current_errors(self):
# return {}
#
# def save(self, label):
# pass
#
# # helper saving function that can be used by subclasses
# def save_network(self, network, path, network_label, epoch_label):
# save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
# save_path = os.path.join(path, save_filename)
# torch.save(network.state_dict(), save_path)
#
# # helper loading function that can be used by subclasses
# def load_network(self, network, network_label, epoch_label):
# # embed()
# save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
# save_path = os.path.join(self.save_dir, save_filename)
# print('Loading network from %s'%save_path)
# network.load_state_dict(torch.load(save_path))
#
# def update_learning_rate():
# pass
#
# def get_image_paths(self):
# return self.image_paths
#
# def save_done(self, flag=False):
# np.save(os.path.join(self.save_dir, 'done_flag'),flag)
# np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i')
. Output only the next line. | def initialize(self, model='net-lin', net='alex', model_path=None, colorspace='Lab', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, lr=.0001, beta1=0.5): |
Here is a snippet: <|code_start|>
def optimize_parameters(self):
pass
def get_current_visuals(self):
return self.input
def get_current_errors(self):
return {}
def save(self, label):
pass
# helper saving function that can be used by subclasses
def save_network(self, network, path, network_label, epoch_label):
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(path, save_filename)
torch.save(network.state_dict(), save_path)
# helper loading function that can be used by subclasses
def load_network(self, network, network_label, epoch_label):
# embed()
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(self.save_dir, save_filename)
print('Loading network from %s'%save_path)
network.load_state_dict(torch.load(save_path))
def update_learning_rate():
pass
<|code_end|>
. Write the next line using the current file imports:
import os
import torch
import torch.nn as nn
from ..util import util as util
from torch.autograd import Variable
from pdb import set_trace as st
and context from other files:
# Path: third_party/PerceptualSimilarity/util/util.py
# def datetime_str():
# def read_text_file(in_path):
# def bootstrap(in_vec,num_samples=100,bootfunc=np.mean):
# def rand_flip(input1,input2):
# def l2(p0, p1, range=255.):
# def psnr(p0, p1, peak=255.):
# def dssim(p0, p1, range=255.):
# def rgb2lab(in_img,mean_cent=False):
# def normalize_blob(in_feat,eps=1e-10):
# def cos_sim_blob(in0,in1):
# def normalize_tensor(in_feat,eps=1e-10):
# def cos_sim(in0,in1):
# def tensor2np(tensor_obj):
# def np2tensor(np_obj):
# def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):
# def tensorlab2tensor(lab_tensor,return_inbnd=False):
# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
# def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
# def tensor2vec(vector_tensor):
# def diagnose_network(net, name='network'):
# def grab_patch(img_in, P, yy, xx):
# def load_image(path):
# def resize_image(img, max_size=256):
# def resize_image_zoom(img, zoom_factor=1., order=3):
# def save_image(image_numpy, image_path, ):
# def prep_display_image(img, dtype='uint8'):
# def info(object, spacing=10, collapse=1):
# def varname(p):
# def print_numpy(x, val=True, shp=False):
# def mkdirs(paths):
# def mkdir(path):
# def rgb2lab(input):
# def montage(
# imgs,
# PAD=5,
# RATIO=16 / 9.,
# EXTRA_PAD=(
# False,
# False),
# MM=-1,
# NN=-1,
# primeDir=0,
# verbose=False,
# returnGridPos=False,
# backClr=np.array(
# (0,
# 0,
# 0))):
# def __init__(self, frequency=1):
# def __call__(self, module):
# def flatten_nested_list(nested_list):
# def read_file(in_path,list_lines=False):
# def read_csv_file_as_text(in_path):
# def random_swap(obj0,obj1):
# def voc_ap(rec, prec, use_07_metric=False):
# (N,C,X,Y) = in0_norm.shape
# N = in0.size()[0]
# X = in0.size()[2]
# Y = in0.size()[3]
# [Y, X] = img.shape[:2]
# Y = imgs.shape[0]
# X = imgs.shape[1]
# M = imgs.shape[2]
# N = imgs.shape[3]
# PADS = np.array((PAD))
# PADY = PADS
# PADX = PADS
# PADY = PADS[0]
# PADX = PADS[1]
# NN = np.ceil(np.sqrt(1.0 * N * RATIO))
# MM = np.ceil(1.0 * N / NN)
# NN = np.ceil(1.0 * N / MM)
# MM = np.ceil(1.0 * N / NN)
# NN = np.ceil(1.0 * N / MM)
# EXTRA_PADY = EXTRA_PAD[0] * PADY
# EXTRA_PADX = EXTRA_PAD[0] * PADX
# class zeroClipper(object):
, which may include functions, classes, or code. Output only the next line. | def get_image_paths(self): |
Given the code snippet: <|code_start|>@user_passes_test(lambda u: u.is_superuser)
def script_runner_index(request):
munger_builder_list = MungerBuilder.objects.order_by('id')
context = {'munger_builder_list': munger_builder_list}
return render(request, 'script_runner/script_runner_index.html', context)
@user_passes_test(lambda u: u.is_superuser)
def munger_builder_index(request):
munger_builder_list = MungerBuilder.objects.order_by('id')
context = {'munger_builder_list': munger_builder_list}
return render(request, 'script_runner/munger_builder_index.html', context)
@user_passes_test(lambda u: u.is_superuser)
def run_munger_output(request, munger_builder_id):
pretext_url = reverse('pivot_builder', args=[munger_builder_id])
pretext = "<p><a class=back-link href=\"{0}\">< Munger Tools</a></p>".format(pretext_url)
return StreamingHttpResponse(
content_generator(run_munger.main(munger_builder_id), pretext=pretext)
)
def build_munger_output(request, munger_builder_id):
mb = MungerBuilder.objects.get(pk=munger_builder_id)
if not mb.user_is_authorized():
return INDEX_REDIRECT
print(mb.munger_template)
script_string = build_munger.main(munger_builder_id)
highlighted = highlight(script_string, PythonLexer(), HtmlFormatter())
<|code_end|>
, generate the next line using the imports in this file:
import time
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib import messages
from django.http import StreamingHttpResponse
from django.core.urlresolvers import reverse
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from scripts import run_munger, build_munger
from script_builder.models import MungerBuilder
and context (functions, classes, or occasionally code) from other files:
# Path: scripts/run_munger.py
# def print_run_status(run_start_time, message):
# def main(munger_builder_id=1):
#
# Path: scripts/build_munger.py
# def main(munger_builder_id=1):
#
# Path: script_builder/models.py
# class MungerBuilder(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_mungerbuilder', 'View Munger'),
# )
#
# munger_name = models.CharField(max_length=200)
#
# munger_template = models.FilePathField(path='script_builder/templates/munger_templates', max_length=200,
# default='pandas_munger_template_basic.html')
#
# input_path = models.CharField(max_length=999, default='', blank=True)
# output_path = models.CharField(max_length=999, default='', blank=True)
#
# rows_to_delete_top = models.IntegerField(default=0)
# rows_to_delete_bottom = models.IntegerField(default=0)
#
# field_types = models.ManyToManyField(FieldType, related_name='munger_builder', related_query_name='munger_builder')
#
# default_aggregate_field_type = models.ForeignKey(FieldType, default=3, limit_choices_to={'pk__gt': 2},)
# is_sample = models.BooleanField(default=False)
#
# def save(self, *args, **kwargs):
# super().save(*args, **kwargs)
# # Always add default field types unless set from admin
# self.field_types.add(*(field_type for field_type in FieldType.default_field_types()))
# self.assign_perms(current_user())
#
# def user_is_authorized(self):
# return self.id == 1 or current_user().has_perm('script_builder.change_mungerbuilder', self)
#
# @property
# def pivot_fields(self):
# return PivotField.objects.filter(data_field__munger_builder__id=self.id)
#
# @property
# def index_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=1)]
#
# @property
# def column_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=2)]
#
# def aggregate_names_with_functions(self, evaled=False):
# # Needs to be ordered dicts
# func = eval if evaled else str
# aggregates_dict = defaultdict(list)
# for pf in self.pivot_fields.filter(field_type__id__gt=2):
# aggregates_dict[pf.active_name].append(pf.type_function)
# return {name: func(', '.join(type_functions)) for name, type_functions in aggregates_dict.items()}
#
# @property
# def rename_field_dict(self):
# return {field.current_name: field.new_name for field in self.data_fields.all() if field.needs_rename}
#
# @property
# def safe_file_name(self):
# return self.munger_name.replace(' ', '_').lower()
#
# @property
# def get_output_path(self):
# if self.output_path:
# return self.output_path
# else:
# input_dir = os.path.dirname(self.input_path)
# return os.path.join(input_dir, '{0}-output.csv'.format(self.safe_file_name))
#
# def __str__(self):
# return self.munger_name
. Output only the next line. | context = {'script_string': highlighted, 'mb_id': munger_builder_id} |
Using the snippet: <|code_start|>
INDEX_REDIRECT = HttpResponseRedirect('/script_builder/munger_builder_index')
@user_passes_test(lambda u: u.is_superuser)
def script_runner_index(request):
munger_builder_list = MungerBuilder.objects.order_by('id')
context = {'munger_builder_list': munger_builder_list}
return render(request, 'script_runner/script_runner_index.html', context)
@user_passes_test(lambda u: u.is_superuser)
def munger_builder_index(request):
munger_builder_list = MungerBuilder.objects.order_by('id')
context = {'munger_builder_list': munger_builder_list}
return render(request, 'script_runner/munger_builder_index.html', context)
@user_passes_test(lambda u: u.is_superuser)
def run_munger_output(request, munger_builder_id):
pretext_url = reverse('pivot_builder', args=[munger_builder_id])
pretext = "<p><a class=back-link href=\"{0}\">< Munger Tools</a></p>".format(pretext_url)
return StreamingHttpResponse(
content_generator(run_munger.main(munger_builder_id), pretext=pretext)
)
def build_munger_output(request, munger_builder_id):
mb = MungerBuilder.objects.get(pk=munger_builder_id)
if not mb.user_is_authorized():
return INDEX_REDIRECT
<|code_end|>
, determine the next line of code. You have imports:
import time
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib import messages
from django.http import StreamingHttpResponse
from django.core.urlresolvers import reverse
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from scripts import run_munger, build_munger
from script_builder.models import MungerBuilder
and context (class names, function names, or code) available:
# Path: scripts/run_munger.py
# def print_run_status(run_start_time, message):
# def main(munger_builder_id=1):
#
# Path: scripts/build_munger.py
# def main(munger_builder_id=1):
#
# Path: script_builder/models.py
# class MungerBuilder(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_mungerbuilder', 'View Munger'),
# )
#
# munger_name = models.CharField(max_length=200)
#
# munger_template = models.FilePathField(path='script_builder/templates/munger_templates', max_length=200,
# default='pandas_munger_template_basic.html')
#
# input_path = models.CharField(max_length=999, default='', blank=True)
# output_path = models.CharField(max_length=999, default='', blank=True)
#
# rows_to_delete_top = models.IntegerField(default=0)
# rows_to_delete_bottom = models.IntegerField(default=0)
#
# field_types = models.ManyToManyField(FieldType, related_name='munger_builder', related_query_name='munger_builder')
#
# default_aggregate_field_type = models.ForeignKey(FieldType, default=3, limit_choices_to={'pk__gt': 2},)
# is_sample = models.BooleanField(default=False)
#
# def save(self, *args, **kwargs):
# super().save(*args, **kwargs)
# # Always add default field types unless set from admin
# self.field_types.add(*(field_type for field_type in FieldType.default_field_types()))
# self.assign_perms(current_user())
#
# def user_is_authorized(self):
# return self.id == 1 or current_user().has_perm('script_builder.change_mungerbuilder', self)
#
# @property
# def pivot_fields(self):
# return PivotField.objects.filter(data_field__munger_builder__id=self.id)
#
# @property
# def index_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=1)]
#
# @property
# def column_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=2)]
#
# def aggregate_names_with_functions(self, evaled=False):
# # Needs to be ordered dicts
# func = eval if evaled else str
# aggregates_dict = defaultdict(list)
# for pf in self.pivot_fields.filter(field_type__id__gt=2):
# aggregates_dict[pf.active_name].append(pf.type_function)
# return {name: func(', '.join(type_functions)) for name, type_functions in aggregates_dict.items()}
#
# @property
# def rename_field_dict(self):
# return {field.current_name: field.new_name for field in self.data_fields.all() if field.needs_rename}
#
# @property
# def safe_file_name(self):
# return self.munger_name.replace(' ', '_').lower()
#
# @property
# def get_output_path(self):
# if self.output_path:
# return self.output_path
# else:
# input_dir = os.path.dirname(self.input_path)
# return os.path.join(input_dir, '{0}-output.csv'.format(self.safe_file_name))
#
# def __str__(self):
# return self.munger_name
. Output only the next line. | print(mb.munger_template) |
Here is a snippet: <|code_start|>
INDEX_REDIRECT = HttpResponseRedirect('/script_builder/munger_builder_index')
@user_passes_test(lambda u: u.is_superuser)
def script_runner_index(request):
munger_builder_list = MungerBuilder.objects.order_by('id')
context = {'munger_builder_list': munger_builder_list}
return render(request, 'script_runner/script_runner_index.html', context)
@user_passes_test(lambda u: u.is_superuser)
def munger_builder_index(request):
munger_builder_list = MungerBuilder.objects.order_by('id')
context = {'munger_builder_list': munger_builder_list}
return render(request, 'script_runner/munger_builder_index.html', context)
@user_passes_test(lambda u: u.is_superuser)
def run_munger_output(request, munger_builder_id):
pretext_url = reverse('pivot_builder', args=[munger_builder_id])
pretext = "<p><a class=back-link href=\"{0}\">< Munger Tools</a></p>".format(pretext_url)
<|code_end|>
. Write the next line using the current file imports:
import time
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib import messages
from django.http import StreamingHttpResponse
from django.core.urlresolvers import reverse
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from scripts import run_munger, build_munger
from script_builder.models import MungerBuilder
and context from other files:
# Path: scripts/run_munger.py
# def print_run_status(run_start_time, message):
# def main(munger_builder_id=1):
#
# Path: scripts/build_munger.py
# def main(munger_builder_id=1):
#
# Path: script_builder/models.py
# class MungerBuilder(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_mungerbuilder', 'View Munger'),
# )
#
# munger_name = models.CharField(max_length=200)
#
# munger_template = models.FilePathField(path='script_builder/templates/munger_templates', max_length=200,
# default='pandas_munger_template_basic.html')
#
# input_path = models.CharField(max_length=999, default='', blank=True)
# output_path = models.CharField(max_length=999, default='', blank=True)
#
# rows_to_delete_top = models.IntegerField(default=0)
# rows_to_delete_bottom = models.IntegerField(default=0)
#
# field_types = models.ManyToManyField(FieldType, related_name='munger_builder', related_query_name='munger_builder')
#
# default_aggregate_field_type = models.ForeignKey(FieldType, default=3, limit_choices_to={'pk__gt': 2},)
# is_sample = models.BooleanField(default=False)
#
# def save(self, *args, **kwargs):
# super().save(*args, **kwargs)
# # Always add default field types unless set from admin
# self.field_types.add(*(field_type for field_type in FieldType.default_field_types()))
# self.assign_perms(current_user())
#
# def user_is_authorized(self):
# return self.id == 1 or current_user().has_perm('script_builder.change_mungerbuilder', self)
#
# @property
# def pivot_fields(self):
# return PivotField.objects.filter(data_field__munger_builder__id=self.id)
#
# @property
# def index_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=1)]
#
# @property
# def column_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=2)]
#
# def aggregate_names_with_functions(self, evaled=False):
# # Needs to be ordered dicts
# func = eval if evaled else str
# aggregates_dict = defaultdict(list)
# for pf in self.pivot_fields.filter(field_type__id__gt=2):
# aggregates_dict[pf.active_name].append(pf.type_function)
# return {name: func(', '.join(type_functions)) for name, type_functions in aggregates_dict.items()}
#
# @property
# def rename_field_dict(self):
# return {field.current_name: field.new_name for field in self.data_fields.all() if field.needs_rename}
#
# @property
# def safe_file_name(self):
# return self.munger_name.replace(' ', '_').lower()
#
# @property
# def get_output_path(self):
# if self.output_path:
# return self.output_path
# else:
# input_dir = os.path.dirname(self.input_path)
# return os.path.join(input_dir, '{0}-output.csv'.format(self.safe_file_name))
#
# def __str__(self):
# return self.munger_name
, which may include functions, classes, or code. Output only the next line. | return StreamingHttpResponse( |
Next line prediction: <|code_start|>
logger = get_task_logger(__name__)
@shared_task
def run_munger(munger_builder_id=1):
<|code_end|>
. Use current file imports:
(import os
import scripts.run_munger
import scripts.build_munger
from django.http import HttpResponse
from django.conf import settings
from celery import task, shared_task
from celery.utils.log import get_task_logger
from .models import MungerBuilder)
and context including class names, function names, or small code snippets from other files:
# Path: script_builder/models.py
# class MungerBuilder(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_mungerbuilder', 'View Munger'),
# )
#
# munger_name = models.CharField(max_length=200)
#
# munger_template = models.FilePathField(path='script_builder/templates/munger_templates', max_length=200,
# default='pandas_munger_template_basic.html')
#
# input_path = models.CharField(max_length=999, default='', blank=True)
# output_path = models.CharField(max_length=999, default='', blank=True)
#
# rows_to_delete_top = models.IntegerField(default=0)
# rows_to_delete_bottom = models.IntegerField(default=0)
#
# field_types = models.ManyToManyField(FieldType, related_name='munger_builder', related_query_name='munger_builder')
#
# default_aggregate_field_type = models.ForeignKey(FieldType, default=3, limit_choices_to={'pk__gt': 2},)
# is_sample = models.BooleanField(default=False)
#
# def save(self, *args, **kwargs):
# super().save(*args, **kwargs)
# # Always add default field types unless set from admin
# self.field_types.add(*(field_type for field_type in FieldType.default_field_types()))
# self.assign_perms(current_user())
#
# def user_is_authorized(self):
# return self.id == 1 or current_user().has_perm('script_builder.change_mungerbuilder', self)
#
# @property
# def pivot_fields(self):
# return PivotField.objects.filter(data_field__munger_builder__id=self.id)
#
# @property
# def index_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=1)]
#
# @property
# def column_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=2)]
#
# def aggregate_names_with_functions(self, evaled=False):
# # Needs to be ordered dicts
# func = eval if evaled else str
# aggregates_dict = defaultdict(list)
# for pf in self.pivot_fields.filter(field_type__id__gt=2):
# aggregates_dict[pf.active_name].append(pf.type_function)
# return {name: func(', '.join(type_functions)) for name, type_functions in aggregates_dict.items()}
#
# @property
# def rename_field_dict(self):
# return {field.current_name: field.new_name for field in self.data_fields.all() if field.needs_rename}
#
# @property
# def safe_file_name(self):
# return self.munger_name.replace(' ', '_').lower()
#
# @property
# def get_output_path(self):
# if self.output_path:
# return self.output_path
# else:
# input_dir = os.path.dirname(self.input_path)
# return os.path.join(input_dir, '{0}-output.csv'.format(self.safe_file_name))
#
# def __str__(self):
# return self.munger_name
. Output only the next line. | return [log_entry for log_entry in scripts.run_munger.main(munger_builder_id)] |
Next line prediction: <|code_start|>
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
def main(munger_builder_id=1):
mb = MungerBuilder.objects.get(pk=munger_builder_id)
jinja_env = Environment(trim_blocks=True, lstrip_blocks=True,
loader=PackageLoader('script_builder', 'templates/munger_templates'),
<|code_end|>
. Use current file imports:
(import os
import sys
from script_builder.models import MungerBuilder
from jinja2 import Template, Environment, PackageLoader)
and context including class names, function names, or small code snippets from other files:
# Path: script_builder/models.py
# class MungerBuilder(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_mungerbuilder', 'View Munger'),
# )
#
# munger_name = models.CharField(max_length=200)
#
# munger_template = models.FilePathField(path='script_builder/templates/munger_templates', max_length=200,
# default='pandas_munger_template_basic.html')
#
# input_path = models.CharField(max_length=999, default='', blank=True)
# output_path = models.CharField(max_length=999, default='', blank=True)
#
# rows_to_delete_top = models.IntegerField(default=0)
# rows_to_delete_bottom = models.IntegerField(default=0)
#
# field_types = models.ManyToManyField(FieldType, related_name='munger_builder', related_query_name='munger_builder')
#
# default_aggregate_field_type = models.ForeignKey(FieldType, default=3, limit_choices_to={'pk__gt': 2},)
# is_sample = models.BooleanField(default=False)
#
# def save(self, *args, **kwargs):
# super().save(*args, **kwargs)
# # Always add default field types unless set from admin
# self.field_types.add(*(field_type for field_type in FieldType.default_field_types()))
# self.assign_perms(current_user())
#
# def user_is_authorized(self):
# return self.id == 1 or current_user().has_perm('script_builder.change_mungerbuilder', self)
#
# @property
# def pivot_fields(self):
# return PivotField.objects.filter(data_field__munger_builder__id=self.id)
#
# @property
# def index_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=1)]
#
# @property
# def column_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=2)]
#
# def aggregate_names_with_functions(self, evaled=False):
# # Needs to be ordered dicts
# func = eval if evaled else str
# aggregates_dict = defaultdict(list)
# for pf in self.pivot_fields.filter(field_type__id__gt=2):
# aggregates_dict[pf.active_name].append(pf.type_function)
# return {name: func(', '.join(type_functions)) for name, type_functions in aggregates_dict.items()}
#
# @property
# def rename_field_dict(self):
# return {field.current_name: field.new_name for field in self.data_fields.all() if field.needs_rename}
#
# @property
# def safe_file_name(self):
# return self.munger_name.replace(' ', '_').lower()
#
# @property
# def get_output_path(self):
# if self.output_path:
# return self.output_path
# else:
# input_dir = os.path.dirname(self.input_path)
# return os.path.join(input_dir, '{0}-output.csv'.format(self.safe_file_name))
#
# def __str__(self):
# return self.munger_name
. Output only the next line. | ) |
Predict the next line after this snippet: <|code_start|>
# Read data from singups with orders CSV from Looker and load into pandas DataFrame
# input_file = glob(os.path.abspath(mb.input_path))[0]
input_file = os.path.join(settings.BASE_DIR, 'static', mb.input_path)
print_run_status(run_start_time, 'Reading Data From:\n' + input_file.replace('\\', '/'))
if mb.rows_to_delete_top and mb.rows_to_delete_top != 0:
lines = open(input_file).readlines()
lines_top_removed = lines[mb.rows_to_delete_top:]
df = pd.read_csv(StringIO(''.join(lines_top_removed)))
else:
df = pd.read_csv(input_file)
if mb.rows_to_delete_bottom and mb.rows_to_delete_bottom != 0:
df = df.drop(df.index[-mb.rows_to_delete_bottom:])
if mb.rename_field_dict:
df = df.rename(columns=mb.rename_field_dict)
yield df.to_html()
# Create Pivot Table on Key and Write Output CSV
print_run_status(run_start_time, 'Writing Output CSVs...')
pivot_output = pd.pivot_table(
df,
index=mb.index_fields,
columns=mb.column_fields,
values=list(mb.aggregate_names_with_functions().keys()),
aggfunc=mb.aggregate_names_with_functions(evaled=True),
fill_value=0,
<|code_end|>
using the current file's imports:
import pandas as pd
import os
import sys
import traceback
from datetime import datetime
from io import StringIO
from django.conf import settings
from script_builder.models import MungerBuilder
and any relevant context from other files:
# Path: script_builder/models.py
# class MungerBuilder(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_mungerbuilder', 'View Munger'),
# )
#
# munger_name = models.CharField(max_length=200)
#
# munger_template = models.FilePathField(path='script_builder/templates/munger_templates', max_length=200,
# default='pandas_munger_template_basic.html')
#
# input_path = models.CharField(max_length=999, default='', blank=True)
# output_path = models.CharField(max_length=999, default='', blank=True)
#
# rows_to_delete_top = models.IntegerField(default=0)
# rows_to_delete_bottom = models.IntegerField(default=0)
#
# field_types = models.ManyToManyField(FieldType, related_name='munger_builder', related_query_name='munger_builder')
#
# default_aggregate_field_type = models.ForeignKey(FieldType, default=3, limit_choices_to={'pk__gt': 2},)
# is_sample = models.BooleanField(default=False)
#
# def save(self, *args, **kwargs):
# super().save(*args, **kwargs)
# # Always add default field types unless set from admin
# self.field_types.add(*(field_type for field_type in FieldType.default_field_types()))
# self.assign_perms(current_user())
#
# def user_is_authorized(self):
# return self.id == 1 or current_user().has_perm('script_builder.change_mungerbuilder', self)
#
# @property
# def pivot_fields(self):
# return PivotField.objects.filter(data_field__munger_builder__id=self.id)
#
# @property
# def index_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=1)]
#
# @property
# def column_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=2)]
#
# def aggregate_names_with_functions(self, evaled=False):
# # Needs to be ordered dicts
# func = eval if evaled else str
# aggregates_dict = defaultdict(list)
# for pf in self.pivot_fields.filter(field_type__id__gt=2):
# aggregates_dict[pf.active_name].append(pf.type_function)
# return {name: func(', '.join(type_functions)) for name, type_functions in aggregates_dict.items()}
#
# @property
# def rename_field_dict(self):
# return {field.current_name: field.new_name for field in self.data_fields.all() if field.needs_rename}
#
# @property
# def safe_file_name(self):
# return self.munger_name.replace(' ', '_').lower()
#
# @property
# def get_output_path(self):
# if self.output_path:
# return self.output_path
# else:
# input_dir = os.path.dirname(self.input_path)
# return os.path.join(input_dir, '{0}-output.csv'.format(self.safe_file_name))
#
# def __str__(self):
# return self.munger_name
. Output only the next line. | ) |
Given the code snippet: <|code_start|>class MungerBuilderAPIView(MungerPermissions,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
generics.GenericAPIView):
def get_queryset(self):
return self.serializer_class.Meta.model.objects.all()
def get(self, request, *args, **kwargs):
if 'pk' in kwargs:
return self.retrieve(request, *args, **kwargs)
else:
return self.list(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
user = request.user
if self.under_limit(user):
return self.create(request, *args, **kwargs)
else:
error_string = 'Cannot Create more {} - Delete some to make space'.format(
self.__class__.__name__
)
return Response(error_string, status=status.HTTP_403_FORBIDDEN)
def delete(self, request, *args, **kwargs):
<|code_end|>
, generate the next line using the imports in this file:
from guardian.shortcuts import get_objects_for_user
from .serializers import MungerSerializer, DataFieldSerializer, PivotFieldSerializer, FieldTypeSerializer
from rest_framework.response import Response
from rest_framework import status, filters, mixins, generics, permissions
and context (functions, classes, or occasionally code) from other files:
# Path: script_builder/serializers.py
# class MungerSerializer(PartialAllowed):
# data_fields = DataFieldSerializer(many=True, read_only=True)
# pivot_fields = PivotFieldSerializer(many=True, read_only=True)
# field_types = FieldTypeSerializer(many=True, read_only=True)
#
# class Meta:
# model = MungerBuilder
# fields = ('munger_name', 'munger_template', 'input_path', 'output_path', 'rows_to_delete_top',
# 'rows_to_delete_bottom', 'data_fields', 'pivot_fields', 'field_types',
# 'default_aggregate_field_type', 'is_sample')
#
# class DataFieldSerializer(PartialAllowed):
# class Meta:
# model = DataField
# fields = ('id', 'munger_builder', 'current_name', 'new_name', 'active_name')
#
# class PivotFieldSerializer(PartialAllowed):
# class Meta:
# model = PivotField
#
# class FieldTypeSerializer(PartialAllowed):
# class Meta:
# model = FieldType
# fields = '__all__'
. Output only the next line. | return self.destroy(request, *args, **kwargs) |
Given snippet: <|code_start|> return self.create(request, *args, **kwargs)
else:
error_string = 'Cannot Create more {} - Delete some to make space'.format(
self.__class__.__name__
)
return Response(error_string, status=status.HTTP_403_FORBIDDEN)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
def under_limit(self, user):
meta_name = self.serializer_class.Meta.model._meta.model_name
permission_name = 'script_builder.change_{}'.format(meta_name)
current_objects = get_objects_for_user(user, permission_name)
return len(current_objects) <= self.USER_OBJECT_LIMIT or user.is_superuser
class Mungers(MungerBuilderAPIView):
USER_OBJECT_LIMIT = 5
serializer_class = MungerSerializer
filter_backends = (filters.DjangoObjectPermissionsFilter,)
class DataFields(MungerBuilderAPIView):
USER_OBJECT_LIMIT = 100
serializer_class = DataFieldSerializer
class PivotFields(MungerBuilderAPIView):
USER_OBJECT_LIMIT = 100
serializer_class = PivotFieldSerializer
class FieldTypes(MungerBuilderAPIView):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from guardian.shortcuts import get_objects_for_user
from .serializers import MungerSerializer, DataFieldSerializer, PivotFieldSerializer, FieldTypeSerializer
from rest_framework.response import Response
from rest_framework import status, filters, mixins, generics, permissions
and context:
# Path: script_builder/serializers.py
# class MungerSerializer(PartialAllowed):
# data_fields = DataFieldSerializer(many=True, read_only=True)
# pivot_fields = PivotFieldSerializer(many=True, read_only=True)
# field_types = FieldTypeSerializer(many=True, read_only=True)
#
# class Meta:
# model = MungerBuilder
# fields = ('munger_name', 'munger_template', 'input_path', 'output_path', 'rows_to_delete_top',
# 'rows_to_delete_bottom', 'data_fields', 'pivot_fields', 'field_types',
# 'default_aggregate_field_type', 'is_sample')
#
# class DataFieldSerializer(PartialAllowed):
# class Meta:
# model = DataField
# fields = ('id', 'munger_builder', 'current_name', 'new_name', 'active_name')
#
# class PivotFieldSerializer(PartialAllowed):
# class Meta:
# model = PivotField
#
# class FieldTypeSerializer(PartialAllowed):
# class Meta:
# model = FieldType
# fields = '__all__'
which might include code, classes, or functions. Output only the next line. | USER_OBJECT_LIMIT = 10 |
Predict the next line for this snippet: <|code_start|> if 'pk' in kwargs:
return self.retrieve(request, *args, **kwargs)
else:
return self.list(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
user = request.user
if self.under_limit(user):
return self.create(request, *args, **kwargs)
else:
error_string = 'Cannot Create more {} - Delete some to make space'.format(
self.__class__.__name__
)
return Response(error_string, status=status.HTTP_403_FORBIDDEN)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
def under_limit(self, user):
meta_name = self.serializer_class.Meta.model._meta.model_name
permission_name = 'script_builder.change_{}'.format(meta_name)
current_objects = get_objects_for_user(user, permission_name)
return len(current_objects) <= self.USER_OBJECT_LIMIT or user.is_superuser
class Mungers(MungerBuilderAPIView):
USER_OBJECT_LIMIT = 5
serializer_class = MungerSerializer
<|code_end|>
with the help of current file imports:
from guardian.shortcuts import get_objects_for_user
from .serializers import MungerSerializer, DataFieldSerializer, PivotFieldSerializer, FieldTypeSerializer
from rest_framework.response import Response
from rest_framework import status, filters, mixins, generics, permissions
and context from other files:
# Path: script_builder/serializers.py
# class MungerSerializer(PartialAllowed):
# data_fields = DataFieldSerializer(many=True, read_only=True)
# pivot_fields = PivotFieldSerializer(many=True, read_only=True)
# field_types = FieldTypeSerializer(many=True, read_only=True)
#
# class Meta:
# model = MungerBuilder
# fields = ('munger_name', 'munger_template', 'input_path', 'output_path', 'rows_to_delete_top',
# 'rows_to_delete_bottom', 'data_fields', 'pivot_fields', 'field_types',
# 'default_aggregate_field_type', 'is_sample')
#
# class DataFieldSerializer(PartialAllowed):
# class Meta:
# model = DataField
# fields = ('id', 'munger_builder', 'current_name', 'new_name', 'active_name')
#
# class PivotFieldSerializer(PartialAllowed):
# class Meta:
# model = PivotField
#
# class FieldTypeSerializer(PartialAllowed):
# class Meta:
# model = FieldType
# fields = '__all__'
, which may contain function names, class names, or code. Output only the next line. | filter_backends = (filters.DjangoObjectPermissionsFilter,) |
Given the code snippet: <|code_start|>
if request.method == 'POST':
user_form = UserRegistrationForm(data=request.POST)
# If the two forms are valid...
if user_form.is_valid():
new_user = user_form.save()
assign_perm('script_builder.add_mungerbuilder', new_user)
assign_perm('script_builder.add_fieldtype', new_user)
assign_perm('script_builder.add_datafield', new_user)
assign_perm('script_builder.add_pivotfield', new_user)
messages.success(request, "Thanks for registering. You are now logged in.")
new_user = authenticate(username=request.POST['username'],
password=request.POST['password1'])
login(request, new_user)
return HttpResponseRedirect('/script_builder/munger_builder_index/')
else:
input_dict = request.POST.dict()
for key in input_dict:
if not input_dict[key]:
messages.error(request, 'Please enter: {0}'.format(key))
return HttpResponseRedirect('/register/')
# Not a HTTP POST, so we render our form using two ModelForm instances.
# These forms will be blank, ready for user input.
<|code_end|>
, generate the next line using the imports in this file:
from django.shortcuts import render, get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from guardian.shortcuts import assign_perm
from .forms import UserRegistrationForm
and context (functions, classes, or occasionally code) from other files:
# Path: munger_builder/forms.py
# class UserRegistrationForm(UserCreationForm):
#
# class Meta:
# model = User
# fields = ('username', 'email',)
. Output only the next line. | else: |
Next line prediction: <|code_start|>
class MungerTestCase(TestCase):
def setUp(self):
self.field_type_dict = {
'column': 'column',
'index': 'index',
'count': 'len',
'sum': 'np.sum',
'mean': 'np.mean',
'median': 'np.median',
}
self.add_field_types()
self.user = User.objects.create_user(
username='test_case_user', email='test_case_user@gmail.com', password='test_pw'
)
self.munger = MungerBuilder.objects.create(munger_name='test_munger', input_path='test_data.csv')
self.munger.save()
self.add_data_fields()
def add_field_types(self):
field_type_list = [FieldType(type_name=k, type_function=v) for k, v in self.field_type_dict.items()]
FieldType.objects.bulk_create(field_type_list)
def add_data_fields(self):
<|code_end|>
. Use current file imports:
(from django.contrib.auth.models import User
from django.test import TestCase
from script_builder.models import MungerBuilder, FieldType, DataField
from guardian.shortcuts import assign_perm)
and context including class names, function names, or small code snippets from other files:
# Path: script_builder/models.py
# class MungerBuilder(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_mungerbuilder', 'View Munger'),
# )
#
# munger_name = models.CharField(max_length=200)
#
# munger_template = models.FilePathField(path='script_builder/templates/munger_templates', max_length=200,
# default='pandas_munger_template_basic.html')
#
# input_path = models.CharField(max_length=999, default='', blank=True)
# output_path = models.CharField(max_length=999, default='', blank=True)
#
# rows_to_delete_top = models.IntegerField(default=0)
# rows_to_delete_bottom = models.IntegerField(default=0)
#
# field_types = models.ManyToManyField(FieldType, related_name='munger_builder', related_query_name='munger_builder')
#
# default_aggregate_field_type = models.ForeignKey(FieldType, default=3, limit_choices_to={'pk__gt': 2},)
# is_sample = models.BooleanField(default=False)
#
# def save(self, *args, **kwargs):
# super().save(*args, **kwargs)
# # Always add default field types unless set from admin
# self.field_types.add(*(field_type for field_type in FieldType.default_field_types()))
# self.assign_perms(current_user())
#
# def user_is_authorized(self):
# return self.id == 1 or current_user().has_perm('script_builder.change_mungerbuilder', self)
#
# @property
# def pivot_fields(self):
# return PivotField.objects.filter(data_field__munger_builder__id=self.id)
#
# @property
# def index_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=1)]
#
# @property
# def column_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=2)]
#
# def aggregate_names_with_functions(self, evaled=False):
# # Needs to be ordered dicts
# func = eval if evaled else str
# aggregates_dict = defaultdict(list)
# for pf in self.pivot_fields.filter(field_type__id__gt=2):
# aggregates_dict[pf.active_name].append(pf.type_function)
# return {name: func(', '.join(type_functions)) for name, type_functions in aggregates_dict.items()}
#
# @property
# def rename_field_dict(self):
# return {field.current_name: field.new_name for field in self.data_fields.all() if field.needs_rename}
#
# @property
# def safe_file_name(self):
# return self.munger_name.replace(' ', '_').lower()
#
# @property
# def get_output_path(self):
# if self.output_path:
# return self.output_path
# else:
# input_dir = os.path.dirname(self.input_path)
# return os.path.join(input_dir, '{0}-output.csv'.format(self.safe_file_name))
#
# def __str__(self):
# return self.munger_name
#
# class FieldType(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_fieldtype', 'View Pivot Field'),
# )
#
# type_name = models.CharField(max_length=200)
# type_function = models.CharField(max_length=200)
#
# @classmethod
# def default_field_types(cls):
# return cls.objects.filter(pk__in=range(1, 7))
#
# def __str__(self):
# return self.type_name.capitalize()
#
# class DataField(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_datafield', 'View Data Field'),
# )
#
# munger_builder = models.ForeignKey(MungerBuilder, related_name='data_fields', related_query_name='data_fields')
# current_name = models.CharField(max_length=200)
# new_name = models.CharField(max_length=200, null=True, blank=True)
#
# def save(self, *args, **kwargs):
# if not self.munger_builder.user_is_authorized():
# raise ValidationError(
# _('Not authorized to change munger: {}'.format(self.munger_builder.munger_name))
# )
# super().save(*args, **kwargs)
# self.assign_perms(current_user())
#
# def delete(self, *args, **kwargs):
# super().delete(*args, **kwargs)
#
# @property
# def needs_rename(self):
# return self.new_name and self.new_name != self.current_name
#
# @property
# def active_name(self):
# if self.new_name:
# return self.new_name
# else:
# return self.current_name
#
# def __str__(self):
# return self.active_name
. Output only the next line. | test_field_dict = { |
Continue the code snippet: <|code_start|>
class MungerTestCase(TestCase):
def setUp(self):
self.field_type_dict = {
'column': 'column',
'index': 'index',
'count': 'len',
'sum': 'np.sum',
'mean': 'np.mean',
'median': 'np.median',
}
self.add_field_types()
self.user = User.objects.create_user(
username='test_case_user', email='test_case_user@gmail.com', password='test_pw'
<|code_end|>
. Use current file imports:
from django.contrib.auth.models import User
from django.test import TestCase
from script_builder.models import MungerBuilder, FieldType, DataField
from guardian.shortcuts import assign_perm
and context (classes, functions, or code) from other files:
# Path: script_builder/models.py
# class MungerBuilder(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_mungerbuilder', 'View Munger'),
# )
#
# munger_name = models.CharField(max_length=200)
#
# munger_template = models.FilePathField(path='script_builder/templates/munger_templates', max_length=200,
# default='pandas_munger_template_basic.html')
#
# input_path = models.CharField(max_length=999, default='', blank=True)
# output_path = models.CharField(max_length=999, default='', blank=True)
#
# rows_to_delete_top = models.IntegerField(default=0)
# rows_to_delete_bottom = models.IntegerField(default=0)
#
# field_types = models.ManyToManyField(FieldType, related_name='munger_builder', related_query_name='munger_builder')
#
# default_aggregate_field_type = models.ForeignKey(FieldType, default=3, limit_choices_to={'pk__gt': 2},)
# is_sample = models.BooleanField(default=False)
#
# def save(self, *args, **kwargs):
# super().save(*args, **kwargs)
# # Always add default field types unless set from admin
# self.field_types.add(*(field_type for field_type in FieldType.default_field_types()))
# self.assign_perms(current_user())
#
# def user_is_authorized(self):
# return self.id == 1 or current_user().has_perm('script_builder.change_mungerbuilder', self)
#
# @property
# def pivot_fields(self):
# return PivotField.objects.filter(data_field__munger_builder__id=self.id)
#
# @property
# def index_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=1)]
#
# @property
# def column_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=2)]
#
# def aggregate_names_with_functions(self, evaled=False):
# # Needs to be ordered dicts
# func = eval if evaled else str
# aggregates_dict = defaultdict(list)
# for pf in self.pivot_fields.filter(field_type__id__gt=2):
# aggregates_dict[pf.active_name].append(pf.type_function)
# return {name: func(', '.join(type_functions)) for name, type_functions in aggregates_dict.items()}
#
# @property
# def rename_field_dict(self):
# return {field.current_name: field.new_name for field in self.data_fields.all() if field.needs_rename}
#
# @property
# def safe_file_name(self):
# return self.munger_name.replace(' ', '_').lower()
#
# @property
# def get_output_path(self):
# if self.output_path:
# return self.output_path
# else:
# input_dir = os.path.dirname(self.input_path)
# return os.path.join(input_dir, '{0}-output.csv'.format(self.safe_file_name))
#
# def __str__(self):
# return self.munger_name
#
# class FieldType(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_fieldtype', 'View Pivot Field'),
# )
#
# type_name = models.CharField(max_length=200)
# type_function = models.CharField(max_length=200)
#
# @classmethod
# def default_field_types(cls):
# return cls.objects.filter(pk__in=range(1, 7))
#
# def __str__(self):
# return self.type_name.capitalize()
#
# class DataField(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_datafield', 'View Data Field'),
# )
#
# munger_builder = models.ForeignKey(MungerBuilder, related_name='data_fields', related_query_name='data_fields')
# current_name = models.CharField(max_length=200)
# new_name = models.CharField(max_length=200, null=True, blank=True)
#
# def save(self, *args, **kwargs):
# if not self.munger_builder.user_is_authorized():
# raise ValidationError(
# _('Not authorized to change munger: {}'.format(self.munger_builder.munger_name))
# )
# super().save(*args, **kwargs)
# self.assign_perms(current_user())
#
# def delete(self, *args, **kwargs):
# super().delete(*args, **kwargs)
#
# @property
# def needs_rename(self):
# return self.new_name and self.new_name != self.current_name
#
# @property
# def active_name(self):
# if self.new_name:
# return self.new_name
# else:
# return self.current_name
#
# def __str__(self):
# return self.active_name
. Output only the next line. | ) |
Predict the next line for this snippet: <|code_start|>
class MungerTestCase(TestCase):
def setUp(self):
self.field_type_dict = {
'column': 'column',
'index': 'index',
'count': 'len',
'sum': 'np.sum',
'mean': 'np.mean',
'median': 'np.median',
}
self.add_field_types()
self.user = User.objects.create_user(
username='test_case_user', email='test_case_user@gmail.com', password='test_pw'
)
self.munger = MungerBuilder.objects.create(munger_name='test_munger', input_path='test_data.csv')
self.munger.save()
self.add_data_fields()
def add_field_types(self):
field_type_list = [FieldType(type_name=k, type_function=v) for k, v in self.field_type_dict.items()]
FieldType.objects.bulk_create(field_type_list)
def add_data_fields(self):
test_field_dict = {
'order_num': ['count'],
'product': [None],
'sales_name': ['index'],
<|code_end|>
with the help of current file imports:
from django.contrib.auth.models import User
from django.test import TestCase
from script_builder.models import MungerBuilder, FieldType, DataField
from guardian.shortcuts import assign_perm
and context from other files:
# Path: script_builder/models.py
# class MungerBuilder(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_mungerbuilder', 'View Munger'),
# )
#
# munger_name = models.CharField(max_length=200)
#
# munger_template = models.FilePathField(path='script_builder/templates/munger_templates', max_length=200,
# default='pandas_munger_template_basic.html')
#
# input_path = models.CharField(max_length=999, default='', blank=True)
# output_path = models.CharField(max_length=999, default='', blank=True)
#
# rows_to_delete_top = models.IntegerField(default=0)
# rows_to_delete_bottom = models.IntegerField(default=0)
#
# field_types = models.ManyToManyField(FieldType, related_name='munger_builder', related_query_name='munger_builder')
#
# default_aggregate_field_type = models.ForeignKey(FieldType, default=3, limit_choices_to={'pk__gt': 2},)
# is_sample = models.BooleanField(default=False)
#
# def save(self, *args, **kwargs):
# super().save(*args, **kwargs)
# # Always add default field types unless set from admin
# self.field_types.add(*(field_type for field_type in FieldType.default_field_types()))
# self.assign_perms(current_user())
#
# def user_is_authorized(self):
# return self.id == 1 or current_user().has_perm('script_builder.change_mungerbuilder', self)
#
# @property
# def pivot_fields(self):
# return PivotField.objects.filter(data_field__munger_builder__id=self.id)
#
# @property
# def index_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=1)]
#
# @property
# def column_fields(self):
# return [pf.active_name for pf in self.pivot_fields.filter(field_type__id=2)]
#
# def aggregate_names_with_functions(self, evaled=False):
# # Needs to be ordered dicts
# func = eval if evaled else str
# aggregates_dict = defaultdict(list)
# for pf in self.pivot_fields.filter(field_type__id__gt=2):
# aggregates_dict[pf.active_name].append(pf.type_function)
# return {name: func(', '.join(type_functions)) for name, type_functions in aggregates_dict.items()}
#
# @property
# def rename_field_dict(self):
# return {field.current_name: field.new_name for field in self.data_fields.all() if field.needs_rename}
#
# @property
# def safe_file_name(self):
# return self.munger_name.replace(' ', '_').lower()
#
# @property
# def get_output_path(self):
# if self.output_path:
# return self.output_path
# else:
# input_dir = os.path.dirname(self.input_path)
# return os.path.join(input_dir, '{0}-output.csv'.format(self.safe_file_name))
#
# def __str__(self):
# return self.munger_name
#
# class FieldType(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_fieldtype', 'View Pivot Field'),
# )
#
# type_name = models.CharField(max_length=200)
# type_function = models.CharField(max_length=200)
#
# @classmethod
# def default_field_types(cls):
# return cls.objects.filter(pk__in=range(1, 7))
#
# def __str__(self):
# return self.type_name.capitalize()
#
# class DataField(models.Model, PermissionedModel):
#
# class Meta:
# permissions = (
# ('view_datafield', 'View Data Field'),
# )
#
# munger_builder = models.ForeignKey(MungerBuilder, related_name='data_fields', related_query_name='data_fields')
# current_name = models.CharField(max_length=200)
# new_name = models.CharField(max_length=200, null=True, blank=True)
#
# def save(self, *args, **kwargs):
# if not self.munger_builder.user_is_authorized():
# raise ValidationError(
# _('Not authorized to change munger: {}'.format(self.munger_builder.munger_name))
# )
# super().save(*args, **kwargs)
# self.assign_perms(current_user())
#
# def delete(self, *args, **kwargs):
# super().delete(*args, **kwargs)
#
# @property
# def needs_rename(self):
# return self.new_name and self.new_name != self.current_name
#
# @property
# def active_name(self):
# if self.new_name:
# return self.new_name
# else:
# return self.current_name
#
# def __str__(self):
# return self.active_name
, which may contain function names, class names, or code. Output only the next line. | 'region': ['column'], |
Based on the snippet: <|code_start|> input_dir = os.path.dirname(self.input_path)
return os.path.join(input_dir, '{0}-output.csv'.format(self.safe_file_name))
def __str__(self):
return self.munger_name
class DataField(models.Model, PermissionedModel):
class Meta:
permissions = (
('view_datafield', 'View Data Field'),
)
munger_builder = models.ForeignKey(MungerBuilder, related_name='data_fields', related_query_name='data_fields')
current_name = models.CharField(max_length=200)
new_name = models.CharField(max_length=200, null=True, blank=True)
def save(self, *args, **kwargs):
if not self.munger_builder.user_is_authorized():
raise ValidationError(
_('Not authorized to change munger: {}'.format(self.munger_builder.munger_name))
)
super().save(*args, **kwargs)
self.assign_perms(current_user())
def delete(self, *args, **kwargs):
super().delete(*args, **kwargs)
@property
def needs_rename(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import os
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from guardian.shortcuts import assign_perm
from ordered_model.models import OrderedModel
from collections import defaultdict
from .current_user import current_user
and context (classes, functions, sometimes code) from other files:
# Path: script_builder/current_user.py
# def current_user():
# t = current_thread()
# if t not in _requests:
# return None
# return _requests[t].user
. Output only the next line. | return self.new_name and self.new_name != self.current_name |
Predict the next line for this snippet: <|code_start|>def main():
"""
Generate the organization enum
"""
print datetime.now(), "Start"
parms = get_parms()
# Organization
query = """
SELECT (MIN (?xlabel) AS ?short) ?vivo
WHERE
{
?vivo rdf:type foaf:Organization .
?vivo rdfs:label ?xlabel .
}
GROUP BY ?vivo
ORDER BY ?short
"""
create_enum("country_enum.txt", query, parms)
# Country
query = """
SELECT (MIN (?xlabel) AS ?short) ?vivo
WHERE
{
?vivo rdf:type vivo:Country .
?vivo rdfs:label ?xlabel .
<|code_end|>
with the help of current file imports:
from datetime import datetime
from pump.vivopump import get_parms, create_enum
and context from other files:
# Path: pump/vivopump.py
# def get_parms():
# """
# Use get_args to get the args, and return a dictionary of the args ready for
# use in pump software.
# @see get_args()
#
# :return: dict: parms
# """
# parms = {}
# args = get_args()
# for name, val in vars(args).items():
# if val is not None:
# parms[name] = val
# return parms
#
# def create_enum(filename, query, parms, trim=0, skip=0):
# """
# Given, query, parms and a filename, execute the query and write the enum into the file
# :param: filename: name of the file to contain the enumeration
# :param: query: the query to be used to create the columns for the enumeration
# :param: parms: dictionary of VIVO SPARQL API parameters
# :param: trim: If 0, no trim. If k, return the first k characters as a trimmed value for short
# :param: skip: If 0, no skip. If k, skip the first k characters as a trimmed value for short
# :return: None
# """
# import codecs
# data = vivo_query(query, parms)
# outfile = codecs.open(filename, mode='w', encoding='ascii', errors='xmlcharrefreplace')
# outfile.write("short\tvivo\n")
# for item in data['results']['bindings']:
# if trim == 0 and skip==0:
# outfile.write(item["short"]["value"] + "\t" + item["vivo"]["value"] + "\n")
# elif trim != 0 and skip == 0:
# outfile.write(item["short"]["value"][:trim] + "\t" + item["vivo"]["value"] + "\n")
# elif trim == 0 and skip != 0:
# outfile.write(item["short"]["value"][skip:] + "\t" + item["vivo"]["value"] + "\n")
# else:
# outfile.write(item["short"]["value"][skip:-trim] + "\t" + item["vivo"]["value"] + "\n")
# outfile.close()
, which may contain function names, class names, or code. Output only the next line. | } |
Continue the code snippet: <|code_start|>#!/usr/bin/env/python
"""
make_enum.py -- make enumerations for orgs
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2017 (c) Michael Conlon"
__license__ = "BSD 3-Clause license"
__version__ = "0.2"
def main():
"""
Generate the organization enum
"""
print datetime.now(), "Start"
parms = get_parms()
# Organization
query = """
SELECT (MIN (?xlabel) AS ?short) ?vivo
WHERE
<|code_end|>
. Use current file imports:
from datetime import datetime
from pump.vivopump import get_parms, create_enum
and context (classes, functions, or code) from other files:
# Path: pump/vivopump.py
# def get_parms():
# """
# Use get_args to get the args, and return a dictionary of the args ready for
# use in pump software.
# @see get_args()
#
# :return: dict: parms
# """
# parms = {}
# args = get_args()
# for name, val in vars(args).items():
# if val is not None:
# parms[name] = val
# return parms
#
# def create_enum(filename, query, parms, trim=0, skip=0):
# """
# Given, query, parms and a filename, execute the query and write the enum into the file
# :param: filename: name of the file to contain the enumeration
# :param: query: the query to be used to create the columns for the enumeration
# :param: parms: dictionary of VIVO SPARQL API parameters
# :param: trim: If 0, no trim. If k, return the first k characters as a trimmed value for short
# :param: skip: If 0, no skip. If k, skip the first k characters as a trimmed value for short
# :return: None
# """
# import codecs
# data = vivo_query(query, parms)
# outfile = codecs.open(filename, mode='w', encoding='ascii', errors='xmlcharrefreplace')
# outfile.write("short\tvivo\n")
# for item in data['results']['bindings']:
# if trim == 0 and skip==0:
# outfile.write(item["short"]["value"] + "\t" + item["vivo"]["value"] + "\n")
# elif trim != 0 and skip == 0:
# outfile.write(item["short"]["value"][:trim] + "\t" + item["vivo"]["value"] + "\n")
# elif trim == 0 and skip != 0:
# outfile.write(item["short"]["value"][skip:] + "\t" + item["vivo"]["value"] + "\n")
# else:
# outfile.write(item["short"]["value"][skip:-trim] + "\t" + item["vivo"]["value"] + "\n")
# outfile.close()
. Output only the next line. | { |
Continue the code snippet: <|code_start|> 2. pubs in the source keyed by doi
There are three cases
- pub in VIVO and in source => add to update data with uri
- pub in VIVO, not in source => nothing to do
- pub not in VIVO, is in source => Add to update data with blank uri
(to be assigned during update)
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2015 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.01"
def get_vivo_academic_articles(parms):
"""
Query VIVO and return a list of all the academic articles.
@see uf_examples/publications/filters/pub_match_filter.py
@see https://wiki.duraspace.org/display/VIVO/VIVO-ISF+1.6+relationship+diagrams%3A+Authorship
:param: parms: vivo_query params
:return: dictionary of uri keyed by DOI
"""
query = """
SELECT
?uri ?doi
WHERE {
<|code_end|>
. Use current file imports:
import sys
from pump.vivopump import read_csv_fp, write_csv_fp, get_parms, vivo_query
from disambiguate.utils import print_err
and context (classes, functions, or code) from other files:
# Path: pump/vivopump.py
# def read_csv_fp(fp, skip=True, delimiter="|"):
# """
# Given a filename, read the CSV file with that name. We use "|" as a
# separator in CSV files to allow commas to appear in values.
#
# CSV files read by this function follow these conventions:
# -- use delimiter as a separator. Defaults to vertical bar.
# -- have a first row that contains column headings.
# -- all elements must have values. To specify a missing value, use
# the string "None" or "NULL" between separators, that is |None| or |NULL|
# -- leading and trailing whitespace in values is ignored. | The | will be
# read as "The"
# -- if skip=True, rows with too many or too few data elements are skipped.
# if skip=False, a RowError is thrown
#
# CSV files processed by read_csv will be returned as a dictionary of
# dictionaries, one dictionary per row keyed by an integer row number. This supports
# maintaining the order of the data input, which is important for some applications
# """
#
# class RowError(Exception):
# """
# Thrown when the number of data elements on a row in a CSV is not equal to the number of header elements
# """
# pass
#
# heading = []
# row_number = 0
# data = {}
# for row in UnicodeCsvReader(fp, delimiter=delimiter):
# i = 0
# for r in row:
# # remove white space fore and aft
# row[i] = r.strip(string.whitespace)
# i += 1
# if len(heading) == 0:
# heading = row # the first row is the heading
# continue
# row_number += 1
# if len(row) == len(heading):
# data[row_number] = {}
# i = 0
# for r in row:
# data[row_number][heading[i]] = r
# i += 1
# elif not skip:
# raise RowError("On row " + str(row_number) + ", expecting " +
# str(len(heading)) + " data values. Found " +
# str(len(row)) + " data values. Row contents = " +
# str(row))
# else:
# pass # row has wrong number of columns and skip is True
# logger.debug("loader returns {} rows".format(len(data)))
# return data
#
# def write_csv_fp(fp, data, delimiter='|'):
# """
# Write a CSV to a file pointer. Used to support stdout.
# :param fp: File pointer. Could be stdout.
# :param data: data to be written
# :param delimiter: field delimiter for output
# :return:
# """
# assert(len(data.keys()) > 0)
#
# # create a list of var_names from the first row
# var_names = data[data.keys()[0]].keys()
# fp.write(delimiter.join(var_names) + '\n')
#
# for key in sorted(data.keys()):
# fp.write(delimiter.join([data[key][x] for x in var_names]) + '\n')
#
# def get_parms():
# """
# Use get_args to get the args, and return a dictionary of the args ready for
# use in pump software.
# @see get_args()
#
# :return: dict: parms
# """
# parms = {}
# args = get_args()
# for name, val in vars(args).items():
# if val is not None:
# parms[name] = val
# return parms
#
# def vivo_query(query, parms):
# """
# A new VIVO query function using SPARQLWrapper. Tested with Stardog, UF VIVO and Dbpedia
# :param query: SPARQL query. VIVO PREFIX will be added
# :param parms: dictionary with query parms: queryuri, username and password
# :return: result object, typically JSON
# :rtype: dict
# """
# from SPARQLWrapper import SPARQLWrapper, JSON
#
# logger.debug(u"in vivo_query\n{}".format(parms))
# sparql = SPARQLWrapper(parms['queryuri'])
# new_query = parms['prefix'] + '\n' + query
# sparql.setQuery(new_query)
# logger.debug(new_query)
# sparql.setReturnFormat(JSON)
# sparql.addParameter("email", parms['username'])
# sparql.addParameter("password", parms['password'])
# # sparql.setCredentials(parms['username'], parms['password'])
# results = sparql.query()
# results = results.convert()
# return results
. Output only the next line. | ?uri a vivo:InformationResource . |
Next line prediction: <|code_start|>
There are two inputs:
1. pubs in VIVO keyed by doi
2. pubs in the source keyed by doi
There are three cases
- pub in VIVO and in source => add to update data with uri
- pub in VIVO, not in source => nothing to do
- pub not in VIVO, is in source => Add to update data with blank uri
(to be assigned during update)
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2015 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.01"
def get_vivo_academic_articles(parms):
"""
Query VIVO and return a list of all the academic articles.
@see uf_examples/publications/filters/pub_match_filter.py
@see https://wiki.duraspace.org/display/VIVO/VIVO-ISF+1.6+relationship+diagrams%3A+Authorship
:param: parms: vivo_query params
:return: dictionary of uri keyed by DOI
"""
query = """
<|code_end|>
. Use current file imports:
(import sys
from pump.vivopump import read_csv_fp, write_csv_fp, get_parms, vivo_query
from disambiguate.utils import print_err)
and context including class names, function names, or small code snippets from other files:
# Path: pump/vivopump.py
# def read_csv_fp(fp, skip=True, delimiter="|"):
# """
# Given a filename, read the CSV file with that name. We use "|" as a
# separator in CSV files to allow commas to appear in values.
#
# CSV files read by this function follow these conventions:
# -- use delimiter as a separator. Defaults to vertical bar.
# -- have a first row that contains column headings.
# -- all elements must have values. To specify a missing value, use
# the string "None" or "NULL" between separators, that is |None| or |NULL|
# -- leading and trailing whitespace in values is ignored. | The | will be
# read as "The"
# -- if skip=True, rows with too many or too few data elements are skipped.
# if skip=False, a RowError is thrown
#
# CSV files processed by read_csv will be returned as a dictionary of
# dictionaries, one dictionary per row keyed by an integer row number. This supports
# maintaining the order of the data input, which is important for some applications
# """
#
# class RowError(Exception):
# """
# Thrown when the number of data elements on a row in a CSV is not equal to the number of header elements
# """
# pass
#
# heading = []
# row_number = 0
# data = {}
# for row in UnicodeCsvReader(fp, delimiter=delimiter):
# i = 0
# for r in row:
# # remove white space fore and aft
# row[i] = r.strip(string.whitespace)
# i += 1
# if len(heading) == 0:
# heading = row # the first row is the heading
# continue
# row_number += 1
# if len(row) == len(heading):
# data[row_number] = {}
# i = 0
# for r in row:
# data[row_number][heading[i]] = r
# i += 1
# elif not skip:
# raise RowError("On row " + str(row_number) + ", expecting " +
# str(len(heading)) + " data values. Found " +
# str(len(row)) + " data values. Row contents = " +
# str(row))
# else:
# pass # row has wrong number of columns and skip is True
# logger.debug("loader returns {} rows".format(len(data)))
# return data
#
# def write_csv_fp(fp, data, delimiter='|'):
# """
# Write a CSV to a file pointer. Used to support stdout.
# :param fp: File pointer. Could be stdout.
# :param data: data to be written
# :param delimiter: field delimiter for output
# :return:
# """
# assert(len(data.keys()) > 0)
#
# # create a list of var_names from the first row
# var_names = data[data.keys()[0]].keys()
# fp.write(delimiter.join(var_names) + '\n')
#
# for key in sorted(data.keys()):
# fp.write(delimiter.join([data[key][x] for x in var_names]) + '\n')
#
# def get_parms():
# """
# Use get_args to get the args, and return a dictionary of the args ready for
# use in pump software.
# @see get_args()
#
# :return: dict: parms
# """
# parms = {}
# args = get_args()
# for name, val in vars(args).items():
# if val is not None:
# parms[name] = val
# return parms
#
# def vivo_query(query, parms):
# """
# A new VIVO query function using SPARQLWrapper. Tested with Stardog, UF VIVO and Dbpedia
# :param query: SPARQL query. VIVO PREFIX will be added
# :param parms: dictionary with query parms: queryuri, username and password
# :return: result object, typically JSON
# :rtype: dict
# """
# from SPARQLWrapper import SPARQLWrapper, JSON
#
# logger.debug(u"in vivo_query\n{}".format(parms))
# sparql = SPARQLWrapper(parms['queryuri'])
# new_query = parms['prefix'] + '\n' + query
# sparql.setQuery(new_query)
# logger.debug(new_query)
# sparql.setReturnFormat(JSON)
# sparql.addParameter("email", parms['username'])
# sparql.addParameter("password", parms['password'])
# # sparql.setCredentials(parms['username'], parms['password'])
# results = sparql.query()
# results = results.convert()
# return results
. Output only the next line. | SELECT |
Based on the snippet: <|code_start|>There are three cases
- pub in VIVO and in source => add to update data with uri
- pub in VIVO, not in source => nothing to do
- pub not in VIVO, is in source => Add to update data with blank uri
(to be assigned during update)
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2015 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.01"
def get_vivo_academic_articles(parms):
"""
Query VIVO and return a list of all the academic articles.
@see uf_examples/publications/filters/pub_match_filter.py
@see https://wiki.duraspace.org/display/VIVO/VIVO-ISF+1.6+relationship+diagrams%3A+Authorship
:param: parms: vivo_query params
:return: dictionary of uri keyed by DOI
"""
query = """
SELECT
?uri ?doi
WHERE {
?uri a vivo:InformationResource .
?uri bibo:doi ?doi .
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
from pump.vivopump import read_csv_fp, write_csv_fp, get_parms, vivo_query
from disambiguate.utils import print_err
and context (classes, functions, sometimes code) from other files:
# Path: pump/vivopump.py
# def read_csv_fp(fp, skip=True, delimiter="|"):
# """
# Given a filename, read the CSV file with that name. We use "|" as a
# separator in CSV files to allow commas to appear in values.
#
# CSV files read by this function follow these conventions:
# -- use delimiter as a separator. Defaults to vertical bar.
# -- have a first row that contains column headings.
# -- all elements must have values. To specify a missing value, use
# the string "None" or "NULL" between separators, that is |None| or |NULL|
# -- leading and trailing whitespace in values is ignored. | The | will be
# read as "The"
# -- if skip=True, rows with too many or too few data elements are skipped.
# if skip=False, a RowError is thrown
#
# CSV files processed by read_csv will be returned as a dictionary of
# dictionaries, one dictionary per row keyed by an integer row number. This supports
# maintaining the order of the data input, which is important for some applications
# """
#
# class RowError(Exception):
# """
# Thrown when the number of data elements on a row in a CSV is not equal to the number of header elements
# """
# pass
#
# heading = []
# row_number = 0
# data = {}
# for row in UnicodeCsvReader(fp, delimiter=delimiter):
# i = 0
# for r in row:
# # remove white space fore and aft
# row[i] = r.strip(string.whitespace)
# i += 1
# if len(heading) == 0:
# heading = row # the first row is the heading
# continue
# row_number += 1
# if len(row) == len(heading):
# data[row_number] = {}
# i = 0
# for r in row:
# data[row_number][heading[i]] = r
# i += 1
# elif not skip:
# raise RowError("On row " + str(row_number) + ", expecting " +
# str(len(heading)) + " data values. Found " +
# str(len(row)) + " data values. Row contents = " +
# str(row))
# else:
# pass # row has wrong number of columns and skip is True
# logger.debug("loader returns {} rows".format(len(data)))
# return data
#
# def write_csv_fp(fp, data, delimiter='|'):
# """
# Write a CSV to a file pointer. Used to support stdout.
# :param fp: File pointer. Could be stdout.
# :param data: data to be written
# :param delimiter: field delimiter for output
# :return:
# """
# assert(len(data.keys()) > 0)
#
# # create a list of var_names from the first row
# var_names = data[data.keys()[0]].keys()
# fp.write(delimiter.join(var_names) + '\n')
#
# for key in sorted(data.keys()):
# fp.write(delimiter.join([data[key][x] for x in var_names]) + '\n')
#
# def get_parms():
# """
# Use get_args to get the args, and return a dictionary of the args ready for
# use in pump software.
# @see get_args()
#
# :return: dict: parms
# """
# parms = {}
# args = get_args()
# for name, val in vars(args).items():
# if val is not None:
# parms[name] = val
# return parms
#
# def vivo_query(query, parms):
# """
# A new VIVO query function using SPARQLWrapper. Tested with Stardog, UF VIVO and Dbpedia
# :param query: SPARQL query. VIVO PREFIX will be added
# :param parms: dictionary with query parms: queryuri, username and password
# :return: result object, typically JSON
# :rtype: dict
# """
# from SPARQLWrapper import SPARQLWrapper, JSON
#
# logger.debug(u"in vivo_query\n{}".format(parms))
# sparql = SPARQLWrapper(parms['queryuri'])
# new_query = parms['prefix'] + '\n' + query
# sparql.setQuery(new_query)
# logger.debug(new_query)
# sparql.setReturnFormat(JSON)
# sparql.addParameter("email", parms['username'])
# sparql.addParameter("password", parms['password'])
# # sparql.setCredentials(parms['username'], parms['password'])
# results = sparql.query()
# results = results.convert()
# return results
. Output only the next line. | } |
Predict the next line for this snippet: <|code_start|>
There are three cases
- pub in VIVO and in source => add to update data with uri
- pub in VIVO, not in source => nothing to do
- pub not in VIVO, is in source => Add to update data with blank uri
(to be assigned during update)
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2015 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.01"
def get_vivo_academic_articles(parms):
"""
Query VIVO and return a list of all the academic articles.
@see uf_examples/publications/filters/pub_match_filter.py
@see https://wiki.duraspace.org/display/VIVO/VIVO-ISF+1.6+relationship+diagrams%3A+Authorship
:param: parms: vivo_query params
:return: dictionary of uri keyed by DOI
"""
query = """
SELECT
?uri ?doi
WHERE {
?uri a vivo:InformationResource .
<|code_end|>
with the help of current file imports:
import sys
from pump.vivopump import read_csv_fp, write_csv_fp, get_parms, vivo_query
from disambiguate.utils import print_err
and context from other files:
# Path: pump/vivopump.py
# def read_csv_fp(fp, skip=True, delimiter="|"):
# """
# Given a filename, read the CSV file with that name. We use "|" as a
# separator in CSV files to allow commas to appear in values.
#
# CSV files read by this function follow these conventions:
# -- use delimiter as a separator. Defaults to vertical bar.
# -- have a first row that contains column headings.
# -- all elements must have values. To specify a missing value, use
# the string "None" or "NULL" between separators, that is |None| or |NULL|
# -- leading and trailing whitespace in values is ignored. | The | will be
# read as "The"
# -- if skip=True, rows with too many or too few data elements are skipped.
# if skip=False, a RowError is thrown
#
# CSV files processed by read_csv will be returned as a dictionary of
# dictionaries, one dictionary per row keyed by an integer row number. This supports
# maintaining the order of the data input, which is important for some applications
# """
#
# class RowError(Exception):
# """
# Thrown when the number of data elements on a row in a CSV is not equal to the number of header elements
# """
# pass
#
# heading = []
# row_number = 0
# data = {}
# for row in UnicodeCsvReader(fp, delimiter=delimiter):
# i = 0
# for r in row:
# # remove white space fore and aft
# row[i] = r.strip(string.whitespace)
# i += 1
# if len(heading) == 0:
# heading = row # the first row is the heading
# continue
# row_number += 1
# if len(row) == len(heading):
# data[row_number] = {}
# i = 0
# for r in row:
# data[row_number][heading[i]] = r
# i += 1
# elif not skip:
# raise RowError("On row " + str(row_number) + ", expecting " +
# str(len(heading)) + " data values. Found " +
# str(len(row)) + " data values. Row contents = " +
# str(row))
# else:
# pass # row has wrong number of columns and skip is True
# logger.debug("loader returns {} rows".format(len(data)))
# return data
#
# def write_csv_fp(fp, data, delimiter='|'):
# """
# Write a CSV to a file pointer. Used to support stdout.
# :param fp: File pointer. Could be stdout.
# :param data: data to be written
# :param delimiter: field delimiter for output
# :return:
# """
# assert(len(data.keys()) > 0)
#
# # create a list of var_names from the first row
# var_names = data[data.keys()[0]].keys()
# fp.write(delimiter.join(var_names) + '\n')
#
# for key in sorted(data.keys()):
# fp.write(delimiter.join([data[key][x] for x in var_names]) + '\n')
#
# def get_parms():
# """
# Use get_args to get the args, and return a dictionary of the args ready for
# use in pump software.
# @see get_args()
#
# :return: dict: parms
# """
# parms = {}
# args = get_args()
# for name, val in vars(args).items():
# if val is not None:
# parms[name] = val
# return parms
#
# def vivo_query(query, parms):
# """
# A new VIVO query function using SPARQLWrapper. Tested with Stardog, UF VIVO and Dbpedia
# :param query: SPARQL query. VIVO PREFIX will be added
# :param parms: dictionary with query parms: queryuri, username and password
# :return: result object, typically JSON
# :rtype: dict
# """
# from SPARQLWrapper import SPARQLWrapper, JSON
#
# logger.debug(u"in vivo_query\n{}".format(parms))
# sparql = SPARQLWrapper(parms['queryuri'])
# new_query = parms['prefix'] + '\n' + query
# sparql.setQuery(new_query)
# logger.debug(new_query)
# sparql.setReturnFormat(JSON)
# sparql.addParameter("email", parms['username'])
# sparql.addParameter("password", parms['password'])
# # sparql.setCredentials(parms['username'], parms['password'])
# results = sparql.query()
# results = results.convert()
# return results
, which may contain function names, class names, or code. Output only the next line. | ?uri bibo:doi ?doi . |
Given the code snippet: <|code_start|> SELECT (MIN (?xshort) AS ?short) ?vivo
WHERE
{
?vivo vivo:orcidId ?xshort .
}
GROUP BY ?vivo
ORDER BY ?short
"""
create_enum("orcid_enum.txt", query, parms)
# journals via issn
query = """
SELECT (MIN (?xlabel) AS ?short) ?vivo
WHERE
{
?vivo a bibo:Journal .
?vivo rdfs:label ?xlabel .
}
GROUP BY ?vivo
ORDER BY ?short
"""
create_enum("journal_enum.txt", query, parms)
# dates via datetime
query = """
SELECT ?short ?vivo
<|code_end|>
, generate the next line using the imports in this file:
from datetime import datetime
from pump.vivopump import get_parms, create_enum
and context (functions, classes, or occasionally code) from other files:
# Path: pump/vivopump.py
# def get_parms():
# """
# Use get_args to get the args, and return a dictionary of the args ready for
# use in pump software.
# @see get_args()
#
# :return: dict: parms
# """
# parms = {}
# args = get_args()
# for name, val in vars(args).items():
# if val is not None:
# parms[name] = val
# return parms
#
# def create_enum(filename, query, parms, trim=0, skip=0):
# """
# Given, query, parms and a filename, execute the query and write the enum into the file
# :param: filename: name of the file to contain the enumeration
# :param: query: the query to be used to create the columns for the enumeration
# :param: parms: dictionary of VIVO SPARQL API parameters
# :param: trim: If 0, no trim. If k, return the first k characters as a trimmed value for short
# :param: skip: If 0, no skip. If k, skip the first k characters as a trimmed value for short
# :return: None
# """
# import codecs
# data = vivo_query(query, parms)
# outfile = codecs.open(filename, mode='w', encoding='ascii', errors='xmlcharrefreplace')
# outfile.write("short\tvivo\n")
# for item in data['results']['bindings']:
# if trim == 0 and skip==0:
# outfile.write(item["short"]["value"] + "\t" + item["vivo"]["value"] + "\n")
# elif trim != 0 and skip == 0:
# outfile.write(item["short"]["value"][:trim] + "\t" + item["vivo"]["value"] + "\n")
# elif trim == 0 and skip != 0:
# outfile.write(item["short"]["value"][skip:] + "\t" + item["vivo"]["value"] + "\n")
# else:
# outfile.write(item["short"]["value"][skip:-trim] + "\t" + item["vivo"]["value"] + "\n")
# outfile.close()
. Output only the next line. | WHERE |
Next line prediction: <|code_start|>def main():
"""
Generate the enums for publications
"""
print datetime.now(), "Start"
parms = get_parms()
# person via Orcid
query = """
SELECT (MIN (?xshort) AS ?short) ?vivo
WHERE
{
?vivo vivo:orcidId ?xshort .
}
GROUP BY ?vivo
ORDER BY ?short
"""
create_enum("orcid_enum.txt", query, parms)
# journals via issn
query = """
SELECT (MIN (?xlabel) AS ?short) ?vivo
WHERE
{
?vivo a bibo:Journal .
?vivo rdfs:label ?xlabel .
}
<|code_end|>
. Use current file imports:
(from datetime import datetime
from pump.vivopump import get_parms, create_enum)
and context including class names, function names, or small code snippets from other files:
# Path: pump/vivopump.py
# def get_parms():
# """
# Use get_args to get the args, and return a dictionary of the args ready for
# use in pump software.
# @see get_args()
#
# :return: dict: parms
# """
# parms = {}
# args = get_args()
# for name, val in vars(args).items():
# if val is not None:
# parms[name] = val
# return parms
#
# def create_enum(filename, query, parms, trim=0, skip=0):
# """
# Given, query, parms and a filename, execute the query and write the enum into the file
# :param: filename: name of the file to contain the enumeration
# :param: query: the query to be used to create the columns for the enumeration
# :param: parms: dictionary of VIVO SPARQL API parameters
# :param: trim: If 0, no trim. If k, return the first k characters as a trimmed value for short
# :param: skip: If 0, no skip. If k, skip the first k characters as a trimmed value for short
# :return: None
# """
# import codecs
# data = vivo_query(query, parms)
# outfile = codecs.open(filename, mode='w', encoding='ascii', errors='xmlcharrefreplace')
# outfile.write("short\tvivo\n")
# for item in data['results']['bindings']:
# if trim == 0 and skip==0:
# outfile.write(item["short"]["value"] + "\t" + item["vivo"]["value"] + "\n")
# elif trim != 0 and skip == 0:
# outfile.write(item["short"]["value"][:trim] + "\t" + item["vivo"]["value"] + "\n")
# elif trim == 0 and skip != 0:
# outfile.write(item["short"]["value"][skip:] + "\t" + item["vivo"]["value"] + "\n")
# else:
# outfile.write(item["short"]["value"][skip:-trim] + "\t" + item["vivo"]["value"] + "\n")
# outfile.close()
. Output only the next line. | GROUP BY ?vivo |
Here is a snippet: <|code_start|>
try:
except ImportError: # pragma: noqa
_session_factory = None
def _column_keys(query):
return [(column.primary_key, column.key) for column in query._primary_entity.entity_zero.columns]
@add_metaclass(abc.ABCMeta)
<|code_end|>
. Write the next line using the current file imports:
import abc
from pyramid.httpexceptions import HTTPNotFound
from sqlalchemy.orm import object_session
from ..compat import add_metaclass
from pyramid_sqlalchemy import Session as _session_factory
and context from other files:
# Path: src/rest_toolkit/compat.py
# def add_metaclass(metaclass):
# """Class decorator for creating a class with a metaclass."""
# def wrapper(cls):
# orig_vars = cls.__dict__.copy()
# orig_vars.pop('__dict__', None)
# orig_vars.pop('__weakref__', None)
# slots = orig_vars.get('__slots__')
# if slots is not None:
# if isinstance(slots, str):
# slots = [slots]
# for slots_var in slots:
# orig_vars.pop(slots_var)
# return metaclass(cls.__name__, cls.__bases__, orig_vars)
# return wrapper
, which may include functions, classes, or code. Output only the next line. | class SQLResource(object): |
Based on the snippet: <|code_start|>
class ViewWriter(object):
def __init__(self, view):
self._view_lock = threading.Lock()
self._newline = True
self.view = view
def set_view(self, view):
with self._view_lock:
self.view = view
self._newline = True
def write(self, text, timestamp=""):
if not self.view.is_valid():
return
# If timestamps are enabled, append a timestamp to the start of each line
if timestamp:
# Newline was stripped from the end of the last write, needs to be
# added to the beginning of this write
if self._newline:
text = timestamp + text
self._newline = False
# Count the number of newlines in the text to add a timestamp to
# if the text ends with a newline, do not add a timestamp to the next
# line and instead add it with the next text received
newlines = text.count("\n")
if text[-1] == '\n':
newlines -= 1
self._newline = True
<|code_end|>
, predict the immediate next line with the help of imports:
import threading
import time
import util
import logger
from filter.manager import FilterManager
and context (classes, functions, sometimes code) from other files:
# Path: filter/manager.py
# class FilterManager(object):
# def __init__(self):
# super(FilterManager, self).__init__()
# self._filters = []
# self.filter_lock = threading.Lock()
# self._incomplete_line = ""
#
# def add_filter(self, new_filter, output_view):
# """
# :type new_filter: serial_filter.FilterFile
# """
# filter_args = _FilterArgs(new_filter, output_view)
# with self.filter_lock:
# self._filters.append(filter_args)
#
# def remove_filter(self, filter_to_remove):
# """
# :type filter_to_remove: serial_filter.FilterFile
# """
# filter_files = [f.filter_file for f in self._filters]
# if filter_to_remove in filter_files:
# with self.filter_lock:
# i = filter_files.index(filter_to_remove)
# filter_args = self._filters[i]
# filter_args.write("Filter Disabled")
# self._filters.remove(filter_args)
#
# def port_closed(self, port_name):
# with self.filter_lock:
# for f in self._filters:
# f.write("Disconnected from {}".format(port_name))
#
# def filters(self):
# return [f.filter_file for f in self._filters]
#
# def apply_filters(self, text, timestamp=""):
# if len(self._filters) == 0:
# return
# lines = self._split_text(text)
# if len(lines) == 0:
# return
#
# filters_to_remove = []
# # Loop through all lines and all filters for matches
# for line in lines:
# with self.filter_lock:
# for f in self._filters:
# if not f.view or not f.filter_file:
# continue
#
# if not f.view.is_valid():
# filters_to_remove.append(f)
# else:
# f.apply(line, timestamp)
#
# # If any filters have invalid views, remove from the list
# with self.filter_lock:
# for f in filters_to_remove:
# self._filters.remove(f)
#
# def _split_text(self, text):
# lines = text.splitlines(True)
# if len(lines) == 0:
# return lines
#
# # Append the last incomplete line to the beginning of this text
# lines[0] = self._incomplete_line + lines[0]
# self._incomplete_line = ""
#
# # Check if the last line is complete. If not, pop it from the end of the list and save it as an incomplete line
# if not lines[-1].endswith("\n"):
# self._incomplete_line = lines.pop()
# return lines
. Output only the next line. | text = text.replace("\n", "\n%s" % timestamp, newlines)
|
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
"""Generates a response"""
# Semantics constants
KNOWN_ACTIONS = set(ACTION_ALIASES.values())
# Response constants
DUNNO = "Sorry, I don't know how to%s."
GOTIT = "Got it. I'll%s."
MISUNDERSTAND = "Sorry, I didn't understand that at all."
def make_response(new_commands, kb_response):
"""Make a response based on the new commands
and the knowledge base response."""
if not new_commands:
# Use knowledge base response if available, otherwise give up
if kb_response:
return kb_response
else:
return MISUNDERSTAND
# Split into good and bad commands, futher filtering the good ones
good_commands = []
bad_commands = []
# TODO: handle unknown entities
for c in new_commands:
if c.action in KNOWN_ACTIONS:
<|code_end|>
with the help of current file imports:
from semantics.lexical_constants import ACTION_ALIASES
and context from other files:
# Path: semantics/lexical_constants.py
# ACTION_ALIASES = {
# 'appear': GO_ACTION,
# 'get': GET_ACTION,
# 'obtain': GET_ACTION,
# 'meander': GO_ACTION,
# 'slide': GO_ACTION,
# 'nonvehicle': GO_ACTION,
# 'escape': GO_ACTION,
# 'rummage': SEARCH_ACTION,
# 'characterize': SEE_ACTION,
# 'chase': FOLLOW_ACTION,
# 'lodge': STAY_ACTION,
# SEARCH_ACTION: SEARCH_ACTION,
# GO_ACTION: GO_ACTION,
# GET_ACTION: GET_ACTION,
# FOLLOW_ACTION: FOLLOW_ACTION,
# SEE_ACTION: SEE_ACTION,
# TELL_ACTION: TELL_ACTION,
# BEGIN_ACTION: BEGIN_ACTION,
# ACTIVATE_ACTION: ACTIVATE_ACTION,
# DEACTIVATE_ACTION: DEACTIVATE_ACTION,
# AVOID_ACTION: AVOID_ACTION,
# PATROL_ACTION: PATROL_ACTION,
# CARRY_ACTION: CARRY_ACTION,
# STAY_ACTION: STAY_ACTION,
# DEFUSE_ACTION: DEFUSE_ACTION,
# }
, which may contain function names, class names, or code. Output only the next line. | good_commands.append(c) |
Using the snippet: <|code_start|> if self.type == 'all':
return self.type
if self.number == 0:
return 'no'
if self.number == 1:
return 'the'
else:
return str(self.number)
else:
if self.number == 1:
return 'a'
else:
return 'any'
def __str__(self, lvl=0):
indent = '\t'*(lvl)
return '\n' + indent + '\tDefinite: %s\n' % str(self.definite) + \
indent + '\tType: %s\n' % str(self.type) +\
indent + '\tNumber: %s' % str(self.number)
def fill_determiner(self, dt):
"""Fills self with a determiner by merging it with
a new quantifier created with that determiner"""
self.merge(Quantifier(dt=dt))
def fill_cardinal(self, cd):
"""Fills self with a cardinal number by merging it with
a new quantifier created with that cardinal number"""
self.merge(Quantifier(cd=cd))
<|code_end|>
, determine the next line of code. You have imports:
from semantics.util import text2int, is_pronoun
and context (class names, function names, or code) available:
# Path: semantics/util.py
# def text2int(textnum):
# """From recursive at
# http://stackoverflow.com/questions/493174/is-there-a-way-to-convert-number-words-to-integers-python
#
# Converts number words to integers.
# """
# numwords = {}
# units = [
# "zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
# "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
# "sixteen", "seventeen", "eighteen", "nineteen",
# ]
#
# tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
#
# scales = ["hundred", "thousand", "million", "billion", "trillion"]
#
# numwords["and"] = (1, 0)
# for idx, word in enumerate(units):
# numwords[word] = (1, idx)
# for idx, word in enumerate(tens):
# numwords[word] = (1, idx * 10)
# for idx, word in enumerate(scales):
# numwords[word] = (10 ** (idx * 3 or 2), 0)
#
# current = result = 0
# for word in textnum.split():
# if word not in numwords:
# return None
#
# scale, increment = numwords[word]
# current = current * scale + increment
# if scale > 100:
# result += current
# current = 0
#
# return result + current
#
# def is_pronoun(word):
# """Return whether a word is a pronoun."""
# return word.lower() in PRONOUNS if word else False
. Output only the next line. | def merge(self, other): |
Predict the next line after this snippet: <|code_start|> if cd != None:
self.number = cd if cd.isdigit() else text2int(cd)
def readable(self):
if self.definite:
if self.type == 'all':
return self.type
if self.number == 0:
return 'no'
if self.number == 1:
return 'the'
else:
return str(self.number)
else:
if self.number == 1:
return 'a'
else:
return 'any'
def __str__(self, lvl=0):
indent = '\t'*(lvl)
return '\n' + indent + '\tDefinite: %s\n' % str(self.definite) + \
indent + '\tType: %s\n' % str(self.type) +\
indent + '\tNumber: %s' % str(self.number)
def fill_determiner(self, dt):
"""Fills self with a determiner by merging it with
a new quantifier created with that determiner"""
self.merge(Quantifier(dt=dt))
<|code_end|>
using the current file's imports:
from semantics.util import text2int, is_pronoun
and any relevant context from other files:
# Path: semantics/util.py
# def text2int(textnum):
# """From recursive at
# http://stackoverflow.com/questions/493174/is-there-a-way-to-convert-number-words-to-integers-python
#
# Converts number words to integers.
# """
# numwords = {}
# units = [
# "zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
# "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
# "sixteen", "seventeen", "eighteen", "nineteen",
# ]
#
# tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
#
# scales = ["hundred", "thousand", "million", "billion", "trillion"]
#
# numwords["and"] = (1, 0)
# for idx, word in enumerate(units):
# numwords[word] = (1, idx)
# for idx, word in enumerate(tens):
# numwords[word] = (1, idx * 10)
# for idx, word in enumerate(scales):
# numwords[word] = (10 ** (idx * 3 or 2), 0)
#
# current = result = 0
# for word in textnum.split():
# if word not in numwords:
# return None
#
# scale, increment = numwords[word]
# current = current * scale + increment
# if scale > 100:
# result += current
# current = 0
#
# return result + current
#
# def is_pronoun(word):
# """Return whether a word is a pronoun."""
# return word.lower() in PRONOUNS if word else False
. Output only the next line. | def fill_cardinal(self, cd): |
Based on the snippet: <|code_start|> Fiducial.TYPE_HOSTAGE: "icon-hostage",
Fiducial.TYPE_BADGUY: "icon-badguy",
Fiducial.TYPE_USER1: "icon-user1",
Fiducial.TYPE_USER2: "icon-user2"}
def SerializeAction(event, argument):
"""Serialize an action message."""
validActions = ['highlight', 'rename', 'showimage', 'hideimage', 'text']
#make sure event is one of the above
validActions.index(event)
# Create the JSON
tmp = dict()
tmp['type'] = "action"
tmp['data'] = dict()
tmp['data']['event'] = event
tmp['data']['argument'] = argument
return json.dumps(tmp)
def SerializeAddObj(obj_name, obj_type, location):
'''Serialize Add Obj Request
@param obj_name String name of map object
@param obj_type String either "region" or "icon"
@param location [[x1,y1], [x2,y2] ...] - should only be one "point" for icon'''
validObjTypes = ['region', 'icon-bomb', 'icon-hostage', 'icon-badguy',
'icon-user1', 'icon-user2', 'icon-robot']
validObjTypes.index(obj_type)
new_obj = dict()
new_obj['type'] = "mapupdate"
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import rospy
from threading import Thread, Lock
from time import sleep
from commproxy import CallbackSocket
from worldmap import get_topo_map, get_occupancy_grid
from subtle_msgs.msg import Fiducial
from tf import TransformListener
from tf import ExtrapolationException
from geometry_msgs.msg import *
and context (classes, functions, sometimes code) from other files:
# Path: commproxy.py
# class CallbackSocket(object):
# """Listen on a socket and call back when a message comes in."""
# name = "commproxy"
#
# def __init__(self, port, msg_sep="\n", local=False):
# self.msg_sep = msg_sep
# self.callbacks = []
# self.queue = Queue()
# self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# self._sock.bind(('' if not local else 'localhost', port))
# self._sock.listen(5)
# self._callback_lock = threading.Lock()
#
# # Make a thread that accepts new connections
# accept_thread = threading.Thread(target=self._accept)
# accept_thread.daemon = True
# accept_thread.start()
#
# # Start the callback thread for synchronously handling requests
# callback_thread = threading.Thread(target=self._pumpmsg)
# callback_thread.daemon = True
# callback_thread.start()
#
# def _accept(self):
# """Accept connections on the listening port."""
# timed_out = False # Use this flag to avoid printing when we timeout
# while True:
# if not timed_out:
# print "%s: Waiting for connection..." % self.name
# try:
# conn, addr = self._sock.accept()
# print "%s: Connected to %s" % (self.name, str(addr))
# # TODO: Add support for unique client names
# client = ClientHandler(conn, addr, self)
# client.daemon = True
# client.start()
#
# timed_out = False
# except timeout:
# timed_out = True
# continue
# except: # Because any error can occur here during shutdown
# break
#
# def shutdown(self):
# """Shutdown the socket."""
# print "%s: Shutting down socket." % self.name
# try:
# self._sock.shutdown(socket.SHUT_RDWR)
# except error:
# pass
# self._sock.close()
# print "%s: Socket closed." % self.name
#
# def register_callback(self, func):
# """Add a callback to the set of callbacks."""
# with self._callback_lock:
# self.callbacks.append(func)
#
# def _pumpmsg(self):
# """Read messages and synchronously call back listeners."""
# while True:
# msg, client = self.queue.get()
# # Lock just in case callbacks change while we are pumping
# with self._callback_lock:
# for func in self.callbacks:
# func(msg, client)
. Output only the next line. | new_obj['data'] = dict() |
Continue the code snippet: <|code_start|># it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class KnowledgeBase:
"""Stores knowledge about the world"""
def __init__(self, other_agents=None):
self.facts = [MapFact()]
if other_agents:
self.facts.extend(KnowledgeFact(agent) for agent in other_agents)
self.last_object = None
self.last_location = None
def process_semantic_structures(self, semantic_structures, source=None):
"""Processes semantic structures and returns a response
if given a query"""
response = ''
for structure in semantic_structures:
if isinstance(structure, Assertion):
self.assimilate(structure, source)
response = 'Got it. %s' % structure.readable()
elif isinstance(structure, Query):
<|code_end|>
. Use current file imports:
from semantics.new_structures import Assertion, Query, YNQuery, \
LocationQuery, EntityQuery, Command
from semantics.util import is_pronoun
and context (classes, functions, or code) from other files:
# Path: semantics/new_structures.py
# class Assertion(object):
# """Asserts the existence or property of an Entity in the world."""
#
# def __init__(self, theme, location, existential=False):
# self.theme = theme
# self.location = location
# self.existential = existential
#
# def readable(self):
# return '{!r} is/are in {!r}'.format(self.theme.readable(case=True), self.location.readable())
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'Assertion: \n' + \
# (indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1) if self.theme else '') + \
# (indent + '\tLocation: %s\n' % self.location.__str__(lvl + 1) if self.location else '')+ \
# indent + '\tExistential: %s' % str(self.existential)
#
# def __repr__(self):
# return str(self)
#
# class Query(object):
# """Base class for all queries"""
# def __repr__(self):
# return str(self)
#
# class YNQuery(Query):
# """Yes/No queries."""
# def __init__(self, theme, location):
# self.theme = theme
# self.location = location
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'YNQuery: \n' + \
# indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1) + \
# indent + '\tLocation: %s' % self.location.__str__(lvl + 1)
#
# class LocationQuery(Query):
# """Where queries"""
# def __init__(self, theme):
# self.theme = theme
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return '\n' + indent + 'LocationQuery: \n' + \
# indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1)
#
# class EntityQuery(Query):
# """Who/What queries"""
# def __init__(self, location):
# self.location = location
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'EntityQuery: \n' + \
# indent + '\tLocation: %s\n' % self.location.__str__(lvl + 1)
#
# class Command(object):
# """A Command for Junior to do something."""
#
# def __init__(self, agent, theme, patient, location, source, destination, action,
# condition=None, negation=False):
# self.agent = agent
# self.theme = theme
# self.patient = patient
# self.location = location
# self.source = source
# self.destination = destination
# self.action = action
# self.condition = condition
# self.negation = negation
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl + 1)
# return 'Command: \n' + \
# (indent + 'Agent: ' + self.agent.__str__(lvl + 1) + '\n' if self.agent else '') + \
# indent + 'Action: ' + str(self.action) + '\n' + \
# (indent + 'Theme: ' + self.theme.__str__(lvl + 1) + '\n' if self.theme else '') + \
# (indent + 'Patient:' + self.patient.__str__(lvl + 1) + '\n' if self.patient else '') + \
# (indent + 'Location: ' + self.location.__str__(lvl + 1) + '\n' if self.location else '') + \
# (indent + 'Source: ' + self.source.__str__(lvl + 1) + '\n' if self.source else '') + \
# (indent + 'Destination: ' + self.destination.__str__(lvl + 1) + '\n' if self.destination else '') + \
# (indent + 'Condition: ' + self.condition.__str__(lvl + 1) + '\n' if self.condition else '') + \
# indent + 'Negation: ' + str(self.negation)
#
# def __repr__(self):
# return str(self)
#
# def readable(self):
# response = ''
# if self.negation:
# response += ' not'
# if not self.action:
# return ''
# else:
# response += ' %s' % self.action
# if self.theme:
# response += ' %s' % self.theme.readable(case=False)
# elif self.patient:
# response += ' %s' % self.patient.readable(case=False)
# if self.location:
# response += ' in %s' % self.location.readable()
# if self.source:
# response += ' from %s' % self.source.readable()
# if self.destination:
# response += ' to %s' % self.destination.readable()
# if self.condition:
# response += ' if %s' % self.condition.readable()
# return response
#
# Path: semantics/util.py
# def is_pronoun(word):
# """Return whether a word is a pronoun."""
# return word.lower() in PRONOUNS if word else False
. Output only the next line. | response = self.query(structure) |
Given the code snippet: <|code_start|> return '\n'.join(responses)
return None
def fill_commands(self, commands):
"""Fills in underspecified fields
based on current knowledge"""
for c in commands:
if isinstance(c, Command):
if self.last_object:
if c.theme and is_pronoun(c.theme.name):
c.theme.name = self.last_object.name
if c.patient and is_pronoun(c.patient.name):
c.patient.name = self.last_object.name
if c.condition and c.condition.theme\
and is_pronoun(c.condition.theme.name):
c.condition.theme.name = self.last_object.name
if self.last_location:
if c.location and c.location.name == 'there':
c.location.name = self.last_location.name
for f in self.facts:
if isinstance(f, MapFact):
if c.destination and not c.source:
# Fill in source
if c.theme:
result = f.query_map(None, c.theme)
if len(result) > 0:
c.source = result[0]
if not c.location and not c.destination \
and not c.source:
# Fill in location
<|code_end|>
, generate the next line using the imports in this file:
from semantics.new_structures import Assertion, Query, YNQuery, \
LocationQuery, EntityQuery, Command
from semantics.util import is_pronoun
and context (functions, classes, or occasionally code) from other files:
# Path: semantics/new_structures.py
# class Assertion(object):
# """Asserts the existence or property of an Entity in the world."""
#
# def __init__(self, theme, location, existential=False):
# self.theme = theme
# self.location = location
# self.existential = existential
#
# def readable(self):
# return '{!r} is/are in {!r}'.format(self.theme.readable(case=True), self.location.readable())
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'Assertion: \n' + \
# (indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1) if self.theme else '') + \
# (indent + '\tLocation: %s\n' % self.location.__str__(lvl + 1) if self.location else '')+ \
# indent + '\tExistential: %s' % str(self.existential)
#
# def __repr__(self):
# return str(self)
#
# class Query(object):
# """Base class for all queries"""
# def __repr__(self):
# return str(self)
#
# class YNQuery(Query):
# """Yes/No queries."""
# def __init__(self, theme, location):
# self.theme = theme
# self.location = location
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'YNQuery: \n' + \
# indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1) + \
# indent + '\tLocation: %s' % self.location.__str__(lvl + 1)
#
# class LocationQuery(Query):
# """Where queries"""
# def __init__(self, theme):
# self.theme = theme
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return '\n' + indent + 'LocationQuery: \n' + \
# indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1)
#
# class EntityQuery(Query):
# """Who/What queries"""
# def __init__(self, location):
# self.location = location
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'EntityQuery: \n' + \
# indent + '\tLocation: %s\n' % self.location.__str__(lvl + 1)
#
# class Command(object):
# """A Command for Junior to do something."""
#
# def __init__(self, agent, theme, patient, location, source, destination, action,
# condition=None, negation=False):
# self.agent = agent
# self.theme = theme
# self.patient = patient
# self.location = location
# self.source = source
# self.destination = destination
# self.action = action
# self.condition = condition
# self.negation = negation
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl + 1)
# return 'Command: \n' + \
# (indent + 'Agent: ' + self.agent.__str__(lvl + 1) + '\n' if self.agent else '') + \
# indent + 'Action: ' + str(self.action) + '\n' + \
# (indent + 'Theme: ' + self.theme.__str__(lvl + 1) + '\n' if self.theme else '') + \
# (indent + 'Patient:' + self.patient.__str__(lvl + 1) + '\n' if self.patient else '') + \
# (indent + 'Location: ' + self.location.__str__(lvl + 1) + '\n' if self.location else '') + \
# (indent + 'Source: ' + self.source.__str__(lvl + 1) + '\n' if self.source else '') + \
# (indent + 'Destination: ' + self.destination.__str__(lvl + 1) + '\n' if self.destination else '') + \
# (indent + 'Condition: ' + self.condition.__str__(lvl + 1) + '\n' if self.condition else '') + \
# indent + 'Negation: ' + str(self.negation)
#
# def __repr__(self):
# return str(self)
#
# def readable(self):
# response = ''
# if self.negation:
# response += ' not'
# if not self.action:
# return ''
# else:
# response += ' %s' % self.action
# if self.theme:
# response += ' %s' % self.theme.readable(case=False)
# elif self.patient:
# response += ' %s' % self.patient.readable(case=False)
# if self.location:
# response += ' in %s' % self.location.readable()
# if self.source:
# response += ' from %s' % self.source.readable()
# if self.destination:
# response += ' to %s' % self.destination.readable()
# if self.condition:
# response += ' if %s' % self.condition.readable()
# return response
#
# Path: semantics/util.py
# def is_pronoun(word):
# """Return whether a word is a pronoun."""
# return word.lower() in PRONOUNS if word else False
. Output only the next line. | if c.theme: |
Here is a snippet: <|code_start|> def query(self, query):
""""Override this in subclasses"""
return None
def assimilate(self, assertion, source):
"""Override this in subclasses"""
pass
class MapFact(Fact):
"""Spatial map of the environment"""
def __init__(self):
# Mapping from location to a set of entities
self.env_map = {}
def query_map(self, location, theme):
""""Returns the missing argument.
If both arguments are present,
returns whether that mapping is found"""
if not location and not theme:
return None
elif not theme:
return self.env_map.get(location, None)
elif not location:
return [location for location, entities in \
self.env_map.items() if theme in entities]
else:
# Both arguments are present
if location in self.env_map:
return theme in self.env_map[location]
else:
<|code_end|>
. Write the next line using the current file imports:
from semantics.new_structures import Assertion, Query, YNQuery, \
LocationQuery, EntityQuery, Command
from semantics.util import is_pronoun
and context from other files:
# Path: semantics/new_structures.py
# class Assertion(object):
# """Asserts the existence or property of an Entity in the world."""
#
# def __init__(self, theme, location, existential=False):
# self.theme = theme
# self.location = location
# self.existential = existential
#
# def readable(self):
# return '{!r} is/are in {!r}'.format(self.theme.readable(case=True), self.location.readable())
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'Assertion: \n' + \
# (indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1) if self.theme else '') + \
# (indent + '\tLocation: %s\n' % self.location.__str__(lvl + 1) if self.location else '')+ \
# indent + '\tExistential: %s' % str(self.existential)
#
# def __repr__(self):
# return str(self)
#
# class Query(object):
# """Base class for all queries"""
# def __repr__(self):
# return str(self)
#
# class YNQuery(Query):
# """Yes/No queries."""
# def __init__(self, theme, location):
# self.theme = theme
# self.location = location
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'YNQuery: \n' + \
# indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1) + \
# indent + '\tLocation: %s' % self.location.__str__(lvl + 1)
#
# class LocationQuery(Query):
# """Where queries"""
# def __init__(self, theme):
# self.theme = theme
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return '\n' + indent + 'LocationQuery: \n' + \
# indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1)
#
# class EntityQuery(Query):
# """Who/What queries"""
# def __init__(self, location):
# self.location = location
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'EntityQuery: \n' + \
# indent + '\tLocation: %s\n' % self.location.__str__(lvl + 1)
#
# class Command(object):
# """A Command for Junior to do something."""
#
# def __init__(self, agent, theme, patient, location, source, destination, action,
# condition=None, negation=False):
# self.agent = agent
# self.theme = theme
# self.patient = patient
# self.location = location
# self.source = source
# self.destination = destination
# self.action = action
# self.condition = condition
# self.negation = negation
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl + 1)
# return 'Command: \n' + \
# (indent + 'Agent: ' + self.agent.__str__(lvl + 1) + '\n' if self.agent else '') + \
# indent + 'Action: ' + str(self.action) + '\n' + \
# (indent + 'Theme: ' + self.theme.__str__(lvl + 1) + '\n' if self.theme else '') + \
# (indent + 'Patient:' + self.patient.__str__(lvl + 1) + '\n' if self.patient else '') + \
# (indent + 'Location: ' + self.location.__str__(lvl + 1) + '\n' if self.location else '') + \
# (indent + 'Source: ' + self.source.__str__(lvl + 1) + '\n' if self.source else '') + \
# (indent + 'Destination: ' + self.destination.__str__(lvl + 1) + '\n' if self.destination else '') + \
# (indent + 'Condition: ' + self.condition.__str__(lvl + 1) + '\n' if self.condition else '') + \
# indent + 'Negation: ' + str(self.negation)
#
# def __repr__(self):
# return str(self)
#
# def readable(self):
# response = ''
# if self.negation:
# response += ' not'
# if not self.action:
# return ''
# else:
# response += ' %s' % self.action
# if self.theme:
# response += ' %s' % self.theme.readable(case=False)
# elif self.patient:
# response += ' %s' % self.patient.readable(case=False)
# if self.location:
# response += ' in %s' % self.location.readable()
# if self.source:
# response += ' from %s' % self.source.readable()
# if self.destination:
# response += ' to %s' % self.destination.readable()
# if self.condition:
# response += ' if %s' % self.condition.readable()
# return response
#
# Path: semantics/util.py
# def is_pronoun(word):
# """Return whether a word is a pronoun."""
# return word.lower() in PRONOUNS if word else False
, which may include functions, classes, or code. Output only the next line. | return None |
Here is a snippet: <|code_start|> def __init__(self):
pass
def query(self, query):
""""Override this in subclasses"""
return None
def assimilate(self, assertion, source):
"""Override this in subclasses"""
pass
class MapFact(Fact):
"""Spatial map of the environment"""
def __init__(self):
# Mapping from location to a set of entities
self.env_map = {}
def query_map(self, location, theme):
""""Returns the missing argument.
If both arguments are present,
returns whether that mapping is found"""
if not location and not theme:
return None
elif not theme:
return self.env_map.get(location, None)
elif not location:
return [location for location, entities in \
self.env_map.items() if theme in entities]
else:
# Both arguments are present
<|code_end|>
. Write the next line using the current file imports:
from semantics.new_structures import Assertion, Query, YNQuery, \
LocationQuery, EntityQuery, Command
from semantics.util import is_pronoun
and context from other files:
# Path: semantics/new_structures.py
# class Assertion(object):
# """Asserts the existence or property of an Entity in the world."""
#
# def __init__(self, theme, location, existential=False):
# self.theme = theme
# self.location = location
# self.existential = existential
#
# def readable(self):
# return '{!r} is/are in {!r}'.format(self.theme.readable(case=True), self.location.readable())
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'Assertion: \n' + \
# (indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1) if self.theme else '') + \
# (indent + '\tLocation: %s\n' % self.location.__str__(lvl + 1) if self.location else '')+ \
# indent + '\tExistential: %s' % str(self.existential)
#
# def __repr__(self):
# return str(self)
#
# class Query(object):
# """Base class for all queries"""
# def __repr__(self):
# return str(self)
#
# class YNQuery(Query):
# """Yes/No queries."""
# def __init__(self, theme, location):
# self.theme = theme
# self.location = location
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'YNQuery: \n' + \
# indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1) + \
# indent + '\tLocation: %s' % self.location.__str__(lvl + 1)
#
# class LocationQuery(Query):
# """Where queries"""
# def __init__(self, theme):
# self.theme = theme
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return '\n' + indent + 'LocationQuery: \n' + \
# indent + '\tTheme: %s\n' % self.theme.__str__(lvl + 1)
#
# class EntityQuery(Query):
# """Who/What queries"""
# def __init__(self, location):
# self.location = location
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl)
# return 'EntityQuery: \n' + \
# indent + '\tLocation: %s\n' % self.location.__str__(lvl + 1)
#
# class Command(object):
# """A Command for Junior to do something."""
#
# def __init__(self, agent, theme, patient, location, source, destination, action,
# condition=None, negation=False):
# self.agent = agent
# self.theme = theme
# self.patient = patient
# self.location = location
# self.source = source
# self.destination = destination
# self.action = action
# self.condition = condition
# self.negation = negation
#
# def __str__(self, lvl=0):
# indent = '\t'*(lvl + 1)
# return 'Command: \n' + \
# (indent + 'Agent: ' + self.agent.__str__(lvl + 1) + '\n' if self.agent else '') + \
# indent + 'Action: ' + str(self.action) + '\n' + \
# (indent + 'Theme: ' + self.theme.__str__(lvl + 1) + '\n' if self.theme else '') + \
# (indent + 'Patient:' + self.patient.__str__(lvl + 1) + '\n' if self.patient else '') + \
# (indent + 'Location: ' + self.location.__str__(lvl + 1) + '\n' if self.location else '') + \
# (indent + 'Source: ' + self.source.__str__(lvl + 1) + '\n' if self.source else '') + \
# (indent + 'Destination: ' + self.destination.__str__(lvl + 1) + '\n' if self.destination else '') + \
# (indent + 'Condition: ' + self.condition.__str__(lvl + 1) + '\n' if self.condition else '') + \
# indent + 'Negation: ' + str(self.negation)
#
# def __repr__(self):
# return str(self)
#
# def readable(self):
# response = ''
# if self.negation:
# response += ' not'
# if not self.action:
# return ''
# else:
# response += ' %s' % self.action
# if self.theme:
# response += ' %s' % self.theme.readable(case=False)
# elif self.patient:
# response += ' %s' % self.patient.readable(case=False)
# if self.location:
# response += ' in %s' % self.location.readable()
# if self.source:
# response += ' from %s' % self.source.readable()
# if self.destination:
# response += ' to %s' % self.destination.readable()
# if self.condition:
# response += ' if %s' % self.condition.readable()
# return response
#
# Path: semantics/util.py
# def is_pronoun(word):
# """Return whether a word is a pronoun."""
# return word.lower() in PRONOUNS if word else False
, which may include functions, classes, or code. Output only the next line. | if location in self.env_map: |
Given the code snippet: <|code_start|> | rp(lam, unit='kev') : Returns real part (unit='kev'|'angs')
| ip(lam, unit='kev') : Returns imaginary part (always 0.0)
| cm(lam, unit='kev') : Complex index of refraction of dtype='complex'
| plot(lam, unit='kev') : Plots Re(m-1)
"""
def __init__(self, rho=RHO_DRUDE): # Returns a CM using the Drude approximation
self.cmtype = 'Drude'
self.rho = rho
self.citation = "Using the Drude approximation.\nBohren, C. F. & Huffman, D. R., 1983, Absorption and Scattering of Light by Small Particles (New York: Wiley)"
def rp(self, lam, unit='kev'):
assert unit in c.ALLOWED_LAM_UNITS
lam_cm = c._lam_cm(lam, unit)
mm1 = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm, 2)
return mm1 + 1.0
'''# Returns 1 if the wavelength supplied is too low energy (i.e. inappropriate for applying Drude)
mm1 = np.zeros(np.size(lam_cm))
if (np.size(lam_cm) == 1):
if lam_cm >= LAM_MAX:
pass
else:
mm1 = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm, 2)
else:
ii = (lam_cm <= LAM_MAX)
mm1[ii] = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm[ii], 2)
return mm1 + 1.0'''
def ip(self, lam, unit='kev'):
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
from newdust import constants as c
and context (functions, classes, or occasionally code) from other files:
# Path: newdust/constants.py
# ALLOWED_LAM_UNITS = ['kev','angs','keV','Angs','angstrom','Angstrom']
# def intz(x, y):
# def trapezoidal_int(x, y):
# def _lam_cm(lam, unit='kev'):
# def _lam_kev(lam, unit='kev'):
# def _make_array(scalar):
. Output only the next line. | if np.size(lam) > 1: |
Using the snippet: <|code_start|># coding=utf-8
# Copyright 2022 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Check that a dataset's .tfrecords and datataset_spec.json are consistent.
In particular, check that:
- The name of the directory containing these files corresponds to the "dataset"
field of dataset_spec.
- The number of .tfrecords corresponds to the number of classes, and they are
numbered sequentially
- The number of examples in each .tfrecords file corresponds to the one in the
<|code_end|>
, determine the next line of code. You have imports:
import json
import os
import tensorflow.compat.v1 as tf
from absl import app
from absl import flags
from absl import logging
from meta_dataset.data import dataset_spec as dataset_spec_lib
and context (class names, function names, or code) available:
# Path: meta_dataset/data/dataset_spec.py
# def get_classes(split, classes_per_split):
# def _check_validity_of_restricted_classes_per_split(
# restricted_classes_per_split, classes_per_split):
# def get_total_images_per_class(data_spec, class_id=None, pool=None):
# def __new__(cls, name, image_shape, dataset_spec_list, has_dag_ontology,
# has_bilevel_ontology, splits_to_contribute):
# def initialize(self, restricted_classes_per_split=None):
# def get_total_images_per_class(self, class_id=None, pool=None):
# def get_classes(self, split):
# def to_dict(self):
# def initialize(self, restricted_classes_per_split=None):
# def get_total_images_per_class(self, class_id=None, pool=None):
# def get_superclasses(self, split):
# def _count_classes_in_superclasses(self, superclass_ids):
# def _get_split_offset(self, split):
# def get_classes(self, split):
# def get_class_ids_from_superclass_subclass_inds(self, split, superclass_id,
# class_inds):
# def to_dict(self):
# def initialize(self, restricted_classes_per_split=None):
# def get_classes_per_split(self):
# def count_split_classes(split):
# def get_split_subgraph(self, split):
# def get_classes(self, split):
# def get_total_images_per_class(self, class_id, pool=None):
# def to_dict(self):
# def as_dataset_spec(dct):
# def _key_to_int(dct):
# def _key_to_split(dct):
# def load_dataset_spec(dataset_records_path, convert_from_pkl=False):
# class BenchmarkSpecification(
# collections.namedtuple(
# 'BenchmarkSpecification', 'name, image_shape, dataset_spec_list,'
# 'has_dag_ontology, has_bilevel_ontology, splits_to_contribute')):
# class DatasetSpecification(
# collections.namedtuple('DatasetSpecification',
# ('name, classes_per_split, images_per_class, '
# 'class_names, path, file_pattern'))):
# class BiLevelDatasetSpecification(
# collections.namedtuple('BiLevelDatasetSpecification',
# ('name, superclasses_per_split, '
# 'classes_per_superclass, images_per_class, '
# 'superclass_names, class_names, path, '
# 'file_pattern'))):
# class HierarchicalDatasetSpecification(
# collections.namedtuple('HierarchicalDatasetSpecification',
# ('name, split_subgraphs, images_per_class, '
# 'class_names, path, file_pattern'))):
. Output only the next line. | dataset_spec. |
Given the code snippet: <|code_start|> def totalize(self, discount=Decimal(0), surcharge=Decimal(0),
taxcode=TaxType.NONE):
log.info('totalize(discount=%r, surcharge=%r, taxcode=%r)' % (
discount, surcharge, taxcode))
if discount and surcharge:
raise TypeError("discount and surcharge can not be used together")
if surcharge and taxcode == TaxType.NONE:
raise ValueError("to specify a surcharge you need specify its "
"tax code")
result = self._driver.coupon_totalize(discount, surcharge, taxcode)
self._has_been_totalized = True
self.totalized_value = result
return result
def add_payment(self, payment_method: str, payment_value: Decimal, description=''):
log.info("add_payment(method=%r, value=%r, description=%r)" % (
payment_method, payment_value, description))
if not self._has_been_totalized:
raise PaymentAdditionError(_("You must totalize the coupon "
"before add payments."))
result = self._driver.coupon_add_payment(
payment_method, payment_value,
self._format_text(description))
self.payments_total_value += payment_value
return result
def cancel(self):
log.info('coupon_cancel()')
<|code_end|>
, generate the next line using the imports in this file:
from collections import namedtuple
from decimal import Decimal
from numbers import Real
from stoqdrivers.exceptions import (CloseCouponError, PaymentAdditionError,
AlreadyTotalized, InvalidValue)
from stoqdrivers.enum import TaxType, UnitType
from stoqdrivers.printers.base import BasePrinter
from stoqdrivers.utils import encode_text
from stoqdrivers.translation import stoqdrivers_gettext
import datetime
import logging
import traceback
import sys
and context (functions, classes, or occasionally code) from other files:
# Path: stoqdrivers/exceptions.py
# class CloseCouponError(DriverError):
# "Could not close the coupon."
#
# class PaymentAdditionError(DriverError):
# "Error while adding a payment."
#
# class AlreadyTotalized(DriverError):
# "The coupon is already totalized"
#
# class InvalidValue(DriverError):
# "The value specified is invalid or is not in the expected range"
#
# Path: stoqdrivers/enum.py
# class TaxType(IntEnum):
# """
# Enum for taxes
# """
# (ICMS,
# SUBSTITUTION,
# EXEMPTION,
# NONE,
# SERVICE,
# CUSTOM) = range(40, 46)
#
# class UnitType(IntEnum):
# """
# Enum for units
# """
# (WEIGHT,
# METERS,
# LITERS,
# EMPTY,
# CUSTOM) = range(20, 25)
#
# Path: stoqdrivers/printers/base.py
# class BasePrinter(BaseDevice):
# device_dirname = "printers"
# device_type = DeviceType.PRINTER
#
# def check_interfaces(self):
# driver_interfaces = providedBy(self._driver)
# if (ICouponPrinter not in driver_interfaces
# and IChequePrinter not in driver_interfaces
# and INonFiscalPrinter not in driver_interfaces):
# raise TypeError("The driver `%r' doesn't implements a valid "
# "interface" % self._driver)
#
# def get_constants(self):
# return self._driver.get_constants()
#
# def get_tax_constant(self, item):
# for enum, constant, value in self.get_tax_constants():
# if enum == item:
# return constant
#
# def get_model_name(self):
# return self._driver.model_name
#
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
# # Only do the bellow conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
. Output only the next line. | retval = self._driver.coupon_cancel() |
Given snippet: <|code_start|> try:
self.setup()
self._setup_complete = True
except Exception:
log.error(''.join(traceback.format_exception(*sys.exc_info())))
self._setup_complete = False
def setup_complete(self):
return self._setup_complete
def get_capabilities(self):
return self._capabilities
def _format_text(self, text):
return encode_text(text, self._charset)
def setup(self):
log.info('setup()')
self._driver.setup()
def identify_customer(self, customer_name: str, customer_address: str, customer_id: str):
log.info('identify_customer(customer_name=%r, '
'customer_address=%r, customer_id=%r)' % (
customer_name, customer_address, customer_id))
self._driver.coupon_identify_customer(
self._format_text(customer_name),
self._format_text(customer_address),
self._format_text(customer_id))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from collections import namedtuple
from decimal import Decimal
from numbers import Real
from stoqdrivers.exceptions import (CloseCouponError, PaymentAdditionError,
AlreadyTotalized, InvalidValue)
from stoqdrivers.enum import TaxType, UnitType
from stoqdrivers.printers.base import BasePrinter
from stoqdrivers.utils import encode_text
from stoqdrivers.translation import stoqdrivers_gettext
import datetime
import logging
import traceback
import sys
and context:
# Path: stoqdrivers/exceptions.py
# class CloseCouponError(DriverError):
# "Could not close the coupon."
#
# class PaymentAdditionError(DriverError):
# "Error while adding a payment."
#
# class AlreadyTotalized(DriverError):
# "The coupon is already totalized"
#
# class InvalidValue(DriverError):
# "The value specified is invalid or is not in the expected range"
#
# Path: stoqdrivers/enum.py
# class TaxType(IntEnum):
# """
# Enum for taxes
# """
# (ICMS,
# SUBSTITUTION,
# EXEMPTION,
# NONE,
# SERVICE,
# CUSTOM) = range(40, 46)
#
# class UnitType(IntEnum):
# """
# Enum for units
# """
# (WEIGHT,
# METERS,
# LITERS,
# EMPTY,
# CUSTOM) = range(20, 25)
#
# Path: stoqdrivers/printers/base.py
# class BasePrinter(BaseDevice):
# device_dirname = "printers"
# device_type = DeviceType.PRINTER
#
# def check_interfaces(self):
# driver_interfaces = providedBy(self._driver)
# if (ICouponPrinter not in driver_interfaces
# and IChequePrinter not in driver_interfaces
# and INonFiscalPrinter not in driver_interfaces):
# raise TypeError("The driver `%r' doesn't implements a valid "
# "interface" % self._driver)
#
# def get_constants(self):
# return self._driver.get_constants()
#
# def get_tax_constant(self, item):
# for enum, constant, value in self.get_tax_constants():
# if enum == item:
# return constant
#
# def get_model_name(self):
# return self._driver.model_name
#
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
# # Only do the bellow conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
which might include code, classes, or functions. Output only the next line. | def coupon_is_customer_identified(self): |
Based on the snippet: <|code_start|>
if discount and surcharge:
raise TypeError("discount and surcharge can not be used together")
if surcharge and taxcode == TaxType.NONE:
raise ValueError("to specify a surcharge you need specify its "
"tax code")
result = self._driver.coupon_totalize(discount, surcharge, taxcode)
self._has_been_totalized = True
self.totalized_value = result
return result
def add_payment(self, payment_method: str, payment_value: Decimal, description=''):
log.info("add_payment(method=%r, value=%r, description=%r)" % (
payment_method, payment_value, description))
if not self._has_been_totalized:
raise PaymentAdditionError(_("You must totalize the coupon "
"before add payments."))
result = self._driver.coupon_add_payment(
payment_method, payment_value,
self._format_text(description))
self.payments_total_value += payment_value
return result
def cancel(self):
log.info('coupon_cancel()')
retval = self._driver.coupon_cancel()
self._has_been_totalized = False
self.payments_total_value = Decimal("0.0")
self.totalized_value = Decimal("0.0")
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import namedtuple
from decimal import Decimal
from numbers import Real
from stoqdrivers.exceptions import (CloseCouponError, PaymentAdditionError,
AlreadyTotalized, InvalidValue)
from stoqdrivers.enum import TaxType, UnitType
from stoqdrivers.printers.base import BasePrinter
from stoqdrivers.utils import encode_text
from stoqdrivers.translation import stoqdrivers_gettext
import datetime
import logging
import traceback
import sys
and context (classes, functions, sometimes code) from other files:
# Path: stoqdrivers/exceptions.py
# class CloseCouponError(DriverError):
# "Could not close the coupon."
#
# class PaymentAdditionError(DriverError):
# "Error while adding a payment."
#
# class AlreadyTotalized(DriverError):
# "The coupon is already totalized"
#
# class InvalidValue(DriverError):
# "The value specified is invalid or is not in the expected range"
#
# Path: stoqdrivers/enum.py
# class TaxType(IntEnum):
# """
# Enum for taxes
# """
# (ICMS,
# SUBSTITUTION,
# EXEMPTION,
# NONE,
# SERVICE,
# CUSTOM) = range(40, 46)
#
# class UnitType(IntEnum):
# """
# Enum for units
# """
# (WEIGHT,
# METERS,
# LITERS,
# EMPTY,
# CUSTOM) = range(20, 25)
#
# Path: stoqdrivers/printers/base.py
# class BasePrinter(BaseDevice):
# device_dirname = "printers"
# device_type = DeviceType.PRINTER
#
# def check_interfaces(self):
# driver_interfaces = providedBy(self._driver)
# if (ICouponPrinter not in driver_interfaces
# and IChequePrinter not in driver_interfaces
# and INonFiscalPrinter not in driver_interfaces):
# raise TypeError("The driver `%r' doesn't implements a valid "
# "interface" % self._driver)
#
# def get_constants(self):
# return self._driver.get_constants()
#
# def get_tax_constant(self, item):
# for enum, constant, value in self.get_tax_constants():
# if enum == item:
# return constant
#
# def get_model_name(self):
# return self._driver.model_name
#
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
# # Only do the bellow conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
. Output only the next line. | return retval |
Given the code snippet: <|code_start|> 'customer_address=%r, customer_id=%r)' % (
customer_name, customer_address, customer_id))
self._driver.coupon_identify_customer(
self._format_text(customer_name),
self._format_text(customer_address),
self._format_text(customer_id))
def coupon_is_customer_identified(self):
return self._driver.coupon_is_customer_identified()
def has_open_coupon(self):
log.info('has_open_coupon()')
return self._driver.has_open_coupon()
def open(self):
log.info('coupon_open()')
return self._driver.coupon_open()
def add_item(self, item_code: str, item_description: str, item_price: Real, taxcode: TaxType,
items_quantity=Decimal("1.0"), unit=UnitType.EMPTY,
discount=Decimal("0.0"), surcharge=Decimal("0.0"),
unit_desc=""):
log.info("add_item(code=%r, description=%r, price=%r, "
"taxcode=%r, quantity=%r, unit=%r, discount=%r, "
"surcharge=%r, unit_desc=%r)" % (
item_code, item_description, item_price, taxcode,
items_quantity, unit, discount, surcharge, unit_desc))
<|code_end|>
, generate the next line using the imports in this file:
from collections import namedtuple
from decimal import Decimal
from numbers import Real
from stoqdrivers.exceptions import (CloseCouponError, PaymentAdditionError,
AlreadyTotalized, InvalidValue)
from stoqdrivers.enum import TaxType, UnitType
from stoqdrivers.printers.base import BasePrinter
from stoqdrivers.utils import encode_text
from stoqdrivers.translation import stoqdrivers_gettext
import datetime
import logging
import traceback
import sys
and context (functions, classes, or occasionally code) from other files:
# Path: stoqdrivers/exceptions.py
# class CloseCouponError(DriverError):
# "Could not close the coupon."
#
# class PaymentAdditionError(DriverError):
# "Error while adding a payment."
#
# class AlreadyTotalized(DriverError):
# "The coupon is already totalized"
#
# class InvalidValue(DriverError):
# "The value specified is invalid or is not in the expected range"
#
# Path: stoqdrivers/enum.py
# class TaxType(IntEnum):
# """
# Enum for taxes
# """
# (ICMS,
# SUBSTITUTION,
# EXEMPTION,
# NONE,
# SERVICE,
# CUSTOM) = range(40, 46)
#
# class UnitType(IntEnum):
# """
# Enum for units
# """
# (WEIGHT,
# METERS,
# LITERS,
# EMPTY,
# CUSTOM) = range(20, 25)
#
# Path: stoqdrivers/printers/base.py
# class BasePrinter(BaseDevice):
# device_dirname = "printers"
# device_type = DeviceType.PRINTER
#
# def check_interfaces(self):
# driver_interfaces = providedBy(self._driver)
# if (ICouponPrinter not in driver_interfaces
# and IChequePrinter not in driver_interfaces
# and INonFiscalPrinter not in driver_interfaces):
# raise TypeError("The driver `%r' doesn't implements a valid "
# "interface" % self._driver)
#
# def get_constants(self):
# return self._driver.get_constants()
#
# def get_tax_constant(self, item):
# for enum, constant, value in self.get_tax_constants():
# if enum == item:
# return constant
#
# def get_model_name(self):
# return self._driver.model_name
#
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
# # Only do the bellow conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
. Output only the next line. | if self._has_been_totalized: |
Based on the snippet: <|code_start|># USA.
#
# Author(s): Johan Dahlin <jdahlin@async.com.br>
# Henrique Romano <henrique@async.com.br>
#
_ = stoqdrivers_gettext
log = logging.getLogger('stoqdrivers.fiscalprinter')
#
# FiscalPrinter interface
#
class FiscalPrinter(BasePrinter):
def __init__(self, brand=None, model=None, device=None, config_file=None,
*args, **kwargs):
BasePrinter.__init__(self, brand, model, device, config_file, *args,
**kwargs)
self._has_been_totalized = False
self.payments_total_value = Decimal("0.0")
self.totalized_value = Decimal("0.0")
self._capabilities = self._driver.get_capabilities()
self._charset = self._driver.coupon_printer_charset
try:
self.setup()
self._setup_complete = True
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import namedtuple
from decimal import Decimal
from numbers import Real
from stoqdrivers.exceptions import (CloseCouponError, PaymentAdditionError,
AlreadyTotalized, InvalidValue)
from stoqdrivers.enum import TaxType, UnitType
from stoqdrivers.printers.base import BasePrinter
from stoqdrivers.utils import encode_text
from stoqdrivers.translation import stoqdrivers_gettext
import datetime
import logging
import traceback
import sys
and context (classes, functions, sometimes code) from other files:
# Path: stoqdrivers/exceptions.py
# class CloseCouponError(DriverError):
# "Could not close the coupon."
#
# class PaymentAdditionError(DriverError):
# "Error while adding a payment."
#
# class AlreadyTotalized(DriverError):
# "The coupon is already totalized"
#
# class InvalidValue(DriverError):
# "The value specified is invalid or is not in the expected range"
#
# Path: stoqdrivers/enum.py
# class TaxType(IntEnum):
# """
# Enum for taxes
# """
# (ICMS,
# SUBSTITUTION,
# EXEMPTION,
# NONE,
# SERVICE,
# CUSTOM) = range(40, 46)
#
# class UnitType(IntEnum):
# """
# Enum for units
# """
# (WEIGHT,
# METERS,
# LITERS,
# EMPTY,
# CUSTOM) = range(20, 25)
#
# Path: stoqdrivers/printers/base.py
# class BasePrinter(BaseDevice):
# device_dirname = "printers"
# device_type = DeviceType.PRINTER
#
# def check_interfaces(self):
# driver_interfaces = providedBy(self._driver)
# if (ICouponPrinter not in driver_interfaces
# and IChequePrinter not in driver_interfaces
# and INonFiscalPrinter not in driver_interfaces):
# raise TypeError("The driver `%r' doesn't implements a valid "
# "interface" % self._driver)
#
# def get_constants(self):
# return self._driver.get_constants()
#
# def get_tax_constant(self, item):
# for enum, constant, value in self.get_tax_constants():
# if enum == item:
# return constant
#
# def get_model_name(self):
# return self._driver.model_name
#
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
# # Only do the bellow conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
. Output only the next line. | except Exception: |
Given snippet: <|code_start|>#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
# Author(s): Johan Dahlin <jdahlin@async.com.br>
# Henrique Romano <henrique@async.com.br>
#
_ = stoqdrivers_gettext
log = logging.getLogger('stoqdrivers.fiscalprinter')
#
# FiscalPrinter interface
#
class FiscalPrinter(BasePrinter):
def __init__(self, brand=None, model=None, device=None, config_file=None,
*args, **kwargs):
BasePrinter.__init__(self, brand, model, device, config_file, *args,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from collections import namedtuple
from decimal import Decimal
from numbers import Real
from stoqdrivers.exceptions import (CloseCouponError, PaymentAdditionError,
AlreadyTotalized, InvalidValue)
from stoqdrivers.enum import TaxType, UnitType
from stoqdrivers.printers.base import BasePrinter
from stoqdrivers.utils import encode_text
from stoqdrivers.translation import stoqdrivers_gettext
import datetime
import logging
import traceback
import sys
and context:
# Path: stoqdrivers/exceptions.py
# class CloseCouponError(DriverError):
# "Could not close the coupon."
#
# class PaymentAdditionError(DriverError):
# "Error while adding a payment."
#
# class AlreadyTotalized(DriverError):
# "The coupon is already totalized"
#
# class InvalidValue(DriverError):
# "The value specified is invalid or is not in the expected range"
#
# Path: stoqdrivers/enum.py
# class TaxType(IntEnum):
# """
# Enum for taxes
# """
# (ICMS,
# SUBSTITUTION,
# EXEMPTION,
# NONE,
# SERVICE,
# CUSTOM) = range(40, 46)
#
# class UnitType(IntEnum):
# """
# Enum for units
# """
# (WEIGHT,
# METERS,
# LITERS,
# EMPTY,
# CUSTOM) = range(20, 25)
#
# Path: stoqdrivers/printers/base.py
# class BasePrinter(BaseDevice):
# device_dirname = "printers"
# device_type = DeviceType.PRINTER
#
# def check_interfaces(self):
# driver_interfaces = providedBy(self._driver)
# if (ICouponPrinter not in driver_interfaces
# and IChequePrinter not in driver_interfaces
# and INonFiscalPrinter not in driver_interfaces):
# raise TypeError("The driver `%r' doesn't implements a valid "
# "interface" % self._driver)
#
# def get_constants(self):
# return self._driver.get_constants()
#
# def get_tax_constant(self, item):
# for enum, constant, value in self.get_tax_constants():
# if enum == item:
# return constant
#
# def get_model_name(self):
# return self._driver.model_name
#
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
# # Only do the bellow conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
which might include code, classes, or functions. Output only the next line. | **kwargs) |
Next line prediction: <|code_start|> raise AlreadyTotalized("the coupon is already totalized, you "
"can't add more items")
if discount and surcharge:
raise TypeError("discount and surcharge can not be used together")
elif unit != UnitType.CUSTOM and unit_desc:
raise ValueError("You can't specify the unit description if "
"you aren't using UnitType.CUSTOM constant.")
elif unit == UnitType.CUSTOM and not unit_desc:
raise ValueError("You must specify the unit description when "
"using UnitType.CUSTOM constant.")
elif unit == UnitType.CUSTOM and len(unit_desc) != 2:
raise ValueError("unit description must be 2-byte sized string")
if not item_price:
raise InvalidValue("The item value must be greater than zero")
if surcharge < 0:
raise ValueError('Surcharge cannot be negative')
if discount < 0:
raise ValueError('Discount cannot be negative')
return self._driver.coupon_add_item(
self._format_text(item_code), self._format_text(item_description),
item_price, taxcode, items_quantity, unit, discount, surcharge,
unit_desc=self._format_text(unit_desc))
def totalize(self, discount=Decimal(0), surcharge=Decimal(0),
taxcode=TaxType.NONE):
log.info('totalize(discount=%r, surcharge=%r, taxcode=%r)' % (
discount, surcharge, taxcode))
<|code_end|>
. Use current file imports:
(from collections import namedtuple
from decimal import Decimal
from numbers import Real
from stoqdrivers.exceptions import (CloseCouponError, PaymentAdditionError,
AlreadyTotalized, InvalidValue)
from stoqdrivers.enum import TaxType, UnitType
from stoqdrivers.printers.base import BasePrinter
from stoqdrivers.utils import encode_text
from stoqdrivers.translation import stoqdrivers_gettext
import datetime
import logging
import traceback
import sys)
and context including class names, function names, or small code snippets from other files:
# Path: stoqdrivers/exceptions.py
# class CloseCouponError(DriverError):
# "Could not close the coupon."
#
# class PaymentAdditionError(DriverError):
# "Error while adding a payment."
#
# class AlreadyTotalized(DriverError):
# "The coupon is already totalized"
#
# class InvalidValue(DriverError):
# "The value specified is invalid or is not in the expected range"
#
# Path: stoqdrivers/enum.py
# class TaxType(IntEnum):
# """
# Enum for taxes
# """
# (ICMS,
# SUBSTITUTION,
# EXEMPTION,
# NONE,
# SERVICE,
# CUSTOM) = range(40, 46)
#
# class UnitType(IntEnum):
# """
# Enum for units
# """
# (WEIGHT,
# METERS,
# LITERS,
# EMPTY,
# CUSTOM) = range(20, 25)
#
# Path: stoqdrivers/printers/base.py
# class BasePrinter(BaseDevice):
# device_dirname = "printers"
# device_type = DeviceType.PRINTER
#
# def check_interfaces(self):
# driver_interfaces = providedBy(self._driver)
# if (ICouponPrinter not in driver_interfaces
# and IChequePrinter not in driver_interfaces
# and INonFiscalPrinter not in driver_interfaces):
# raise TypeError("The driver `%r' doesn't implements a valid "
# "interface" % self._driver)
#
# def get_constants(self):
# return self._driver.get_constants()
#
# def get_tax_constant(self, item):
# for enum, constant, value in self.get_tax_constants():
# if enum == item:
# return constant
#
# def get_model_name(self):
# return self._driver.model_name
#
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
# # Only do the bellow conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
. Output only the next line. | if discount and surcharge: |
Next line prediction: <|code_start|>
if surcharge < 0:
raise ValueError('Surcharge cannot be negative')
if discount < 0:
raise ValueError('Discount cannot be negative')
return self._driver.coupon_add_item(
self._format_text(item_code), self._format_text(item_description),
item_price, taxcode, items_quantity, unit, discount, surcharge,
unit_desc=self._format_text(unit_desc))
def totalize(self, discount=Decimal(0), surcharge=Decimal(0),
taxcode=TaxType.NONE):
log.info('totalize(discount=%r, surcharge=%r, taxcode=%r)' % (
discount, surcharge, taxcode))
if discount and surcharge:
raise TypeError("discount and surcharge can not be used together")
if surcharge and taxcode == TaxType.NONE:
raise ValueError("to specify a surcharge you need specify its "
"tax code")
result = self._driver.coupon_totalize(discount, surcharge, taxcode)
self._has_been_totalized = True
self.totalized_value = result
return result
def add_payment(self, payment_method: str, payment_value: Decimal, description=''):
log.info("add_payment(method=%r, value=%r, description=%r)" % (
payment_method, payment_value, description))
<|code_end|>
. Use current file imports:
(from collections import namedtuple
from decimal import Decimal
from numbers import Real
from stoqdrivers.exceptions import (CloseCouponError, PaymentAdditionError,
AlreadyTotalized, InvalidValue)
from stoqdrivers.enum import TaxType, UnitType
from stoqdrivers.printers.base import BasePrinter
from stoqdrivers.utils import encode_text
from stoqdrivers.translation import stoqdrivers_gettext
import datetime
import logging
import traceback
import sys)
and context including class names, function names, or small code snippets from other files:
# Path: stoqdrivers/exceptions.py
# class CloseCouponError(DriverError):
# "Could not close the coupon."
#
# class PaymentAdditionError(DriverError):
# "Error while adding a payment."
#
# class AlreadyTotalized(DriverError):
# "The coupon is already totalized"
#
# class InvalidValue(DriverError):
# "The value specified is invalid or is not in the expected range"
#
# Path: stoqdrivers/enum.py
# class TaxType(IntEnum):
# """
# Enum for taxes
# """
# (ICMS,
# SUBSTITUTION,
# EXEMPTION,
# NONE,
# SERVICE,
# CUSTOM) = range(40, 46)
#
# class UnitType(IntEnum):
# """
# Enum for units
# """
# (WEIGHT,
# METERS,
# LITERS,
# EMPTY,
# CUSTOM) = range(20, 25)
#
# Path: stoqdrivers/printers/base.py
# class BasePrinter(BaseDevice):
# device_dirname = "printers"
# device_type = DeviceType.PRINTER
#
# def check_interfaces(self):
# driver_interfaces = providedBy(self._driver)
# if (ICouponPrinter not in driver_interfaces
# and IChequePrinter not in driver_interfaces
# and INonFiscalPrinter not in driver_interfaces):
# raise TypeError("The driver `%r' doesn't implements a valid "
# "interface" % self._driver)
#
# def get_constants(self):
# return self._driver.get_constants()
#
# def get_tax_constant(self, item):
# for enum, constant, value in self.get_tax_constants():
# if enum == item:
# return constant
#
# def get_model_name(self):
# return self._driver.model_name
#
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
# # Only do the bellow conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
. Output only the next line. | if not self._has_been_totalized: |
Next line prediction: <|code_start|> items_quantity=Decimal("1.0"), unit=UnitType.EMPTY,
discount=Decimal("0.0"), surcharge=Decimal("0.0"),
unit_desc=""):
log.info("add_item(code=%r, description=%r, price=%r, "
"taxcode=%r, quantity=%r, unit=%r, discount=%r, "
"surcharge=%r, unit_desc=%r)" % (
item_code, item_description, item_price, taxcode,
items_quantity, unit, discount, surcharge, unit_desc))
if self._has_been_totalized:
raise AlreadyTotalized("the coupon is already totalized, you "
"can't add more items")
if discount and surcharge:
raise TypeError("discount and surcharge can not be used together")
elif unit != UnitType.CUSTOM and unit_desc:
raise ValueError("You can't specify the unit description if "
"you aren't using UnitType.CUSTOM constant.")
elif unit == UnitType.CUSTOM and not unit_desc:
raise ValueError("You must specify the unit description when "
"using UnitType.CUSTOM constant.")
elif unit == UnitType.CUSTOM and len(unit_desc) != 2:
raise ValueError("unit description must be 2-byte sized string")
if not item_price:
raise InvalidValue("The item value must be greater than zero")
if surcharge < 0:
raise ValueError('Surcharge cannot be negative')
if discount < 0:
raise ValueError('Discount cannot be negative')
<|code_end|>
. Use current file imports:
(from collections import namedtuple
from decimal import Decimal
from numbers import Real
from stoqdrivers.exceptions import (CloseCouponError, PaymentAdditionError,
AlreadyTotalized, InvalidValue)
from stoqdrivers.enum import TaxType, UnitType
from stoqdrivers.printers.base import BasePrinter
from stoqdrivers.utils import encode_text
from stoqdrivers.translation import stoqdrivers_gettext
import datetime
import logging
import traceback
import sys)
and context including class names, function names, or small code snippets from other files:
# Path: stoqdrivers/exceptions.py
# class CloseCouponError(DriverError):
# "Could not close the coupon."
#
# class PaymentAdditionError(DriverError):
# "Error while adding a payment."
#
# class AlreadyTotalized(DriverError):
# "The coupon is already totalized"
#
# class InvalidValue(DriverError):
# "The value specified is invalid or is not in the expected range"
#
# Path: stoqdrivers/enum.py
# class TaxType(IntEnum):
# """
# Enum for taxes
# """
# (ICMS,
# SUBSTITUTION,
# EXEMPTION,
# NONE,
# SERVICE,
# CUSTOM) = range(40, 46)
#
# class UnitType(IntEnum):
# """
# Enum for units
# """
# (WEIGHT,
# METERS,
# LITERS,
# EMPTY,
# CUSTOM) = range(20, 25)
#
# Path: stoqdrivers/printers/base.py
# class BasePrinter(BaseDevice):
# device_dirname = "printers"
# device_type = DeviceType.PRINTER
#
# def check_interfaces(self):
# driver_interfaces = providedBy(self._driver)
# if (ICouponPrinter not in driver_interfaces
# and IChequePrinter not in driver_interfaces
# and INonFiscalPrinter not in driver_interfaces):
# raise TypeError("The driver `%r' doesn't implements a valid "
# "interface" % self._driver)
#
# def get_constants(self):
# return self._driver.get_constants()
#
# def get_tax_constant(self, item):
# for enum, constant, value in self.get_tax_constants():
# if enum == item:
# return constant
#
# def get_model_name(self):
# return self._driver.model_name
#
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
# # Only do the bellow conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
. Output only the next line. | return self._driver.coupon_add_item( |
Predict the next line after this snippet: <|code_start|>## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
## USA.
##
## Author(s): Johan Dahlin <jdahlin@async.com.br>
## Henrique Romano <henrique@async.com.br>
##
_ = stoqdrivers_gettext
log = logging.getLogger('stoqdrivers.serial')
@implementer(ISerialPort)
class VirtualPort:
def getDSR(self):
return True
<|code_end|>
using the current file's imports:
import logging
import socket
from serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE
from zope.interface import implementer
from stoqdrivers.interfaces import ISerialPort
from stoqdrivers.exceptions import DriverError, PrinterError
from stoqdrivers.translation import stoqdrivers_gettext
from stoqdrivers.utils import str2bytes, bytes2str
and any relevant context from other files:
# Path: stoqdrivers/interfaces.py
# class ISerialPort(Interface):
# """ Interface used by drivers to write commands and get reply from devices
# """
#
# def getDSR():
# """ Returns True if the device is done to send data. Some drivers
# block in a loop waiting for this function returns True before call
# read.
# """
#
# def setDTR(value):
# """ Set to True when the driver is going to send data to the device
# """
#
# def read(n_bytes=1):
# """ Read data """
#
# def write(data):
# """ Write data """
#
# Path: stoqdrivers/exceptions.py
# class DriverError(Exception):
# "Base exception for all printer errors"
#
# def __init__(self, error='', code=-1):
# if code != -1:
# error = '%d: %s' % (code, error)
# Exception.__init__(self, error)
# self.code = code
#
# class PrinterError(Exception):
# "General printer errors"
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
#
# Path: stoqdrivers/utils.py
# def str2bytes(text):
# if isinstance(text, bytes):
# return text
# return bytes(ord(i) for i in text)
#
# def bytes2str(data):
# return ''.join(chr(i) for i in data)
. Output only the next line. | def setDTR(self, value): |
Continue the code snippet: <|code_start|>
class EthernetPort:
def __init__(self, address, port):
self.address = address
self.port = port
self._get_or_create_connection()
def _get_or_create_connection(self):
self.device = active_device or self._create_connection()
def _create_connection(self):
global active_device
try:
active_device = self.device = socket.create_connection(
(self.address, self.port), timeout=5)
except OSError:
raise PrinterError
def _check_device(self):
# The device can be None if flask starts without a printer on,
# when the printer is turned on, the stoqserver can reset
# the connection with the socket.
if not isinstance(self.device, socket.socket):
raise PrinterError
def write(self, data):
self._check_device()
try:
self.device.sendall(data)
except (ConnectionResetError, OSError):
<|code_end|>
. Use current file imports:
import logging
import socket
from serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE
from zope.interface import implementer
from stoqdrivers.interfaces import ISerialPort
from stoqdrivers.exceptions import DriverError, PrinterError
from stoqdrivers.translation import stoqdrivers_gettext
from stoqdrivers.utils import str2bytes, bytes2str
and context (classes, functions, or code) from other files:
# Path: stoqdrivers/interfaces.py
# class ISerialPort(Interface):
# """ Interface used by drivers to write commands and get reply from devices
# """
#
# def getDSR():
# """ Returns True if the device is done to send data. Some drivers
# block in a loop waiting for this function returns True before call
# read.
# """
#
# def setDTR(value):
# """ Set to True when the driver is going to send data to the device
# """
#
# def read(n_bytes=1):
# """ Read data """
#
# def write(data):
# """ Write data """
#
# Path: stoqdrivers/exceptions.py
# class DriverError(Exception):
# "Base exception for all printer errors"
#
# def __init__(self, error='', code=-1):
# if code != -1:
# error = '%d: %s' % (code, error)
# Exception.__init__(self, error)
# self.code = code
#
# class PrinterError(Exception):
# "General printer errors"
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
#
# Path: stoqdrivers/utils.py
# def str2bytes(text):
# if isinstance(text, bytes):
# return text
# return bytes(ord(i) for i in text)
#
# def bytes2str(data):
# return ''.join(chr(i) for i in data)
. Output only the next line. | self._create_connection() |
Given the code snippet: <|code_start|>
def fileno(self):
return self._port.fileno()
def writeline(self, data):
self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
return self.readline()
def write(self, data):
# pyserial is expecting bytes but we work with str in stoqdrivers
data = str2bytes(data)
log.debug(">>> %r (%d bytes)" % (data, len(data)))
self._port.write(data)
def read(self, n_bytes):
# stoqdrivers is expecting str but pyserial will reply with bytes
data = self._port.read(n_bytes)
return bytes2str(data)
def readline(self):
out = ''
a = 0
retries = 10
while True:
if a > retries:
raise DriverError(_("Timeout communicating with fiscal "
"printer"))
c = self.read(1)
if not c:
<|code_end|>
, generate the next line using the imports in this file:
import logging
import socket
from serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE
from zope.interface import implementer
from stoqdrivers.interfaces import ISerialPort
from stoqdrivers.exceptions import DriverError, PrinterError
from stoqdrivers.translation import stoqdrivers_gettext
from stoqdrivers.utils import str2bytes, bytes2str
and context (functions, classes, or occasionally code) from other files:
# Path: stoqdrivers/interfaces.py
# class ISerialPort(Interface):
# """ Interface used by drivers to write commands and get reply from devices
# """
#
# def getDSR():
# """ Returns True if the device is done to send data. Some drivers
# block in a loop waiting for this function returns True before call
# read.
# """
#
# def setDTR(value):
# """ Set to True when the driver is going to send data to the device
# """
#
# def read(n_bytes=1):
# """ Read data """
#
# def write(data):
# """ Write data """
#
# Path: stoqdrivers/exceptions.py
# class DriverError(Exception):
# "Base exception for all printer errors"
#
# def __init__(self, error='', code=-1):
# if code != -1:
# error = '%d: %s' % (code, error)
# Exception.__init__(self, error)
# self.code = code
#
# class PrinterError(Exception):
# "General printer errors"
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
#
# Path: stoqdrivers/utils.py
# def str2bytes(text):
# if isinstance(text, bytes):
# return text
# return bytes(ord(i) for i in text)
#
# def bytes2str(data):
# return ''.join(chr(i) for i in data)
. Output only the next line. | a += 1 |
Next line prediction: <|code_start|> def _get_or_create_connection(self):
self.device = active_device or self._create_connection()
def _create_connection(self):
global active_device
try:
active_device = self.device = socket.create_connection(
(self.address, self.port), timeout=5)
except OSError:
raise PrinterError
def _check_device(self):
# The device can be None if flask starts without a printer on,
# when the printer is turned on, the stoqserver can reset
# the connection with the socket.
if not isinstance(self.device, socket.socket):
raise PrinterError
def write(self, data):
self._check_device()
try:
self.device.sendall(data)
except (ConnectionResetError, OSError):
self._create_connection()
self.device.sendall(data)
def read(self, n_bytes):
self._check_device()
try:
data = self.device.recv(n_bytes)
<|code_end|>
. Use current file imports:
(import logging
import socket
from serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE
from zope.interface import implementer
from stoqdrivers.interfaces import ISerialPort
from stoqdrivers.exceptions import DriverError, PrinterError
from stoqdrivers.translation import stoqdrivers_gettext
from stoqdrivers.utils import str2bytes, bytes2str)
and context including class names, function names, or small code snippets from other files:
# Path: stoqdrivers/interfaces.py
# class ISerialPort(Interface):
# """ Interface used by drivers to write commands and get reply from devices
# """
#
# def getDSR():
# """ Returns True if the device is done to send data. Some drivers
# block in a loop waiting for this function returns True before call
# read.
# """
#
# def setDTR(value):
# """ Set to True when the driver is going to send data to the device
# """
#
# def read(n_bytes=1):
# """ Read data """
#
# def write(data):
# """ Write data """
#
# Path: stoqdrivers/exceptions.py
# class DriverError(Exception):
# "Base exception for all printer errors"
#
# def __init__(self, error='', code=-1):
# if code != -1:
# error = '%d: %s' % (code, error)
# Exception.__init__(self, error)
# self.code = code
#
# class PrinterError(Exception):
# "General printer errors"
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
#
# Path: stoqdrivers/utils.py
# def str2bytes(text):
# if isinstance(text, bytes):
# return text
# return bytes(ord(i) for i in text)
#
# def bytes2str(data):
# return ''.join(chr(i) for i in data)
. Output only the next line. | except (ConnectionResetError, socket.timeout): |
Predict the next line for this snippet: <|code_start|> # Most serial printers allow connecting a cash drawer to them. You can then
# open the drawer, and also check its status. Some models, for instance,
# the Radiant drawers, use inverted logic to describe whether they are
# open, specified by this attribute, settable via BaseDevice config.
inverted_drawer = False
def __init__(self, port):
self.set_port(port)
def set_port(self, port):
self._port = port
def get_port(self):
return self._port
def fileno(self):
return self._port.fileno()
def writeline(self, data):
self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
return self.readline()
def write(self, data):
# pyserial is expecting bytes but we work with str in stoqdrivers
data = str2bytes(data)
log.debug(">>> %r (%d bytes)" % (data, len(data)))
self._port.write(data)
def read(self, n_bytes):
# stoqdrivers is expecting str but pyserial will reply with bytes
<|code_end|>
with the help of current file imports:
import logging
import socket
from serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE
from zope.interface import implementer
from stoqdrivers.interfaces import ISerialPort
from stoqdrivers.exceptions import DriverError, PrinterError
from stoqdrivers.translation import stoqdrivers_gettext
from stoqdrivers.utils import str2bytes, bytes2str
and context from other files:
# Path: stoqdrivers/interfaces.py
# class ISerialPort(Interface):
# """ Interface used by drivers to write commands and get reply from devices
# """
#
# def getDSR():
# """ Returns True if the device is done to send data. Some drivers
# block in a loop waiting for this function returns True before call
# read.
# """
#
# def setDTR(value):
# """ Set to True when the driver is going to send data to the device
# """
#
# def read(n_bytes=1):
# """ Read data """
#
# def write(data):
# """ Write data """
#
# Path: stoqdrivers/exceptions.py
# class DriverError(Exception):
# "Base exception for all printer errors"
#
# def __init__(self, error='', code=-1):
# if code != -1:
# error = '%d: %s' % (code, error)
# Exception.__init__(self, error)
# self.code = code
#
# class PrinterError(Exception):
# "General printer errors"
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
#
# Path: stoqdrivers/utils.py
# def str2bytes(text):
# if isinstance(text, bytes):
# return text
# return bytes(ord(i) for i in text)
#
# def bytes2str(data):
# return ''.join(chr(i) for i in data)
, which may contain function names, class names, or code. Output only the next line. | data = self._port.read(n_bytes) |
Here is a snippet: <|code_start|> def __init__(self, device, baudrate=9600):
# WARNING: Never change these default options, some drivers are based
# on this to work. Maybe we should change this and make all the driver
# specify its options, but right now I think that's ok, since these
# options are common to most of the drivers.
Serial.__init__(self, device, baudrate=baudrate, bytesize=EIGHTBITS,
parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=3,
write_timeout=3)
self.setDTR(True)
self.flushInput()
self.flushOutput()
# When we are printing NFCe danfe there is a hack that forces the reinitialization of the printer.
# So this variable is used to check if we need to create a new socket or reuse the previous one.
active_device = None
class EthernetPort:
def __init__(self, address, port):
self.address = address
self.port = port
self._get_or_create_connection()
def _get_or_create_connection(self):
self.device = active_device or self._create_connection()
def _create_connection(self):
global active_device
try:
<|code_end|>
. Write the next line using the current file imports:
import logging
import socket
from serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE
from zope.interface import implementer
from stoqdrivers.interfaces import ISerialPort
from stoqdrivers.exceptions import DriverError, PrinterError
from stoqdrivers.translation import stoqdrivers_gettext
from stoqdrivers.utils import str2bytes, bytes2str
and context from other files:
# Path: stoqdrivers/interfaces.py
# class ISerialPort(Interface):
# """ Interface used by drivers to write commands and get reply from devices
# """
#
# def getDSR():
# """ Returns True if the device is done to send data. Some drivers
# block in a loop waiting for this function returns True before call
# read.
# """
#
# def setDTR(value):
# """ Set to True when the driver is going to send data to the device
# """
#
# def read(n_bytes=1):
# """ Read data """
#
# def write(data):
# """ Write data """
#
# Path: stoqdrivers/exceptions.py
# class DriverError(Exception):
# "Base exception for all printer errors"
#
# def __init__(self, error='', code=-1):
# if code != -1:
# error = '%d: %s' % (code, error)
# Exception.__init__(self, error)
# self.code = code
#
# class PrinterError(Exception):
# "General printer errors"
#
# Path: stoqdrivers/translation.py
# def stoqdrivers_gettext(message):
# return gettext.dgettext('stoqdrivers', message)
#
# Path: stoqdrivers/utils.py
# def str2bytes(text):
# if isinstance(text, bytes):
# return text
# return bytes(ord(i) for i in text)
#
# def bytes2str(data):
# return ''.join(chr(i) for i in data)
, which may include functions, classes, or code. Output only the next line. | active_device = self.device = socket.create_connection( |
Given snippet: <|code_start|> start = 0
for tag in re.finditer(b'<\w+>', data):
# Text before the tag
text = data[start: tag.start()]
if text:
self._driver.print_inline(text)
start = tag.end()
tag = tag.group()[1:-1].decode() # remove < and >
if hasattr(self, tag):
getattr(self, tag)()
# any remaining text after the last tag
text = data[start:]
if text:
self._driver.print_inline(text)
def print_barcode(self, barcode):
self._driver.print_barcode(barcode)
def print_qrcode(self, code):
self._driver.print_qrcode(code)
def print_matrix(self, data):
if hasattr(self._driver, 'print_matrix'):
self._driver.print_matrix(data)
def separator(self):
if hasattr(self._driver, 'separator'):
self._driver.separator()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from stoqdrivers.printers.base import BasePrinter
and context:
# Path: stoqdrivers/printers/base.py
# class BasePrinter(BaseDevice):
# device_dirname = "printers"
# device_type = DeviceType.PRINTER
#
# def check_interfaces(self):
# driver_interfaces = providedBy(self._driver)
# if (ICouponPrinter not in driver_interfaces
# and IChequePrinter not in driver_interfaces
# and INonFiscalPrinter not in driver_interfaces):
# raise TypeError("The driver `%r' doesn't implements a valid "
# "interface" % self._driver)
#
# def get_constants(self):
# return self._driver.get_constants()
#
# def get_tax_constant(self, item):
# for enum, constant, value in self.get_tax_constants():
# if enum == item:
# return constant
#
# def get_model_name(self):
# return self._driver.model_name
which might include code, classes, or functions. Output only the next line. | else: |
Based on the snippet: <|code_start|> 'cp863': ESC + '\x74\x04', # Canadian-French
'cp865': ESC + '\x74\x05', # Nordic
'latin1': ESC + '\x74\x06', # Simplified Kanji, Hirakana
'cp737': ESC + '\x74\x07', # Simplified Kanji
'cp862': ESC + '\x74\x08', # Simplified Kanji
'cp1252': ESC + '\x74\x11', # Western European Windows Code Set
        'cp866': ESC + '\x74\x12', # Cyrillic #2
'cp852': ESC + '\x74\x13', # Latin 2
'cp858': ESC + '\x74\x14', # Euro
}
#: How many line feeds should be done before cutting the paper
cut_line_feeds = 4
#: The default font
default_font = FONT_CONDENSED
#: The maximum number of characters that fit a line
max_characters = 64
#: The maximum number of characters that fit a barcode
max_barcode_characters = 27
GRAPHICS_API = GRAPHICS_8BITS
GRAPHICS_MULTIPLIER = 1
GRAPHICS_MAX_COLS = {
GRAPHICS_8BITS: 576,
GRAPHICS_24BITS: 1728,
}
<|code_end|>
, predict the immediate next line with the help of imports:
from stoqdrivers.utils import encode_text, GRAPHICS_8BITS, GRAPHICS_24BITS, matrix2graphics
and context (classes, functions, sometimes code) from other files:
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
#     # Only do the below conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# GRAPHICS_8BITS = 8
#
# GRAPHICS_24BITS = 24
#
# def matrix2graphics(graphics_api, matrix, max_cols, multiplier=1, centralized=True):
# if graphics_api not in (GRAPHICS_8BITS, GRAPHICS_24BITS):
# raise ValueError("Graphics api %s not supported" % (graphics_api, ))
#
# sub_len = int(graphics_api / multiplier)
#
# for i in range(0, len(matrix), sub_len):
# bytes_ = []
# sub = matrix[i:i + sub_len]
# if len(sub) < sub_len:
# sub.extend([[False] * len(matrix[0])] * (sub_len - len(sub)))
#
# for j in range(len(matrix[0])):
# bits = []
# for bit in sub:
# bits.extend([bit[j]] * multiplier)
#
# if graphics_api == GRAPHICS_8BITS:
# # The 3 is to compensate for the fact that each pixel is
# # 3x larger vertically than horizontally
# bytes_.extend([bits2byte(bits)] * 3 * multiplier)
# elif graphics_api == GRAPHICS_24BITS:
# splitted_bytes = []
# for k in range(0, 24, 8):
# splitted_bytes.append(bits2byte(bits[k: k + 8]))
# bytes_.extend(splitted_bytes * multiplier)
# else:
# raise AssertionError
#
# if centralized:
# diff = max_cols - len(bytes_)
# if diff:
# bytes_ = ([0] * int(diff / 2)) + bytes_
#
# divide_len_by = graphics_api / 8
# yield ''.join(chr(b) for b in bytes_), int(len(bytes_) / divide_len_by)
. Output only the next line. | GRAPHICS_CMD = { |
Predict the next line for this snippet: <|code_start|>
# Based on python-escpos's escpos.escpos.Escpos:
#
# https://github.com/python-escpos/python-escpos/blob/master/src/escpos/escpos.py
ESC = '\x1b' # Escape
GS = '\x1d' # Group Separator
class EscPosMixin(object):
FONT_REGULAR = ESC + 'M0'
FONT_CONDENSED = ESC + 'M1'
TXT_ALIGN_LEFT = ESC + 'a\x00' # Left justification
TXT_ALIGN_CENTER = ESC + 'a\x01' # Centering
TXT_BOLD_OFF = ESC + 'E\x00' # Bold font OFF
TXT_BOLD_ON = ESC + 'E\x01' # Bold font ON
DOUBLE_HEIGHT_ON = ESC + 'G\x00' # Double height character
DOUBLE_HEIGHT_OFF = ESC + 'G\x01' # Normal height character
LINE_FEED = '\x0a'
LINE_FEED_RESET = ESC + '2'
LINE_FEED_SET = ESC + '3'
BARCODE_HEIGHT = GS + 'h' # Barcode Height [1-255]
BARCODE_WIDTH = GS + 'w' # Barcode Width [2-6]
BARCODE_FONT_REGULAR = GS + 'f' + '\x00' # Font Regular for HRI barcode chars
<|code_end|>
with the help of current file imports:
from stoqdrivers.utils import encode_text, GRAPHICS_8BITS, GRAPHICS_24BITS, matrix2graphics
and context from other files:
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
#     # Only do the below conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# GRAPHICS_8BITS = 8
#
# GRAPHICS_24BITS = 24
#
# def matrix2graphics(graphics_api, matrix, max_cols, multiplier=1, centralized=True):
# if graphics_api not in (GRAPHICS_8BITS, GRAPHICS_24BITS):
# raise ValueError("Graphics api %s not supported" % (graphics_api, ))
#
# sub_len = int(graphics_api / multiplier)
#
# for i in range(0, len(matrix), sub_len):
# bytes_ = []
# sub = matrix[i:i + sub_len]
# if len(sub) < sub_len:
# sub.extend([[False] * len(matrix[0])] * (sub_len - len(sub)))
#
# for j in range(len(matrix[0])):
# bits = []
# for bit in sub:
# bits.extend([bit[j]] * multiplier)
#
# if graphics_api == GRAPHICS_8BITS:
# # The 3 is to compensate for the fact that each pixel is
# # 3x larger vertically than horizontally
# bytes_.extend([bits2byte(bits)] * 3 * multiplier)
# elif graphics_api == GRAPHICS_24BITS:
# splitted_bytes = []
# for k in range(0, 24, 8):
# splitted_bytes.append(bits2byte(bits[k: k + 8]))
# bytes_.extend(splitted_bytes * multiplier)
# else:
# raise AssertionError
#
# if centralized:
# diff = max_cols - len(bytes_)
# if diff:
# bytes_ = ([0] * int(diff / 2)) + bytes_
#
# divide_len_by = graphics_api / 8
# yield ''.join(chr(b) for b in bytes_), int(len(bytes_) / divide_len_by)
, which may contain function names, class names, or code. Output only the next line. | BARCODE_FONT_CONDENSED = GS + 'f' + '\x01' # Font Condensed for HRI barcode chars |
Continue the code snippet: <|code_start|>## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
## USA.
# Based on python-escpos's escpos.escpos.Escpos:
#
# https://github.com/python-escpos/python-escpos/blob/master/src/escpos/escpos.py
ESC = '\x1b' # Escape
GS = '\x1d' # Group Separator
class EscPosMixin(object):
FONT_REGULAR = ESC + 'M0'
FONT_CONDENSED = ESC + 'M1'
TXT_ALIGN_LEFT = ESC + 'a\x00' # Left justification
TXT_ALIGN_CENTER = ESC + 'a\x01' # Centering
TXT_BOLD_OFF = ESC + 'E\x00' # Bold font OFF
<|code_end|>
. Use current file imports:
from stoqdrivers.utils import encode_text, GRAPHICS_8BITS, GRAPHICS_24BITS, matrix2graphics
and context (classes, functions, or code) from other files:
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
#     # Only do the below conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# GRAPHICS_8BITS = 8
#
# GRAPHICS_24BITS = 24
#
# def matrix2graphics(graphics_api, matrix, max_cols, multiplier=1, centralized=True):
# if graphics_api not in (GRAPHICS_8BITS, GRAPHICS_24BITS):
# raise ValueError("Graphics api %s not supported" % (graphics_api, ))
#
# sub_len = int(graphics_api / multiplier)
#
# for i in range(0, len(matrix), sub_len):
# bytes_ = []
# sub = matrix[i:i + sub_len]
# if len(sub) < sub_len:
# sub.extend([[False] * len(matrix[0])] * (sub_len - len(sub)))
#
# for j in range(len(matrix[0])):
# bits = []
# for bit in sub:
# bits.extend([bit[j]] * multiplier)
#
# if graphics_api == GRAPHICS_8BITS:
# # The 3 is to compensate for the fact that each pixel is
# # 3x larger vertically than horizontally
# bytes_.extend([bits2byte(bits)] * 3 * multiplier)
# elif graphics_api == GRAPHICS_24BITS:
# splitted_bytes = []
# for k in range(0, 24, 8):
# splitted_bytes.append(bits2byte(bits[k: k + 8]))
# bytes_.extend(splitted_bytes * multiplier)
# else:
# raise AssertionError
#
# if centralized:
# diff = max_cols - len(bytes_)
# if diff:
# bytes_ = ([0] * int(diff / 2)) + bytes_
#
# divide_len_by = graphics_api / 8
# yield ''.join(chr(b) for b in bytes_), int(len(bytes_) / divide_len_by)
. Output only the next line. | TXT_BOLD_ON = ESC + 'E\x01' # Bold font ON |
Given the following code snippet before the placeholder: <|code_start|>
LINE_FEED = '\x0a'
LINE_FEED_RESET = ESC + '2'
LINE_FEED_SET = ESC + '3'
BARCODE_HEIGHT = GS + 'h' # Barcode Height [1-255]
BARCODE_WIDTH = GS + 'w' # Barcode Width [2-6]
BARCODE_FONT_REGULAR = GS + 'f' + '\x00' # Font Regular for HRI barcode chars
BARCODE_FONT_CONDENSED = GS + 'f' + '\x01' # Font Condensed for HRI barcode chars
BARCODE_TXT_OFF = GS + 'H' + '\x00' # HRI barcode chars OFF
BARCODE_TXT_ABV = GS + 'H' + '\x01' # HRI barcode chars above
BARCODE_TXT_BLW = GS + 'H' + '\x02' # HRI barcode chars below
BARCODE_TXT_BTH = GS + 'H' + '\x03' # HRI both above and below
BARCODE_CODE93 = GS + 'k' + 'H' # Use a CODE93 Barcode
PAPER_FULL_CUT = GS + 'V\x00' # Full Paper Cut
CHARSET_CMD = {
'cp850': ESC + '\x74\x02', # Multilingual
'cp437': ESC + '\x74\x00', # USA: Standard Europe
'cp932': ESC + '\x74\x01', # Japanese Katakana
'cp860': ESC + '\x74\x03', # Portuguese
'cp863': ESC + '\x74\x04', # Canadian-French
'cp865': ESC + '\x74\x05', # Nordic
'latin1': ESC + '\x74\x06', # Simplified Kanji, Hirakana
'cp737': ESC + '\x74\x07', # Simplified Kanji
'cp862': ESC + '\x74\x08', # Simplified Kanji
'cp1252': ESC + '\x74\x11', # Western European Windows Code Set
        'cp866': ESC + '\x74\x12', # Cyrillic #2
<|code_end|>
, predict the next line using imports from the current file:
from stoqdrivers.utils import encode_text, GRAPHICS_8BITS, GRAPHICS_24BITS, matrix2graphics
and context including class names, function names, and sometimes code from other files:
# Path: stoqdrivers/utils.py
# def encode_text(text, encoding):
# """ Converts the string 'text' to encoding 'encoding' and optionally
# normalizes the string (currently only for ascii)
#
# @param text: text to convert
# @type text: str
# @param encoding: encoding to use
# @type text: str
# @returns: converted text
# """
# if encoding == "ascii":
# text = unicodedata.normalize("NFKD", text)
# # If we use text.encode we will get this sometimes:
# # TypeError: 'abicomp' encoder returned 'str' instead of 'bytes';
# # use codecs.encode() to encode to arbitrary types
# text = codecs.encode(text, encoding, "ignore")
#     # Only do the below conversion if the encoding above worked
# if isinstance(text, bytes):
# # Use bytes2str instead of decode or otherwise we would encode
# # the bytes in unicode. This way we will still keep them in the
# # encoding that we want (since SerialBase.write will do the reversal
# # operation, str2bytes) even though we will get some strange
# # characters in the "unicode version" of the string
# text = bytes2str(text)
# return text
#
# GRAPHICS_8BITS = 8
#
# GRAPHICS_24BITS = 24
#
# def matrix2graphics(graphics_api, matrix, max_cols, multiplier=1, centralized=True):
# if graphics_api not in (GRAPHICS_8BITS, GRAPHICS_24BITS):
# raise ValueError("Graphics api %s not supported" % (graphics_api, ))
#
# sub_len = int(graphics_api / multiplier)
#
# for i in range(0, len(matrix), sub_len):
# bytes_ = []
# sub = matrix[i:i + sub_len]
# if len(sub) < sub_len:
# sub.extend([[False] * len(matrix[0])] * (sub_len - len(sub)))
#
# for j in range(len(matrix[0])):
# bits = []
# for bit in sub:
# bits.extend([bit[j]] * multiplier)
#
# if graphics_api == GRAPHICS_8BITS:
# # The 3 is to compensate for the fact that each pixel is
# # 3x larger vertically than horizontally
# bytes_.extend([bits2byte(bits)] * 3 * multiplier)
# elif graphics_api == GRAPHICS_24BITS:
# splitted_bytes = []
# for k in range(0, 24, 8):
# splitted_bytes.append(bits2byte(bits[k: k + 8]))
# bytes_.extend(splitted_bytes * multiplier)
# else:
# raise AssertionError
#
# if centralized:
# diff = max_cols - len(bytes_)
# if diff:
# bytes_ = ([0] * int(diff / 2)) + bytes_
#
# divide_len_by = graphics_api / 8
# yield ''.join(chr(b) for b in bytes_), int(len(bytes_) / divide_len_by)
. Output only the next line. | 'cp852': ESC + '\x74\x13', # Latin 2 |
Using the snippet: <|code_start|>class DR700(SerialBase):
supported = True
model_name = "Daruma DR 700"
max_characters = 57
def __init__(self, port, consts=None):
SerialBase.__init__(self, port)
self.set_condensed()
self.descentralize()
self.unset_bold()
self.unset_double_height()
def centralize(self):
self.write(CENTRALIZE)
def descentralize(self):
self.write(DESCENTRALIZE)
def set_bold(self):
self.write(SET_BOLD)
def unset_bold(self):
self.write(UNSET_BOLD)
def set_condensed(self):
self.write(CONDENSED_MODE)
def unset_condensed(self):
<|code_end|>
, determine the next line of code. You have imports:
from zope.interface import implementer
from stoqdrivers.interfaces import INonFiscalPrinter
from stoqdrivers.serialbase import SerialBase
and context (class names, function names, or code) available:
# Path: stoqdrivers/interfaces.py
# class INonFiscalPrinter(IDevice):
# """ Interface used to formatting texts in non fiscal printers.
# """
#
# max_characters = Attribute("The maximum characters per line")
#
# def centralize():
# """ Centralize the text to be sent to coupon. """
#
# def descentralize():
# """ Descentralize the text to be sent to coupon. """
#
# def set_bold():
# """ The sent text will be appear in bold. """
#
# def unset_bold():
# """ Remove the bold option. """
#
# def print_line(data):
# """ Performs a line break to the given text. """
#
# def print_inline(data):
# """ Print a given text in a unique line. """
#
# def print_barcode(code):
# """ Print a barcode representing the given code. """
#
# def print_qrcode(code):
# """ Print a qrcode representing the given code. """
#
# def cut_paper():
# """ Performs a paper cutting. """
#
# Path: stoqdrivers/serialbase.py
# class SerialBase(object):
#
# # All commands will have this prefixed
# CMD_PREFIX = '\x1b'
# CMD_SUFFIX = ''
#
# # used by readline()
# EOL_DELIMIT = '\r'
#
# # Most serial printers allow connecting a cash drawer to them. You can then
# # open the drawer, and also check its status. Some models, for instance,
# # the Radiant drawers, use inverted logic to describe whether they are
# # open, specified by this attribute, settable via BaseDevice config.
# inverted_drawer = False
#
# def __init__(self, port):
# self.set_port(port)
#
# def set_port(self, port):
# self._port = port
#
# def get_port(self):
# return self._port
#
# def fileno(self):
# return self._port.fileno()
#
# def writeline(self, data):
# self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
# return self.readline()
#
# def write(self, data):
# # pyserial is expecting bytes but we work with str in stoqdrivers
# data = str2bytes(data)
# log.debug(">>> %r (%d bytes)" % (data, len(data)))
# self._port.write(data)
#
# def read(self, n_bytes):
# # stoqdrivers is expecting str but pyserial will reply with bytes
# data = self._port.read(n_bytes)
# return bytes2str(data)
#
# def readline(self):
# out = ''
# a = 0
# retries = 10
# while True:
# if a > retries:
# raise DriverError(_("Timeout communicating with fiscal "
# "printer"))
#
# c = self.read(1)
# if not c:
# a += 1
# print('take %s' % a)
# continue
# a = 0
# if c == self.EOL_DELIMIT:
# log.debug('<<< %r' % out)
# return out
# out += c
#
# def open(self):
# if not self._port.is_open:
# self._port.open()
#
# def close(self):
# if self._port.is_open:
#             # Flush whatever is pending to write, since port.close() will close it
#             # *immediately*, losing what was pending to write.
# self._port.flush()
# self._port.close()
. Output only the next line. | self.write(NORMAL_MODE) |
Given the following code snippet before the placeholder: <|code_start|> def unset_bold(self):
self.write(UNSET_BOLD)
def set_condensed(self):
self.write(CONDENSED_MODE)
def unset_condensed(self):
self.write(NORMAL_MODE)
def set_double_height(self):
self.write(DOUBLE_HEIGHT_ON)
def unset_double_height(self):
self.write(DOUBLE_HEIGHT_OFF)
def print_line(self, data):
self.write(data + b'\n')
def print_inline(self, data):
self.write(data)
def print_barcode(self, code):
code_128 = chr(5)
width = chr(2)
height = chr(80)
barcode_label = chr(0)
self.write(ESC + '\x62%s%s%s%s%s\x00' % (code_128, width, height,
barcode_label, code))
self.write('\x0A')
<|code_end|>
, predict the next line using imports from the current file:
from zope.interface import implementer
from stoqdrivers.interfaces import INonFiscalPrinter
from stoqdrivers.serialbase import SerialBase
and context including class names, function names, and sometimes code from other files:
# Path: stoqdrivers/interfaces.py
# class INonFiscalPrinter(IDevice):
# """ Interface used to formatting texts in non fiscal printers.
# """
#
# max_characters = Attribute("The maximum characters per line")
#
# def centralize():
# """ Centralize the text to be sent to coupon. """
#
# def descentralize():
# """ Descentralize the text to be sent to coupon. """
#
# def set_bold():
# """ The sent text will be appear in bold. """
#
# def unset_bold():
# """ Remove the bold option. """
#
# def print_line(data):
# """ Performs a line break to the given text. """
#
# def print_inline(data):
# """ Print a given text in a unique line. """
#
# def print_barcode(code):
# """ Print a barcode representing the given code. """
#
# def print_qrcode(code):
# """ Print a qrcode representing the given code. """
#
# def cut_paper():
# """ Performs a paper cutting. """
#
# Path: stoqdrivers/serialbase.py
# class SerialBase(object):
#
# # All commands will have this prefixed
# CMD_PREFIX = '\x1b'
# CMD_SUFFIX = ''
#
# # used by readline()
# EOL_DELIMIT = '\r'
#
# # Most serial printers allow connecting a cash drawer to them. You can then
# # open the drawer, and also check its status. Some models, for instance,
# # the Radiant drawers, use inverted logic to describe whether they are
# # open, specified by this attribute, settable via BaseDevice config.
# inverted_drawer = False
#
# def __init__(self, port):
# self.set_port(port)
#
# def set_port(self, port):
# self._port = port
#
# def get_port(self):
# return self._port
#
# def fileno(self):
# return self._port.fileno()
#
# def writeline(self, data):
# self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
# return self.readline()
#
# def write(self, data):
# # pyserial is expecting bytes but we work with str in stoqdrivers
# data = str2bytes(data)
# log.debug(">>> %r (%d bytes)" % (data, len(data)))
# self._port.write(data)
#
# def read(self, n_bytes):
# # stoqdrivers is expecting str but pyserial will reply with bytes
# data = self._port.read(n_bytes)
# return bytes2str(data)
#
# def readline(self):
# out = ''
# a = 0
# retries = 10
# while True:
# if a > retries:
# raise DriverError(_("Timeout communicating with fiscal "
# "printer"))
#
# c = self.read(1)
# if not c:
# a += 1
# print('take %s' % a)
# continue
# a = 0
# if c == self.EOL_DELIMIT:
# log.debug('<<< %r' % out)
# return out
# out += c
#
# def open(self):
# if not self._port.is_open:
# self._port.open()
#
# def close(self):
# if self._port.is_open:
#             # Flush whatever is pending to write, since port.close() will close it
#             # *immediately*, losing what was pending to write.
# self._port.flush()
# self._port.close()
. Output only the next line. | def print_qrcode(self, code): |
Predict the next line after this snippet: <|code_start|>class Package:
SIZE = 22
def __init__(self, raw_data):
self.code = None
self.price_per_kg = None
self.total_price = None
self.weight = None
self._parse(raw_data)
def _parse(self, data):
if not data:
return
elif ord(data[0]) != STX or len(data) != self.SIZE:
raise InvalidReply("Received inconsistent data")
self.weight = Decimal(data[1:7]) / (10 ** QUANTITY_PRECISION)
self.price_per_kg = Decimal(data[8:14]) / (10 ** PRICE_PRECISION)
self.total_price = Decimal(data[15:21]) / (10 ** PRICE_PRECISION)
@implementer(IScale)
class MicP15(SerialBase):
CMD_PREFIX = "\x05"
EOL_DELIMIT = chr(ETX)
model_name = "Micheletti P15"
def __init__(self, device, consts=None):
SerialBase.__init__(self, device)
<|code_end|>
using the current file's imports:
from decimal import Decimal
from zope.interface import implementer
from stoqdrivers.exceptions import InvalidReply
from stoqdrivers.interfaces import IScale, IScaleInfo
from stoqdrivers.serialbase import SerialBase, SerialPort
and any relevant context from other files:
# Path: stoqdrivers/exceptions.py
# class InvalidReply(DriverError):
# "Invalid reply received"
#
# Path: stoqdrivers/interfaces.py
# class IScale(IDevice):
# """ This interface describes how to interacts with scales.
# """
#
# def read_data():
#         """ Read information from the scale, returning an object
# that implements IScaleInfo interface.
# """
#
# class IScaleInfo(Interface):
# """ This interface list the data read by the scale """
# weight = Attribute("The weight read")
# price_per_kg = Attribute("The KG read")
# total_price = Attribute("The total price. It is equivalent to "
# "price_per_kg * weight")
# code = Attribute("The product code")
#
# Path: stoqdrivers/serialbase.py
# class SerialBase(object):
#
# # All commands will have this prefixed
# CMD_PREFIX = '\x1b'
# CMD_SUFFIX = ''
#
# # used by readline()
# EOL_DELIMIT = '\r'
#
# # Most serial printers allow connecting a cash drawer to them. You can then
# # open the drawer, and also check its status. Some models, for instance,
# # the Radiant drawers, use inverted logic to describe whether they are
# # open, specified by this attribute, settable via BaseDevice config.
# inverted_drawer = False
#
# def __init__(self, port):
# self.set_port(port)
#
# def set_port(self, port):
# self._port = port
#
# def get_port(self):
# return self._port
#
# def fileno(self):
# return self._port.fileno()
#
# def writeline(self, data):
# self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
# return self.readline()
#
# def write(self, data):
# # pyserial is expecting bytes but we work with str in stoqdrivers
# data = str2bytes(data)
# log.debug(">>> %r (%d bytes)" % (data, len(data)))
# self._port.write(data)
#
# def read(self, n_bytes):
# # stoqdrivers is expecting str but pyserial will reply with bytes
# data = self._port.read(n_bytes)
# return bytes2str(data)
#
# def readline(self):
# out = ''
# a = 0
# retries = 10
# while True:
# if a > retries:
# raise DriverError(_("Timeout communicating with fiscal "
# "printer"))
#
# c = self.read(1)
# if not c:
# a += 1
# print('take %s' % a)
# continue
# a = 0
# if c == self.EOL_DELIMIT:
# log.debug('<<< %r' % out)
# return out
# out += c
#
# def open(self):
# if not self._port.is_open:
# self._port.open()
#
# def close(self):
# if self._port.is_open:
#             # Flush whatever is pending to write, since port.close() will close it
#             # *immediately*, losing what was pending to write.
# self._port.flush()
# self._port.close()
#
# class SerialPort(Serial):
#
# def __init__(self, device, baudrate=9600):
# # WARNING: Never change these default options, some drivers are based
# # on this to work. Maybe we should change this and make all the driver
# # specify its options, but right now I think that's ok, since these
# # options are common to most of the drivers.
# Serial.__init__(self, device, baudrate=baudrate, bytesize=EIGHTBITS,
# parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=3,
# write_timeout=3)
# self.setDTR(True)
# self.flushInput()
# self.flushOutput()
. Output only the next line. | self._package = None |
Using the snippet: <|code_start|> model_name = "Micheletti P15"
def __init__(self, device, consts=None):
SerialBase.__init__(self, device)
self._package = None
def _get_package(self):
# PESO, PRECO, TOTAL
# b'\x02 0800 99999 79999\x03'
reply = self.writeline('')
        # The sum is just because readline (called internally by writeline)
        # removes the EOL_DELIMIT from the received package and we need to
        # send the whole data to Package's constructor.
return Package(reply + MicP15.EOL_DELIMIT)
#
# IScale implementation
#
def read_data(self):
return self._get_package()
if __name__ == "__main__":
port = SerialPort('/dev/ttyS0')
r = MicP15(port)
data = r.read_data()
print("WEIGHT:", data.weight)
<|code_end|>
, determine the next line of code. You have imports:
from decimal import Decimal
from zope.interface import implementer
from stoqdrivers.exceptions import InvalidReply
from stoqdrivers.interfaces import IScale, IScaleInfo
from stoqdrivers.serialbase import SerialBase, SerialPort
and context (class names, function names, or code) available:
# Path: stoqdrivers/exceptions.py
# class InvalidReply(DriverError):
# "Invalid reply received"
#
# Path: stoqdrivers/interfaces.py
# class IScale(IDevice):
# """ This interface describes how to interacts with scales.
# """
#
# def read_data():
#         """ Read information from the scale, returning an object
# that implements IScaleInfo interface.
# """
#
# class IScaleInfo(Interface):
# """ This interface list the data read by the scale """
# weight = Attribute("The weight read")
# price_per_kg = Attribute("The KG read")
# total_price = Attribute("The total price. It is equivalent to "
# "price_per_kg * weight")
# code = Attribute("The product code")
#
# Path: stoqdrivers/serialbase.py
# class SerialBase(object):
#
# # All commands will have this prefixed
# CMD_PREFIX = '\x1b'
# CMD_SUFFIX = ''
#
# # used by readline()
# EOL_DELIMIT = '\r'
#
# # Most serial printers allow connecting a cash drawer to them. You can then
# # open the drawer, and also check its status. Some models, for instance,
# # the Radiant drawers, use inverted logic to describe whether they are
# # open, specified by this attribute, settable via BaseDevice config.
# inverted_drawer = False
#
# def __init__(self, port):
# self.set_port(port)
#
# def set_port(self, port):
# self._port = port
#
# def get_port(self):
# return self._port
#
# def fileno(self):
# return self._port.fileno()
#
# def writeline(self, data):
# self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
# return self.readline()
#
# def write(self, data):
# # pyserial is expecting bytes but we work with str in stoqdrivers
# data = str2bytes(data)
# log.debug(">>> %r (%d bytes)" % (data, len(data)))
# self._port.write(data)
#
# def read(self, n_bytes):
# # stoqdrivers is expecting str but pyserial will reply with bytes
# data = self._port.read(n_bytes)
# return bytes2str(data)
#
# def readline(self):
# out = ''
# a = 0
# retries = 10
# while True:
# if a > retries:
# raise DriverError(_("Timeout communicating with fiscal "
# "printer"))
#
# c = self.read(1)
# if not c:
# a += 1
# print('take %s' % a)
# continue
# a = 0
# if c == self.EOL_DELIMIT:
# log.debug('<<< %r' % out)
# return out
# out += c
#
# def open(self):
# if not self._port.is_open:
# self._port.open()
#
# def close(self):
# if self._port.is_open:
#             # Flush whatever is pending to write, since port.close() will close it
#             # *immediately*, losing what was pending to write.
# self._port.flush()
# self._port.close()
#
# class SerialPort(Serial):
#
# def __init__(self, device, baudrate=9600):
# # WARNING: Never change these default options, some drivers are based
# # on this to work. Maybe we should change this and make all the driver
# # specify its options, but right now I think that's ok, since these
# # options are common to most of the drivers.
# Serial.__init__(self, device, baudrate=baudrate, bytesize=EIGHTBITS,
# parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=3,
# write_timeout=3)
# self.setDTR(True)
# self.flushInput()
# self.flushOutput()
. Output only the next line. | print("PRICE BY KG:", data.price_per_kg) |
Continue the code snippet: <|code_start|>
@implementer(IScale)
class MicP15(SerialBase):
CMD_PREFIX = "\x05"
EOL_DELIMIT = chr(ETX)
model_name = "Micheletti P15"
def __init__(self, device, consts=None):
SerialBase.__init__(self, device)
self._package = None
def _get_package(self):
# PESO, PRECO, TOTAL
# b'\x02 0800 99999 79999\x03'
reply = self.writeline('')
        # The sum is just because readline (called internally by writeline)
        # removes the EOL_DELIMIT from the received package and we need to
        # send the whole data to Package's constructor.
return Package(reply + MicP15.EOL_DELIMIT)
#
# IScale implementation
#
def read_data(self):
return self._get_package()
<|code_end|>
. Use current file imports:
from decimal import Decimal
from zope.interface import implementer
from stoqdrivers.exceptions import InvalidReply
from stoqdrivers.interfaces import IScale, IScaleInfo
from stoqdrivers.serialbase import SerialBase, SerialPort
and context (classes, functions, or code) from other files:
# Path: stoqdrivers/exceptions.py
# class InvalidReply(DriverError):
# "Invalid reply received"
#
# Path: stoqdrivers/interfaces.py
# class IScale(IDevice):
# """ This interface describes how to interacts with scales.
# """
#
# def read_data():
# """ Read informations of the scale, returning an object
# that implements IScaleInfo interface.
# """
#
# class IScaleInfo(Interface):
# """ This interface list the data read by the scale """
# weight = Attribute("The weight read")
# price_per_kg = Attribute("The KG read")
# total_price = Attribute("The total price. It is equivalent to "
# "price_per_kg * weight")
# code = Attribute("The product code")
#
# Path: stoqdrivers/serialbase.py
# class SerialBase(object):
#
# # All commands will have this prefixed
# CMD_PREFIX = '\x1b'
# CMD_SUFFIX = ''
#
# # used by readline()
# EOL_DELIMIT = '\r'
#
# # Most serial printers allow connecting a cash drawer to them. You can then
# # open the drawer, and also check its status. Some models, for instance,
# # the Radiant drawers, use inverted logic to describe whether they are
# # open, specified by this attribute, settable via BaseDevice config.
# inverted_drawer = False
#
# def __init__(self, port):
# self.set_port(port)
#
# def set_port(self, port):
# self._port = port
#
# def get_port(self):
# return self._port
#
# def fileno(self):
# return self._port.fileno()
#
# def writeline(self, data):
# self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
# return self.readline()
#
# def write(self, data):
# # pyserial is expecting bytes but we work with str in stoqdrivers
# data = str2bytes(data)
# log.debug(">>> %r (%d bytes)" % (data, len(data)))
# self._port.write(data)
#
# def read(self, n_bytes):
# # stoqdrivers is expecting str but pyserial will reply with bytes
# data = self._port.read(n_bytes)
# return bytes2str(data)
#
# def readline(self):
# out = ''
# a = 0
# retries = 10
# while True:
# if a > retries:
# raise DriverError(_("Timeout communicating with fiscal "
# "printer"))
#
# c = self.read(1)
# if not c:
# a += 1
# print('take %s' % a)
# continue
# a = 0
# if c == self.EOL_DELIMIT:
# log.debug('<<< %r' % out)
# return out
# out += c
#
# def open(self):
# if not self._port.is_open:
# self._port.open()
#
# def close(self):
# if self._port.is_open:
# # Flush whaterver is pending to write, since port.close() will close it
# # *imediatally*, losing what was pending to write.
# self._port.flush()
# self._port.close()
#
# class SerialPort(Serial):
#
# def __init__(self, device, baudrate=9600):
# # WARNING: Never change these default options, some drivers are based
# # on this to work. Maybe we should change this and make all the driver
# # specify its options, but right now I think that's ok, since these
# # options are common to most of the drivers.
# Serial.__init__(self, device, baudrate=baudrate, bytesize=EIGHTBITS,
# parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=3,
# write_timeout=3)
# self.setDTR(True)
# self.flushInput()
# self.flushOutput()
. Output only the next line. | if __name__ == "__main__": |
Using the snippet: <|code_start|> self.price_per_kg = None
self.total_price = None
self.weight = None
self._parse(raw_data)
def _parse(self, data):
if not data:
return
elif ord(data[0]) != STX or len(data) != self.SIZE:
raise InvalidReply("Received inconsistent data")
self.weight = Decimal(data[1:7]) / (10 ** QUANTITY_PRECISION)
self.price_per_kg = Decimal(data[8:14]) / (10 ** PRICE_PRECISION)
self.total_price = Decimal(data[15:21]) / (10 ** PRICE_PRECISION)
@implementer(IScale)
class MicP15(SerialBase):
CMD_PREFIX = "\x05"
EOL_DELIMIT = chr(ETX)
model_name = "Micheletti P15"
def __init__(self, device, consts=None):
SerialBase.__init__(self, device)
self._package = None
def _get_package(self):
# PESO, PRECO, TOTAL
# b'\x02 0800 99999 79999\x03'
<|code_end|>
, determine the next line of code. You have imports:
from decimal import Decimal
from zope.interface import implementer
from stoqdrivers.exceptions import InvalidReply
from stoqdrivers.interfaces import IScale, IScaleInfo
from stoqdrivers.serialbase import SerialBase, SerialPort
and context (class names, function names, or code) available:
# Path: stoqdrivers/exceptions.py
# class InvalidReply(DriverError):
# "Invalid reply received"
#
# Path: stoqdrivers/interfaces.py
# class IScale(IDevice):
# """ This interface describes how to interacts with scales.
# """
#
# def read_data():
# """ Read informations of the scale, returning an object
# that implements IScaleInfo interface.
# """
#
# class IScaleInfo(Interface):
# """ This interface list the data read by the scale """
# weight = Attribute("The weight read")
# price_per_kg = Attribute("The KG read")
# total_price = Attribute("The total price. It is equivalent to "
# "price_per_kg * weight")
# code = Attribute("The product code")
#
# Path: stoqdrivers/serialbase.py
# class SerialBase(object):
#
# # All commands will have this prefixed
# CMD_PREFIX = '\x1b'
# CMD_SUFFIX = ''
#
# # used by readline()
# EOL_DELIMIT = '\r'
#
# # Most serial printers allow connecting a cash drawer to them. You can then
# # open the drawer, and also check its status. Some models, for instance,
# # the Radiant drawers, use inverted logic to describe whether they are
# # open, specified by this attribute, settable via BaseDevice config.
# inverted_drawer = False
#
# def __init__(self, port):
# self.set_port(port)
#
# def set_port(self, port):
# self._port = port
#
# def get_port(self):
# return self._port
#
# def fileno(self):
# return self._port.fileno()
#
# def writeline(self, data):
# self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
# return self.readline()
#
# def write(self, data):
# # pyserial is expecting bytes but we work with str in stoqdrivers
# data = str2bytes(data)
# log.debug(">>> %r (%d bytes)" % (data, len(data)))
# self._port.write(data)
#
# def read(self, n_bytes):
# # stoqdrivers is expecting str but pyserial will reply with bytes
# data = self._port.read(n_bytes)
# return bytes2str(data)
#
# def readline(self):
# out = ''
# a = 0
# retries = 10
# while True:
# if a > retries:
# raise DriverError(_("Timeout communicating with fiscal "
# "printer"))
#
# c = self.read(1)
# if not c:
# a += 1
# print('take %s' % a)
# continue
# a = 0
# if c == self.EOL_DELIMIT:
# log.debug('<<< %r' % out)
# return out
# out += c
#
# def open(self):
# if not self._port.is_open:
# self._port.open()
#
# def close(self):
# if self._port.is_open:
# # Flush whaterver is pending to write, since port.close() will close it
# # *imediatally*, losing what was pending to write.
# self._port.flush()
# self._port.close()
#
# class SerialPort(Serial):
#
# def __init__(self, device, baudrate=9600):
# # WARNING: Never change these default options, some drivers are based
# # on this to work. Maybe we should change this and make all the driver
# # specify its options, but right now I think that's ok, since these
# # options are common to most of the drivers.
# Serial.__init__(self, device, baudrate=baudrate, bytesize=EIGHTBITS,
# parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=3,
# write_timeout=3)
# self.setDTR(True)
# self.flushInput()
# self.flushOutput()
. Output only the next line. | reply = self.writeline('') |
Based on the snippet: <|code_start|> def __init__(self, device, consts=None):
SerialBase.__init__(self, device)
self._package = None
def _get_package(self):
# PESO, PRECO, TOTAL
# b'\x02 0800 99999 79999\x03'
reply = self.writeline('')
# The sum is just because readline (called internally by writeline)
# remove the EOL_DELIMIT from the package received and we need send
# to Package's constructor the whole data.
return Package(reply + MicP15.EOL_DELIMIT)
#
# IScale implementation
#
def read_data(self):
return self._get_package()
if __name__ == "__main__":
port = SerialPort('/dev/ttyS0')
r = MicP15(port)
data = r.read_data()
print("WEIGHT:", data.weight)
print("PRICE BY KG:", data.price_per_kg)
print("TOTAL PRICE:", data.total_price)
<|code_end|>
, predict the immediate next line with the help of imports:
from decimal import Decimal
from zope.interface import implementer
from stoqdrivers.exceptions import InvalidReply
from stoqdrivers.interfaces import IScale, IScaleInfo
from stoqdrivers.serialbase import SerialBase, SerialPort
and context (classes, functions, sometimes code) from other files:
# Path: stoqdrivers/exceptions.py
# class InvalidReply(DriverError):
# "Invalid reply received"
#
# Path: stoqdrivers/interfaces.py
# class IScale(IDevice):
# """ This interface describes how to interacts with scales.
# """
#
# def read_data():
# """ Read informations of the scale, returning an object
# that implements IScaleInfo interface.
# """
#
# class IScaleInfo(Interface):
# """ This interface list the data read by the scale """
# weight = Attribute("The weight read")
# price_per_kg = Attribute("The KG read")
# total_price = Attribute("The total price. It is equivalent to "
# "price_per_kg * weight")
# code = Attribute("The product code")
#
# Path: stoqdrivers/serialbase.py
# class SerialBase(object):
#
# # All commands will have this prefixed
# CMD_PREFIX = '\x1b'
# CMD_SUFFIX = ''
#
# # used by readline()
# EOL_DELIMIT = '\r'
#
# # Most serial printers allow connecting a cash drawer to them. You can then
# # open the drawer, and also check its status. Some models, for instance,
# # the Radiant drawers, use inverted logic to describe whether they are
# # open, specified by this attribute, settable via BaseDevice config.
# inverted_drawer = False
#
# def __init__(self, port):
# self.set_port(port)
#
# def set_port(self, port):
# self._port = port
#
# def get_port(self):
# return self._port
#
# def fileno(self):
# return self._port.fileno()
#
# def writeline(self, data):
# self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
# return self.readline()
#
# def write(self, data):
# # pyserial is expecting bytes but we work with str in stoqdrivers
# data = str2bytes(data)
# log.debug(">>> %r (%d bytes)" % (data, len(data)))
# self._port.write(data)
#
# def read(self, n_bytes):
# # stoqdrivers is expecting str but pyserial will reply with bytes
# data = self._port.read(n_bytes)
# return bytes2str(data)
#
# def readline(self):
# out = ''
# a = 0
# retries = 10
# while True:
# if a > retries:
# raise DriverError(_("Timeout communicating with fiscal "
# "printer"))
#
# c = self.read(1)
# if not c:
# a += 1
# print('take %s' % a)
# continue
# a = 0
# if c == self.EOL_DELIMIT:
# log.debug('<<< %r' % out)
# return out
# out += c
#
# def open(self):
# if not self._port.is_open:
# self._port.open()
#
# def close(self):
# if self._port.is_open:
# # Flush whaterver is pending to write, since port.close() will close it
# # *imediatally*, losing what was pending to write.
# self._port.flush()
# self._port.close()
#
# class SerialPort(Serial):
#
# def __init__(self, device, baudrate=9600):
# # WARNING: Never change these default options, some drivers are based
# # on this to work. Maybe we should change this and make all the driver
# # specify its options, but right now I think that's ok, since these
# # options are common to most of the drivers.
# Serial.__init__(self, device, baudrate=baudrate, bytesize=EIGHTBITS,
# parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=3,
# write_timeout=3)
# self.setDTR(True)
# self.flushInput()
# self.flushOutput()
. Output only the next line. | print("CODE:", data.code) |
Based on the snippet: <|code_start|>#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
# Author(s): Johan Dahlin <jdahlin@async.com.br>
#
def main(args):
usage = "usage: %prog [options] command [args]"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-t', '--type',
action="store",
dest="type",
default="printers",
help='Device type')
parser.add_option('-b', '--brand',
action="store",
dest="brand",
help='Device brand')
parser.add_option('-m', '--model',
action="store",
dest="model",
help='Device model')
parser.add_option('-p', '--port',
<|code_end|>
, predict the immediate next line with the help of imports:
import optparse
import sys
import pprint
from stoqdrivers.serialbase import SerialPort
from stoqdrivers.utils import get_obj_from_module
and context (classes, functions, sometimes code) from other files:
# Path: stoqdrivers/serialbase.py
# class SerialPort(Serial):
#
# def __init__(self, device, baudrate=9600):
# # WARNING: Never change these default options, some drivers are based
# # on this to work. Maybe we should change this and make all the driver
# # specify its options, but right now I think that's ok, since these
# # options are common to most of the drivers.
# Serial.__init__(self, device, baudrate=baudrate, bytesize=EIGHTBITS,
# parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=3,
# write_timeout=3)
# self.setDTR(True)
# self.flushInput()
# self.flushOutput()
#
# Path: stoqdrivers/utils.py
# def get_obj_from_module(module_name, obj_name):
# module = import_module(module_name)
# try:
# return getattr(module, obj_name)
# except AttributeError:
# raise ImportError("Can't find class %s for module %s" % (module_name, module_name))
. Output only the next line. | action="store", |
Based on the snippet: <|code_start|># You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
# Author(s): Johan Dahlin <jdahlin@async.com.br>
#
def main(args):
usage = "usage: %prog [options] command [args]"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-t', '--type',
action="store",
dest="type",
default="printers",
help='Device type')
parser.add_option('-b', '--brand',
action="store",
dest="brand",
help='Device brand')
parser.add_option('-m', '--model',
action="store",
dest="model",
help='Device model')
parser.add_option('-p', '--port',
action="store",
<|code_end|>
, predict the immediate next line with the help of imports:
import optparse
import sys
import pprint
from stoqdrivers.serialbase import SerialPort
from stoqdrivers.utils import get_obj_from_module
and context (classes, functions, sometimes code) from other files:
# Path: stoqdrivers/serialbase.py
# class SerialPort(Serial):
#
# def __init__(self, device, baudrate=9600):
# # WARNING: Never change these default options, some drivers are based
# # on this to work. Maybe we should change this and make all the driver
# # specify its options, but right now I think that's ok, since these
# # options are common to most of the drivers.
# Serial.__init__(self, device, baudrate=baudrate, bytesize=EIGHTBITS,
# parity=PARITY_NONE, stopbits=STOPBITS_ONE, timeout=3,
# write_timeout=3)
# self.setDTR(True)
# self.flushInput()
# self.flushOutput()
#
# Path: stoqdrivers/utils.py
# def get_obj_from_module(module_name, obj_name):
# module = import_module(module_name)
# try:
# return getattr(module, obj_name)
# except AttributeError:
# raise ImportError("Can't find class %s for module %s" % (module_name, module_name))
. Output only the next line. | dest="port", |
Given the following code snippet before the placeholder: <|code_start|>## Copyright (C) 2016 Stoq Tecnologia <http://stoq.link>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
## USA.
try:
has_usb = True
except ImportError:
has_usb = False
# Based on python-escpos's escpos.printer.Usb:
#
# https://github.com/python-escpos/python-escpos/blob/master/src/escpos/printer.py
<|code_end|>
, predict the next line using imports from the current file:
import usb.core
import usb.util
from stoqdrivers.exceptions import USBDriverError
from stoqdrivers.utils import str2bytes
and context including class names, function names, and sometimes code from other files:
# Path: stoqdrivers/exceptions.py
# class USBDriverError(DriverError):
# """A USB Error that was raised by StoqDrivers (and not pyusb)"""
#
# Path: stoqdrivers/utils.py
# def str2bytes(text):
# if isinstance(text, bytes):
# return text
# return bytes(ord(i) for i in text)
. Output only the next line. | class UsbBase(object): |
Given the following code snippet before the placeholder: <|code_start|># Author(s): Henrique Romano <henrique@async.com.br>
#
@implementer(IBarcodeReader)
class BaseBarcodeReader(SerialBase):
# Should be defined in subclasses
model_name = None
def __init__(self, port, consts=None):
SerialBase.__init__(self, port)
def get_code(self):
return self.readline()
def get_supported_barcode_readers():
result = {}
for brand, module_names in [('metrologic', ['MC630'])]:
result[brand] = []
for module_name in module_names:
module = import_module("stoqdrivers.readers.barcode.%s.%s" % (brand, module_name))
try:
obj = getattr(module, module_name)
except AttributeError:
raise ImportError("Can't find class %s for module %s" % (module_name, module_name))
if not IBarcodeReader.implementedBy(obj):
<|code_end|>
, predict the next line using imports from the current file:
from importlib import import_module
from zope.interface import implementer
from stoqdrivers.interfaces import IBarcodeReader
from stoqdrivers.serialbase import SerialBase
and context including class names, function names, and sometimes code from other files:
# Path: stoqdrivers/interfaces.py
# class IBarcodeReader(IDevice):
# """ Interface specification describing how to interacts with barcode
# readers.
# """
#
# def get_code():
# """ Returns the code read. Note that this function should be
# called only when there are data received (you can use
# notify_read() to be notified when data was received), or it
# will block in loop waiting the data.
# """
#
# Path: stoqdrivers/serialbase.py
# class SerialBase(object):
#
# # All commands will have this prefixed
# CMD_PREFIX = '\x1b'
# CMD_SUFFIX = ''
#
# # used by readline()
# EOL_DELIMIT = '\r'
#
# # Most serial printers allow connecting a cash drawer to them. You can then
# # open the drawer, and also check its status. Some models, for instance,
# # the Radiant drawers, use inverted logic to describe whether they are
# # open, specified by this attribute, settable via BaseDevice config.
# inverted_drawer = False
#
# def __init__(self, port):
# self.set_port(port)
#
# def set_port(self, port):
# self._port = port
#
# def get_port(self):
# return self._port
#
# def fileno(self):
# return self._port.fileno()
#
# def writeline(self, data):
# self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
# return self.readline()
#
# def write(self, data):
# # pyserial is expecting bytes but we work with str in stoqdrivers
# data = str2bytes(data)
# log.debug(">>> %r (%d bytes)" % (data, len(data)))
# self._port.write(data)
#
# def read(self, n_bytes):
# # stoqdrivers is expecting str but pyserial will reply with bytes
# data = self._port.read(n_bytes)
# return bytes2str(data)
#
# def readline(self):
# out = ''
# a = 0
# retries = 10
# while True:
# if a > retries:
# raise DriverError(_("Timeout communicating with fiscal "
# "printer"))
#
# c = self.read(1)
# if not c:
# a += 1
# print('take %s' % a)
# continue
# a = 0
# if c == self.EOL_DELIMIT:
# log.debug('<<< %r' % out)
# return out
# out += c
#
# def open(self):
# if not self._port.is_open:
# self._port.open()
#
# def close(self):
# if self._port.is_open:
# # Flush whaterver is pending to write, since port.close() will close it
# # *imediatally*, losing what was pending to write.
# self._port.flush()
# self._port.close()
. Output only the next line. | raise TypeError("The driver %s %s doesn't implements a " |
Given the following code snippet before the placeholder: <|code_start|># it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
# Author(s): Henrique Romano <henrique@async.com.br>
#
@implementer(IBarcodeReader)
class BaseBarcodeReader(SerialBase):
# Should be defined in subclasses
model_name = None
def __init__(self, port, consts=None):
SerialBase.__init__(self, port)
def get_code(self):
<|code_end|>
, predict the next line using imports from the current file:
from importlib import import_module
from zope.interface import implementer
from stoqdrivers.interfaces import IBarcodeReader
from stoqdrivers.serialbase import SerialBase
and context including class names, function names, and sometimes code from other files:
# Path: stoqdrivers/interfaces.py
# class IBarcodeReader(IDevice):
# """ Interface specification describing how to interacts with barcode
# readers.
# """
#
# def get_code():
# """ Returns the code read. Note that this function should be
# called only when there are data received (you can use
# notify_read() to be notified when data was received), or it
# will block in loop waiting the data.
# """
#
# Path: stoqdrivers/serialbase.py
# class SerialBase(object):
#
# # All commands will have this prefixed
# CMD_PREFIX = '\x1b'
# CMD_SUFFIX = ''
#
# # used by readline()
# EOL_DELIMIT = '\r'
#
# # Most serial printers allow connecting a cash drawer to them. You can then
# # open the drawer, and also check its status. Some models, for instance,
# # the Radiant drawers, use inverted logic to describe whether they are
# # open, specified by this attribute, settable via BaseDevice config.
# inverted_drawer = False
#
# def __init__(self, port):
# self.set_port(port)
#
# def set_port(self, port):
# self._port = port
#
# def get_port(self):
# return self._port
#
# def fileno(self):
# return self._port.fileno()
#
# def writeline(self, data):
# self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
# return self.readline()
#
# def write(self, data):
# # pyserial is expecting bytes but we work with str in stoqdrivers
# data = str2bytes(data)
# log.debug(">>> %r (%d bytes)" % (data, len(data)))
# self._port.write(data)
#
# def read(self, n_bytes):
# # stoqdrivers is expecting str but pyserial will reply with bytes
# data = self._port.read(n_bytes)
# return bytes2str(data)
#
# def readline(self):
# out = ''
# a = 0
# retries = 10
# while True:
# if a > retries:
# raise DriverError(_("Timeout communicating with fiscal "
# "printer"))
#
# c = self.read(1)
# if not c:
# a += 1
# print('take %s' % a)
# continue
# a = 0
# if c == self.EOL_DELIMIT:
# log.debug('<<< %r' % out)
# return out
# out += c
#
# def open(self):
# if not self._port.is_open:
# self._port.open()
#
# def close(self):
# if self._port.is_open:
# # Flush whaterver is pending to write, since port.close() will close it
# # *imediatally*, losing what was pending to write.
# self._port.flush()
# self._port.close()
. Output only the next line. | return self.readline() |
Next line prediction: <|code_start|>## Copyright (C) 2016 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
@implementer(INonFiscalPrinter)
class MP4200TH(MP2100TH):
supported = True
model_name = "Bematech MP4200 TH"
max_characters = 64
<|code_end|>
. Use current file imports:
(from zope.interface import implementer
from stoqdrivers.printers.bematech.MP2100TH import MP2100TH, ESC
from stoqdrivers.interfaces import INonFiscalPrinter)
and context including class names, function names, or small code snippets from other files:
# Path: stoqdrivers/printers/bematech/MP2100TH.py
# SI = '\x0f'
# TXT_BOLD_ON = ESC + 'E'
# TXT_BOLD_OFF = ESC + 'F'
# FONT_REGULAR = ESC + 'H'
# FONT_CONDENSED = ESC + SI
# DOUBLE_HEIGHT_OFF = ESC + 'd0'
# DOUBLE_HEIGHT_ON = ESC + 'd1'
# PAPER_FULL_CUT = ESC + '\x6d'
# CHARSET_MAP = {
# 'cp850': '\x32',
# 'utf8': '\x38',
# }
# class MP2100TH(SerialBase, EscPosMixin):
# def __init__(self, port, consts=None):
# def print_qrcode(self, code):
# def separator(self):
# def open_drawer(self):
# def is_drawer_open(self):
# def set_charset(self, charset='cp850'):
# def print_barcode(self, code):
# def _setup_commandset(self, commset='\x30'):
# def _print_configuration(self):
#
# Path: stoqdrivers/interfaces.py
# class INonFiscalPrinter(IDevice):
# """ Interface used to formatting texts in non fiscal printers.
# """
#
# max_characters = Attribute("The maximum characters per line")
#
# def centralize():
# """ Centralize the text to be sent to coupon. """
#
# def descentralize():
# """ Descentralize the text to be sent to coupon. """
#
# def set_bold():
# """ The sent text will be appear in bold. """
#
# def unset_bold():
# """ Remove the bold option. """
#
# def print_line(data):
# """ Performs a line break to the given text. """
#
# def print_inline(data):
# """ Print a given text in a unique line. """
#
# def print_barcode(code):
# """ Print a barcode representing the given code. """
#
# def print_qrcode(code):
# """ Print a qrcode representing the given code. """
#
# def cut_paper():
# """ Performs a paper cutting. """
. Output only the next line. | def cut_paper(self): |
Given snippet: <|code_start|>## Copyright (C) 2016 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
@implementer(INonFiscalPrinter)
class MP4200TH(MP2100TH):
supported = True
model_name = "Bematech MP4200 TH"
max_characters = 64
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from zope.interface import implementer
from stoqdrivers.printers.bematech.MP2100TH import MP2100TH, ESC
from stoqdrivers.interfaces import INonFiscalPrinter
and context:
# Path: stoqdrivers/printers/bematech/MP2100TH.py
# SI = '\x0f'
# TXT_BOLD_ON = ESC + 'E'
# TXT_BOLD_OFF = ESC + 'F'
# FONT_REGULAR = ESC + 'H'
# FONT_CONDENSED = ESC + SI
# DOUBLE_HEIGHT_OFF = ESC + 'd0'
# DOUBLE_HEIGHT_ON = ESC + 'd1'
# PAPER_FULL_CUT = ESC + '\x6d'
# CHARSET_MAP = {
# 'cp850': '\x32',
# 'utf8': '\x38',
# }
# class MP2100TH(SerialBase, EscPosMixin):
# def __init__(self, port, consts=None):
# def print_qrcode(self, code):
# def separator(self):
# def open_drawer(self):
# def is_drawer_open(self):
# def set_charset(self, charset='cp850'):
# def print_barcode(self, code):
# def _setup_commandset(self, commset='\x30'):
# def _print_configuration(self):
#
# Path: stoqdrivers/interfaces.py
# class INonFiscalPrinter(IDevice):
# """ Interface used to formatting texts in non fiscal printers.
# """
#
# max_characters = Attribute("The maximum characters per line")
#
# def centralize():
# """ Centralize the text to be sent to coupon. """
#
# def descentralize():
# """ Descentralize the text to be sent to coupon. """
#
# def set_bold():
# """ The sent text will be appear in bold. """
#
# def unset_bold():
# """ Remove the bold option. """
#
# def print_line(data):
# """ Performs a line break to the given text. """
#
# def print_inline(data):
# """ Print a given text in a unique line. """
#
# def print_barcode(code):
# """ Print a barcode representing the given code. """
#
# def print_qrcode(code):
# """ Print a qrcode representing the given code. """
#
# def cut_paper():
# """ Performs a paper cutting. """
which might include code, classes, or functions. Output only the next line. | def cut_paper(self): |
Given snippet: <|code_start|># vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2016 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
@implementer(INonFiscalPrinter)
class MP4200TH(MP2100TH):
supported = True
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from zope.interface import implementer
from stoqdrivers.printers.bematech.MP2100TH import MP2100TH, ESC
from stoqdrivers.interfaces import INonFiscalPrinter
and context:
# Path: stoqdrivers/printers/bematech/MP2100TH.py
# SI = '\x0f'
# TXT_BOLD_ON = ESC + 'E'
# TXT_BOLD_OFF = ESC + 'F'
# FONT_REGULAR = ESC + 'H'
# FONT_CONDENSED = ESC + SI
# DOUBLE_HEIGHT_OFF = ESC + 'd0'
# DOUBLE_HEIGHT_ON = ESC + 'd1'
# PAPER_FULL_CUT = ESC + '\x6d'
# CHARSET_MAP = {
# 'cp850': '\x32',
# 'utf8': '\x38',
# }
# class MP2100TH(SerialBase, EscPosMixin):
# def __init__(self, port, consts=None):
# def print_qrcode(self, code):
# def separator(self):
# def open_drawer(self):
# def is_drawer_open(self):
# def set_charset(self, charset='cp850'):
# def print_barcode(self, code):
# def _setup_commandset(self, commset='\x30'):
# def _print_configuration(self):
#
# Path: stoqdrivers/interfaces.py
# class INonFiscalPrinter(IDevice):
# """ Interface used to formatting texts in non fiscal printers.
# """
#
# max_characters = Attribute("The maximum characters per line")
#
# def centralize():
# """ Centralize the text to be sent to coupon. """
#
# def descentralize():
# """ Descentralize the text to be sent to coupon. """
#
# def set_bold():
# """ The sent text will be appear in bold. """
#
# def unset_bold():
# """ Remove the bold option. """
#
# def print_line(data):
# """ Performs a line break to the given text. """
#
# def print_inline(data):
# """ Print a given text in a unique line. """
#
# def print_barcode(code):
# """ Print a barcode representing the given code. """
#
# def print_qrcode(code):
# """ Print a qrcode representing the given code. """
#
# def cut_paper():
# """ Performs a paper cutting. """
which might include code, classes, or functions. Output only the next line. | model_name = "Bematech MP4200 TH" |
Here is a snippet: <|code_start|> backend.barrier()
nsec = timer.median
nbytes = x.nbytes + y.nbytes + A_d._matrix_d.nbytes
gbps = nbytes / nsec * 1e-9
frac = gbps / args.stream * 100
nthreads = backend.get_max_threads()
name = backend.__class__.__name__
print("csrmm %s, %d threads, %d nnz, batch %d, %2.0f GB, %2.2f GB/s, %2.0f%% STREAM" % \
(name, nthreads, len(A.data), args.batch, nbytes/1e9, gbps, frac), flush=True)
def factors(n):
while n > 1:
for i in range(2, n + 1):
if n % i == 0:
n //= i
yield i
break
def apf(x, y, z):
fx = set(factors(x))
fy = set(factors(y))
fz = set(factors(z))
return np.average(list(fx|fy|fz))
def fft_search(args):
shape = (308,208,480)
<|code_end|>
. Write the next line using the current file imports:
import time
import argparse
import numpy as np
import scipy.sparse as spp
from indigo.util import rand64c, randM, Timer
from indigo.backends.cuda import CudaBackend
from indigo.backends.mkl import MklBackend
and context from other files:
# Path: indigo/util.py
# def rand64c(*shape, order='F'):
# def randM(M, N, density):
# def __init__(self, event, **kwargs):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, traceback):
# def fmt(k, v):
# A = A_r + 1j * A_i
# class profile(object):
, which may include functions, classes, or code. Output only the next line. | minosf, maxosf = 1.25, 1.5 |
Given the code snippet: <|code_start|>
XYZ = np.prod(x.shape[:3])
nsec = timer.median / args.trials
nflops = 5 * XYZ * np.log2(XYZ)
nbytes = 4 * x.nbytes
roofline = args.stream*1e9 * (nflops / nbytes)
frac = (nflops / nsec) / roofline * 100
nthreads = backend.get_max_threads()
name = backend.__class__.__name__
print("fft, %s, %d threads, batch %d, %2.2f GFlops/s, %2.0f%% Roofline" % \
(name, nthreads, args.batch, nflops/nsec/1e9, frac), flush=True)
def benchmark_csrmm(backend, args):
N = 150 # problem scale ~ image edge length
XYZ = N**3 # number of columns
pXYZ = int(8 * N**3 * 1.35**3) # number of rows (8 coils, 1.35 oversampling factor)
# make one nonzero per row along diagonal
indptrs = np.arange(pXYZ+1, dtype=np.int32)
indices = np.arange(pXYZ, dtype=np.int32) % XYZ
data = np.ones(pXYZ, dtype=np.complex64)
A = spp.csr_matrix((data,indices,indptrs), shape=(pXYZ,XYZ), dtype=np.complex64)
x = rand64c( XYZ,args.batch)
y = rand64c(pXYZ,args.batch)
A_d = backend.SpMatrix(A)
x_d = backend.copy_array(x)
<|code_end|>
, generate the next line using the imports in this file:
import time
import argparse
import numpy as np
import scipy.sparse as spp
from indigo.util import rand64c, randM, Timer
from indigo.backends.cuda import CudaBackend
from indigo.backends.mkl import MklBackend
and context (functions, classes, or occasionally code) from other files:
# Path: indigo/util.py
# def rand64c(*shape, order='F'):
# def randM(M, N, density):
# def __init__(self, event, **kwargs):
# def __enter__(self):
# def __exit__(self, exc_type, exc_value, traceback):
# def fmt(k, v):
# A = A_r + 1j * A_i
# class profile(object):
. Output only the next line. | y_d = backend.copy_array(y) |
Continue the code snippet: <|code_start|>#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
__all__ = ['evaluations', 'svm_load_model', 'svm_predict', 'svm_read_problem',
'svm_save_model', 'svm_train'] + svm_all
sys.path = [os.path.dirname(os.path.abspath(__file__))] + sys.path
<|code_end|>
. Use current file imports:
from builtins import str, bytes, dict, int
from builtins import object, range
from builtins import map, zip, filter
from .libsvm import *
from .libsvm import __all__ as svm_all
import os
import sys
and context (classes, functions, or code) from other files:
# Path: pattern/vector/svm/libsvm.py
# C_SVC = 0
# NU_SVC = 1
# ONE_CLASS = 2
# EPSILON_SVR = 3
# NU_SVR = 4
# LINEAR = 0
# POLY = 1
# RBF = 2
# SIGMOID = 3
# PRECOMPUTED = 4
# PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
# def print_null(s):
# def genFields(names, types):
# def fillprototype(f, restype, argtypes):
# def __str__(self):
# def gen_svm_nodearray(xi, feature_max=None, isKernel=None):
# def __init__(self, y, x, isKernel=None):
# def __init__(self, options = None):
# def __str__(self):
# def set_to_default_values(self):
# def parse_options(self, options):
# def __init__(self):
# def __del__(self):
# def get_svm_type(self):
# def get_nr_class(self):
# def get_svr_probability(self):
# def get_labels(self):
# def get_sv_indices(self):
# def get_nr_sv(self):
# def is_probability_model(self):
# def get_sv_coef(self):
# def get_SV(self):
# def toPyModel(model_ptr):
# class svm_node(Structure):
# class svm_problem(Structure):
# class svm_parameter(Structure):
# class svm_model(Structure):
. Output only the next line. | def svm_read_problem(data_file_name): |
Using the snippet: <|code_start|>
# Module pattern.graph.commonsense implements a semantic network of commonsense.
# It contains a Concept class (Node subclass), Relation class (Edge subclass),
# and a Commonsense class (Graph subclass).
# It contains about 10,000 manually annotated relations between mundane concepts,
# for example gondola is-related-to romance, or spoon is-related-to soup.
# This is the PERCEPTION dataset. See the visualizer at:
# http://nodebox.net/perception/
# Relation.type can be:
# - is-a,
# - is-part-of,
# - is-opposite-of,
# - is-property-of,
# - is-related-to,
# - is-same-as,
# - is-effect-of.
g = Commonsense()
g.add_node("spork")
g.add_edge("spork", "spoon", type="is-a")
# Concept.halo a list of concepts surrounding the given concept,
# and as such reinforce its meaning:
print()
print(g["spoon"].halo) # fork, etiquette, slurp, hot, soup, mouth, etc.
# Concept.properties is a list of properties (= adjectives) in the halo,
# sorted by betweenness centrality:
print()
<|code_end|>
, determine the next line of code. You have imports:
from builtins import str, bytes, dict, int
from pattern.graph.commonsense import Commonsense
import os
import sys
and context (class names, function names, or code) available:
# Path: pattern/graph/commonsense.py
# class Commonsense(Graph):
#
# def __init__(self, data=os.path.join(MODULE, "commonsense.csv"), **kwargs):
# """ A semantic network of commonsense, using different relation types:
# - is-a,
# - is-part-of,
# - is-opposite-of,
# - is-property-of,
# - is-related-to,
# - is-same-as,
# - is-effect-of.
# """
# Graph.__init__(self, **kwargs)
# self._properties = None
# # Load data from the given path,
# # a CSV-file of (concept1, relation, concept2, context, weight)-items.
# if data is not None:
# s = open(data, encoding = 'utf-8').read()
# s = s.strip(BOM_UTF8)
# s = ((v.strip("\"") for v in r.split(",")) for r in s.splitlines())
# for concept1, relation, concept2, context, weight in s:
# self.add_edge(concept1, concept2,
# type = relation,
# context = context,
# weight = min(int(weight) * 0.1, 1.0))
#
# @property
# def concepts(self):
# return self.nodes
#
# @property
# def relations(self):
# return self.edges
#
# @property
# def properties(self):
# """ Yields all concepts that are properties (i.e., adjectives).
# For example: "cold is-property-of winter" => "cold".
# """
# if self._properties is None:
# #self._properties = set(e.node1.id for e in self.edges if e.type == "is-property-of")
# self._properties = (e for e in self.edges if e.context == "properties")
# self._properties = set(chain(*((e.node1.id, e.node2.id) for e in self._properties)))
# return self._properties
#
# def add_node(self, id, *args, **kwargs):
# """ Returns a Concept (Node subclass).
# """
# self._properties = None
# kwargs.setdefault("base", Concept)
# return Graph.add_node(self, id, *args, **kwargs)
#
# def add_edge(self, id1, id2, *args, **kwargs):
# """ Returns a Relation between two concepts (Edge subclass).
# """
# self._properties = None
# kwargs.setdefault("base", Relation)
# return Graph.add_edge(self, id1, id2, *args, **kwargs)
#
# def remove(self, x):
# self._properties = None
# Graph.remove(self, x)
#
# def similarity(self, concept1, concept2, k=3, heuristic=COMMONALITY):
# """ Returns the similarity of the given concepts,
# by cross-comparing shortest path distance between k concept properties.
# A given concept can also be a flat list of properties, e.g. ["creepy"].
# The given heuristic is a tuple of two functions:
# 1) function(concept) returns a list of salient properties,
# 2) function(edge) returns the cost for traversing this edge (0.0-1.0).
# """
# if isinstance(concept1, str):
# concept1 = self[concept1]
# if isinstance(concept2, str):
# concept2 = self[concept2]
# if isinstance(concept1, Node):
# concept1 = heuristic[0](concept1)
# if isinstance(concept2, Node):
# concept2 = heuristic[0](concept2)
# if isinstance(concept1, list):
# concept1 = [isinstance(n, Node) and n or self[n] for n in concept1]
# if isinstance(concept2, list):
# concept2 = [isinstance(n, Node) and n or self[n] for n in concept2]
# h = lambda id1, id2: heuristic[1](self.edge(id1, id2))
# w = 0.0
# for p1 in concept1[:k]:
# for p2 in concept2[:k]:
# p = self.shortest_path(p1, p2, heuristic=h)
# w += 1.0 / (p is None and 1e10 or len(p))
# return w / k
#
# def nearest_neighbors(self, concept, concepts=[], k=3):
# """ Returns the k most similar concepts from the given list.
# """
# return sorted(concepts, key=lambda candidate: self.similarity(concept, candidate, k), reverse=True)
#
# similar = neighbors = nn = nearest_neighbors
#
# def taxonomy(self, concept, depth=3, fringe=2):
# """ Returns a list of concepts that are descendants of the given concept, using "is-a" relations.
# Creates a subgraph of "is-a" related concepts up to the given depth,
# then takes the fringe (i.e., leaves) of the subgraph.
# """
# def traversable(node, edge):
# # Follow parent-child edges.
# return edge.node2 == node and edge.type == "is-a"
# if not isinstance(concept, Node):
# concept = self[concept]
# g = self.copy(nodes=concept.flatten(depth, traversable))
# g = g.fringe(depth=fringe)
# g = [self[n.id] for n in g if n != concept]
# return g
#
# field = semantic_field = taxonomy
. Output only the next line. | print(g["spoon"].properties) # hot |
Next line prediction: <|code_start|> "three" : 3, "thirteen" : 13, "fifty" : 50,
"four" : 4, "fourteen" : 14, "sixty" : 60,
"five" : 5, "fifteen" : 15, "seventy" : 70,
"six" : 6, "sixteen" : 16, "eighty" : 80,
"seven" : 7, "seventeen" : 17, "ninety" : 90,
"eight" : 8, "eighteen" : 18,
"nine" : 9, "nineteen" : 19
}
NUMERALS_INVERSE = dict((i, w) for w, i in NUMERALS.items()) # 0 => "zero"
NUMERALS_VERBOSE = {
"half" : ( 1, 0.5),
"dozen" : (12, 0.0),
"score" : (20, 0.0)
}
ORDER = ["hundred", "thousand"] + [m + "illion" for m in ("m", "b", "tr",
"quadr",
"quint",
"sext",
"sept",
"oct",
"non",
"dec",
"undec",
"duodec",
"tredec",
"quattuordec",
"quindec",
"sexdec",
<|code_end|>
. Use current file imports:
(from builtins import str, bytes, dict, int
from builtins import map, zip, filter
from builtins import object, range
from math import log, ceil
from pattern.text.en.inflect import pluralize, referenced
import os
import sys
import re)
and context including class names, function names, or small code snippets from other files:
# Path: pattern/text/en/inflect.py
# def pluralize(word, pos=NOUN, custom={}, classical=True):
# """ Returns the plural of a given word, e.g., child => children.
# Handles nouns and adjectives, using classical inflection by default
# (i.e., where "matrix" pluralizes to "matrices" and not "matrixes").
# The custom dictionary is for user-defined replacements.
# """
# if word in custom:
# return custom[word]
# # Recurse genitives.
# # Remove the apostrophe and any trailing -s,
# # form the plural of the resultant noun, and then append an apostrophe (dog's => dogs').
# if word.endswith(("'", "'s")):
# w = word.rstrip("'s")
# w = pluralize(w, pos, custom, classical)
# if w.endswith("s"):
# return w + "'"
# else:
# return w + "'s"
# # Recurse compound words
# # (e.g., Postmasters General, mothers-in-law, Roman deities).
# w = word.replace("-", " ").split(" ")
# if len(w) > 1:
# if w[1] == "general" or \
# w[1] == "General" and \
# w[0] not in plural_categories["general-generals"]:
# return word.replace(w[0], pluralize(w[0], pos, custom, classical))
# elif w[1] in plural_prepositions:
# return word.replace(w[0], pluralize(w[0], pos, custom, classical))
# else:
# return word.replace(w[-1], pluralize(w[-1], pos, custom, classical))
# # Only a very few number of adjectives inflect.
# n = range(len(plural_rules))
# if pos.startswith(ADJECTIVE):
# n = [0, 1]
# # Apply pluralization rules.
# for i in n:
# for suffix, inflection, category, classic in plural_rules[i]:
# # A general rule, or a classic rule in classical mode.
# if category is None:
# if not classic or (classic and classical):
# if suffix.search(word) is not None:
# return suffix.sub(inflection, word)
# # A rule pertaining to a specific category of words.
# if category is not None:
# if word in plural_categories[category] and (not classic or (classic and classical)):
# if suffix.search(word) is not None:
# return suffix.sub(inflection, word)
# return word
#
# def referenced(word, article=INDEFINITE):
# """ Returns a string with the article + the word.
# """
# return "%s %s" % (_article(word, article), word)
. Output only the next line. | "septemdec", |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
router = Router()
def func_a(): pass
func_b = copy_func(func_a, 'func_b')
func_c = copy_func(func_a, 'func_c')
func_d = copy_func(func_a, 'func_d')
func_e = copy_func(func_a, 'func_e')
func_f = copy_func(func_a, 'func_f')
func_g = copy_func(func_a, 'func_g')
func_h = copy_func(func_a, 'func_h')
func_i = copy_func(func_a, 'func_i')
func_j = copy_func(func_a, 'func_j')
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from bustard.router import Router
from .utils import copy_func
and context (functions, classes, or occasionally code) from other files:
# Path: bustard/router.py
# class Router:
# def __init__(self):
# self._urls_regex_map = {}
# self._urls_builer_map = {}
#
# def register(self, path, func, methods=None):
# url_builder = URLBuilder(path)
# url_match_re = re.compile(url_builder.url_regex)
#
# methods = set([x.upper() for x in methods or ['GET']])
# if 'GET' in methods and 'HEAD' not in methods:
# methods.add('HEAD')
#
# FuncPair = collections.namedtuple('FuncPair', ('func', 'methods'))
# self._urls_regex_map[url_match_re] = FuncPair(func, methods)
# self._urls_builer_map[url_builder] = FuncPair(func, methods)
#
# def get_func(self, path):
# """
# :return: (func, methods)
# """
# for url_match, func_pair in self._urls_regex_map.items():
# m = url_match.match(path)
# if m is not None:
# return func_pair.func, func_pair.methods, m.groupdict()
# return None, None, None
#
# def url_for(self, func_name, **kwargs):
# for url_builder, func_pair in self._urls_builer_map.items():
# func = func_pair.func
# if func.__name__ == func_name:
# return url_builder.build_url(**kwargs)
# return ''
#
# Path: tests/utils.py
# def copy_func(func, name=None):
# new_func = types.FunctionType(
# func.__code__, func.__globals__,
# name or func.__name__,
# func.__defaults__, func.__closure__
# )
# new_func.__dict__.update(func.__dict__)
# return new_func
. Output only the next line. | router.register('/a', func_a, methods=['GET', 'POST']) |
Based on the snippet: <|code_start|>router.register('/e/(?P<id>\d+)', func_e, methods=['POST'])
router.register('/f/(?P<id>\d+)/(?P<code>\w+)', func_f, methods=['GET'])
# /<int:id>
router.register('/g/<id>', func_g, methods=['GET', 'POST'])
router.register('/h/<int:id>', func_h, methods=['GET', 'PUT'])
router.register('/i/<float:id>', func_i, methods=['GET', 'POST'])
router.register('/j/<path:path>', func_j, methods=['PUT', 'POST'])
@pytest.mark.parametrize('path, func_name, methods, kwargs', [
# /path
('/a', 'func_a', {'GET', 'POST', 'HEAD'}, {}),
('/a/b', None, None, None),
('/b/c/', 'func_b', {'DELETE', 'POST'}, {}),
('/b/c/d', None, None, None),
('/c/d/f', 'func_c', {'PATCH', 'PUT'}, {}),
('/c/d/g', None, None, None),
# regex
('/d/1', 'func_d', {'GET', 'HEAD'}, {'id': '1'}),
('/d/a', None, None, None),
('/e/2', 'func_e', {'POST'}, {'id': '2'}),
('/e/e', None, None, None),
('/f/3/c', 'func_f', {'GET', 'HEAD'}, {'id': '3', 'code': 'c'}),
('/f/3/c/d', None, None, None),
# /<int:id>, /<float:id>, /<path:path>
('/g/e', 'func_g', {'GET', 'POST', 'HEAD'}, {'id': 'e'}),
('/h/8', 'func_h', {'GET', 'PUT', 'HEAD'}, {'id': '8'}),
('/h/a', None, None, None),
('/i/2.3', 'func_i', {'GET', 'POST', 'HEAD'}, {'id': '2.3'}),
('/i/a', None, None, None),
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from bustard.router import Router
from .utils import copy_func
and context (classes, functions, sometimes code) from other files:
# Path: bustard/router.py
# class Router:
# def __init__(self):
# self._urls_regex_map = {}
# self._urls_builer_map = {}
#
# def register(self, path, func, methods=None):
# url_builder = URLBuilder(path)
# url_match_re = re.compile(url_builder.url_regex)
#
# methods = set([x.upper() for x in methods or ['GET']])
# if 'GET' in methods and 'HEAD' not in methods:
# methods.add('HEAD')
#
# FuncPair = collections.namedtuple('FuncPair', ('func', 'methods'))
# self._urls_regex_map[url_match_re] = FuncPair(func, methods)
# self._urls_builer_map[url_builder] = FuncPair(func, methods)
#
# def get_func(self, path):
# """
# :return: (func, methods)
# """
# for url_match, func_pair in self._urls_regex_map.items():
# m = url_match.match(path)
# if m is not None:
# return func_pair.func, func_pair.methods, m.groupdict()
# return None, None, None
#
# def url_for(self, func_name, **kwargs):
# for url_builder, func_pair in self._urls_builer_map.items():
# func = func_pair.func
# if func.__name__ == func_name:
# return url_builder.build_url(**kwargs)
# return ''
#
# Path: tests/utils.py
# def copy_func(func, name=None):
# new_func = types.FunctionType(
# func.__code__, func.__globals__,
# name or func.__name__,
# func.__defaults__, func.__closure__
# )
# new_func.__dict__.update(func.__dict__)
# return new_func
. Output only the next line. | ('/j/a/b/c/', 'func_j', {'PUT', 'POST'}, {'path': 'a/b/c/'}), |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
class Request:
def __init__(self, environ):
self.environ = environ
<|code_end|>
. Use current file imports:
(import cgi
import io
import json
from http.cookies import SimpleCookie
from .constants import HTTP_STATUS_CODES
from .utils import (
json_dumps_default, MultiDict, parse_query_string,
to_header_key, to_text, to_bytes, parse_basic_auth_header
))
and context including class names, function names, or small code snippets from other files:
# Path: bustard/constants.py
# HTTP_STATUS_CODES = {
# 100: 'Continue',
# 101: 'Switching Protocols',
# 102: 'Processing',
# 200: 'OK',
# 201: 'Created',
# 202: 'Accepted',
# 203: 'Non Authoritative Information',
# 204: 'No Content',
# 205: 'Reset Content',
# 206: 'Partial Content',
# 207: 'Multi Status',
# 226: 'IM Used', # see RFC 3229
# 300: 'Multiple Choices',
# 301: 'Moved Permanently',
# 302: 'Found',
# 303: 'See Other',
# 304: 'Not Modified',
# 305: 'Use Proxy',
# 307: 'Temporary Redirect',
# 400: 'Bad Request',
# 401: 'Unauthorized',
# 402: 'Payment Required', # unused
# 403: 'Forbidden',
# 404: 'Not Found',
# 405: 'Method Not Allowed',
# 406: 'Not Acceptable',
# 407: 'Proxy Authentication Required',
# 408: 'Request Timeout',
# 409: 'Conflict',
# 410: 'Gone',
# 411: 'Length Required',
# 412: 'Precondition Failed',
# 413: 'Request Entity Too Large',
# 414: 'Request URI Too Long',
# 415: 'Unsupported Media Type',
# 416: 'Requested Range Not Satisfiable',
# 417: 'Expectation Failed',
# 418: 'I\'m a teapot', # see RFC 2324
# 422: 'Unprocessable Entity',
# 423: 'Locked',
# 424: 'Failed Dependency',
# 426: 'Upgrade Required',
# 428: 'Precondition Required', # see RFC 6585
# 429: 'Too Many Requests',
# 431: 'Request Header Fields Too Large',
# 449: 'Retry With', # proprietary MS extension
# 451: 'Unavailable For Legal Reasons',
# 500: 'Internal Server Error',
# 501: 'Not Implemented',
# 502: 'Bad Gateway',
# 503: 'Service Unavailable',
# 504: 'Gateway Timeout',
# 505: 'HTTP Version Not Supported',
# 507: 'Insufficient Storage',
# 510: 'Not Extended'
# }
#
# Path: bustard/utils.py
# def json_dumps_default(obj):
# if isinstance(obj, collections.UserDict):
# return obj.to_dict()
# return obj
#
# class MultiDict(collections.UserDict):
#
# def getlist(self, key):
# return self.data[key]
#
# def to_dict(self):
# return self.data
#
# def __getitem__(self, key):
# return self.data[key][-1]
#
# def __setitem__(self, key, value):
# if isinstance(value, (list, tuple)):
# self.data[key] = list(value)
# else:
# self.data[key] = [value]
#
# def __repr__(self):
# return '{}({})'.format(self.__class__.__name__, self.data)
#
# def parse_query_string(query_string, encoding='utf-8'):
# query_dict = collections.defaultdict(list)
# for query_item in query_string.split('&'):
# if '=' not in query_item:
# continue
# keyword, value = query_item.split('=', 1)
# value = urllib.parse.unquote_plus(value)
# query_dict[keyword].append(to_text(value, encoding=encoding))
# return query_dict
#
# def to_header_key(key):
# return '-'.join(x.capitalize() for x in key.split('-'))
#
# def to_text(st, encoding='utf-8'):
# if isinstance(st, str):
# return st
# elif isinstance(st, collections.ByteString):
# return st.decode(encoding)
# else:
# return str(st)
#
# def to_bytes(bt, encoding='utf-8'):
# if isinstance(bt, collections.ByteString):
# return bt
# elif isinstance(bt, str):
# return bt.encode(encoding)
# else:
# return bytes(bt)
#
# def parse_basic_auth_header(value):
# try:
# auth_type, auth_info = to_bytes(value).split(None, 1)
# except ValueError:
# return
# auth_type = auth_type.lower()
#
# if auth_type == b'basic':
# try:
# username, password = base64.b64decode(auth_info).split(b':', 1)
# except (binascii.Error, ValueError):
# return
#
# return Authorization(
# to_text(auth_type),
# username=to_text(username),
# password=to_text(password)
# )
. Output only the next line. | @property |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
class Request:
def __init__(self, environ):
self.environ = environ
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import cgi
import io
import json
from http.cookies import SimpleCookie
from .constants import HTTP_STATUS_CODES
from .utils import (
json_dumps_default, MultiDict, parse_query_string,
to_header_key, to_text, to_bytes, parse_basic_auth_header
)
and context:
# Path: bustard/constants.py
# HTTP_STATUS_CODES = {
# 100: 'Continue',
# 101: 'Switching Protocols',
# 102: 'Processing',
# 200: 'OK',
# 201: 'Created',
# 202: 'Accepted',
# 203: 'Non Authoritative Information',
# 204: 'No Content',
# 205: 'Reset Content',
# 206: 'Partial Content',
# 207: 'Multi Status',
# 226: 'IM Used', # see RFC 3229
# 300: 'Multiple Choices',
# 301: 'Moved Permanently',
# 302: 'Found',
# 303: 'See Other',
# 304: 'Not Modified',
# 305: 'Use Proxy',
# 307: 'Temporary Redirect',
# 400: 'Bad Request',
# 401: 'Unauthorized',
# 402: 'Payment Required', # unused
# 403: 'Forbidden',
# 404: 'Not Found',
# 405: 'Method Not Allowed',
# 406: 'Not Acceptable',
# 407: 'Proxy Authentication Required',
# 408: 'Request Timeout',
# 409: 'Conflict',
# 410: 'Gone',
# 411: 'Length Required',
# 412: 'Precondition Failed',
# 413: 'Request Entity Too Large',
# 414: 'Request URI Too Long',
# 415: 'Unsupported Media Type',
# 416: 'Requested Range Not Satisfiable',
# 417: 'Expectation Failed',
# 418: 'I\'m a teapot', # see RFC 2324
# 422: 'Unprocessable Entity',
# 423: 'Locked',
# 424: 'Failed Dependency',
# 426: 'Upgrade Required',
# 428: 'Precondition Required', # see RFC 6585
# 429: 'Too Many Requests',
# 431: 'Request Header Fields Too Large',
# 449: 'Retry With', # proprietary MS extension
# 451: 'Unavailable For Legal Reasons',
# 500: 'Internal Server Error',
# 501: 'Not Implemented',
# 502: 'Bad Gateway',
# 503: 'Service Unavailable',
# 504: 'Gateway Timeout',
# 505: 'HTTP Version Not Supported',
# 507: 'Insufficient Storage',
# 510: 'Not Extended'
# }
#
# Path: bustard/utils.py
# def json_dumps_default(obj):
# if isinstance(obj, collections.UserDict):
# return obj.to_dict()
# return obj
#
# class MultiDict(collections.UserDict):
#
# def getlist(self, key):
# return self.data[key]
#
# def to_dict(self):
# return self.data
#
# def __getitem__(self, key):
# return self.data[key][-1]
#
# def __setitem__(self, key, value):
# if isinstance(value, (list, tuple)):
# self.data[key] = list(value)
# else:
# self.data[key] = [value]
#
# def __repr__(self):
# return '{}({})'.format(self.__class__.__name__, self.data)
#
# def parse_query_string(query_string, encoding='utf-8'):
# query_dict = collections.defaultdict(list)
# for query_item in query_string.split('&'):
# if '=' not in query_item:
# continue
# keyword, value = query_item.split('=', 1)
# value = urllib.parse.unquote_plus(value)
# query_dict[keyword].append(to_text(value, encoding=encoding))
# return query_dict
#
# def to_header_key(key):
# return '-'.join(x.capitalize() for x in key.split('-'))
#
# def to_text(st, encoding='utf-8'):
# if isinstance(st, str):
# return st
# elif isinstance(st, collections.ByteString):
# return st.decode(encoding)
# else:
# return str(st)
#
# def to_bytes(bt, encoding='utf-8'):
# if isinstance(bt, collections.ByteString):
# return bt
# elif isinstance(bt, str):
# return bt.encode(encoding)
# else:
# return bytes(bt)
#
# def parse_basic_auth_header(value):
# try:
# auth_type, auth_info = to_bytes(value).split(None, 1)
# except ValueError:
# return
# auth_type = auth_type.lower()
#
# if auth_type == b'basic':
# try:
# username, password = base64.b64decode(auth_info).split(b':', 1)
# except (binascii.Error, ValueError):
# return
#
# return Authorization(
# to_text(auth_type),
# username=to_text(username),
# password=to_text(password)
# )
which might include code, classes, or functions. Output only the next line. | @property |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
class Request:
def __init__(self, environ):
self.environ = environ
<|code_end|>
using the current file's imports:
import cgi
import io
import json
from http.cookies import SimpleCookie
from .constants import HTTP_STATUS_CODES
from .utils import (
json_dumps_default, MultiDict, parse_query_string,
to_header_key, to_text, to_bytes, parse_basic_auth_header
)
and any relevant context from other files:
# Path: bustard/constants.py
# HTTP_STATUS_CODES = {
# 100: 'Continue',
# 101: 'Switching Protocols',
# 102: 'Processing',
# 200: 'OK',
# 201: 'Created',
# 202: 'Accepted',
# 203: 'Non Authoritative Information',
# 204: 'No Content',
# 205: 'Reset Content',
# 206: 'Partial Content',
# 207: 'Multi Status',
# 226: 'IM Used', # see RFC 3229
# 300: 'Multiple Choices',
# 301: 'Moved Permanently',
# 302: 'Found',
# 303: 'See Other',
# 304: 'Not Modified',
# 305: 'Use Proxy',
# 307: 'Temporary Redirect',
# 400: 'Bad Request',
# 401: 'Unauthorized',
# 402: 'Payment Required', # unused
# 403: 'Forbidden',
# 404: 'Not Found',
# 405: 'Method Not Allowed',
# 406: 'Not Acceptable',
# 407: 'Proxy Authentication Required',
# 408: 'Request Timeout',
# 409: 'Conflict',
# 410: 'Gone',
# 411: 'Length Required',
# 412: 'Precondition Failed',
# 413: 'Request Entity Too Large',
# 414: 'Request URI Too Long',
# 415: 'Unsupported Media Type',
# 416: 'Requested Range Not Satisfiable',
# 417: 'Expectation Failed',
# 418: 'I\'m a teapot', # see RFC 2324
# 422: 'Unprocessable Entity',
# 423: 'Locked',
# 424: 'Failed Dependency',
# 426: 'Upgrade Required',
# 428: 'Precondition Required', # see RFC 6585
# 429: 'Too Many Requests',
# 431: 'Request Header Fields Too Large',
# 449: 'Retry With', # proprietary MS extension
# 451: 'Unavailable For Legal Reasons',
# 500: 'Internal Server Error',
# 501: 'Not Implemented',
# 502: 'Bad Gateway',
# 503: 'Service Unavailable',
# 504: 'Gateway Timeout',
# 505: 'HTTP Version Not Supported',
# 507: 'Insufficient Storage',
# 510: 'Not Extended'
# }
#
# Path: bustard/utils.py
# def json_dumps_default(obj):
# if isinstance(obj, collections.UserDict):
# return obj.to_dict()
# return obj
#
# class MultiDict(collections.UserDict):
#
# def getlist(self, key):
# return self.data[key]
#
# def to_dict(self):
# return self.data
#
# def __getitem__(self, key):
# return self.data[key][-1]
#
# def __setitem__(self, key, value):
# if isinstance(value, (list, tuple)):
# self.data[key] = list(value)
# else:
# self.data[key] = [value]
#
# def __repr__(self):
# return '{}({})'.format(self.__class__.__name__, self.data)
#
# def parse_query_string(query_string, encoding='utf-8'):
# query_dict = collections.defaultdict(list)
# for query_item in query_string.split('&'):
# if '=' not in query_item:
# continue
# keyword, value = query_item.split('=', 1)
# value = urllib.parse.unquote_plus(value)
# query_dict[keyword].append(to_text(value, encoding=encoding))
# return query_dict
#
# def to_header_key(key):
# return '-'.join(x.capitalize() for x in key.split('-'))
#
# def to_text(st, encoding='utf-8'):
# if isinstance(st, str):
# return st
# elif isinstance(st, collections.ByteString):
# return st.decode(encoding)
# else:
# return str(st)
#
# def to_bytes(bt, encoding='utf-8'):
# if isinstance(bt, collections.ByteString):
# return bt
# elif isinstance(bt, str):
# return bt.encode(encoding)
# else:
# return bytes(bt)
#
# def parse_basic_auth_header(value):
# try:
# auth_type, auth_info = to_bytes(value).split(None, 1)
# except ValueError:
# return
# auth_type = auth_type.lower()
#
# if auth_type == b'basic':
# try:
# username, password = base64.b64decode(auth_info).split(b':', 1)
# except (binascii.Error, ValueError):
# return
#
# return Authorization(
# to_text(auth_type),
# username=to_text(username),
# password=to_text(password)
# )
. Output only the next line. | @property |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
class Request:
def __init__(self, environ):
self.environ = environ
@property
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import cgi
import io
import json
from http.cookies import SimpleCookie
from .constants import HTTP_STATUS_CODES
from .utils import (
json_dumps_default, MultiDict, parse_query_string,
to_header_key, to_text, to_bytes, parse_basic_auth_header
)
and context:
# Path: bustard/constants.py
# HTTP_STATUS_CODES = {
# 100: 'Continue',
# 101: 'Switching Protocols',
# 102: 'Processing',
# 200: 'OK',
# 201: 'Created',
# 202: 'Accepted',
# 203: 'Non Authoritative Information',
# 204: 'No Content',
# 205: 'Reset Content',
# 206: 'Partial Content',
# 207: 'Multi Status',
# 226: 'IM Used', # see RFC 3229
# 300: 'Multiple Choices',
# 301: 'Moved Permanently',
# 302: 'Found',
# 303: 'See Other',
# 304: 'Not Modified',
# 305: 'Use Proxy',
# 307: 'Temporary Redirect',
# 400: 'Bad Request',
# 401: 'Unauthorized',
# 402: 'Payment Required', # unused
# 403: 'Forbidden',
# 404: 'Not Found',
# 405: 'Method Not Allowed',
# 406: 'Not Acceptable',
# 407: 'Proxy Authentication Required',
# 408: 'Request Timeout',
# 409: 'Conflict',
# 410: 'Gone',
# 411: 'Length Required',
# 412: 'Precondition Failed',
# 413: 'Request Entity Too Large',
# 414: 'Request URI Too Long',
# 415: 'Unsupported Media Type',
# 416: 'Requested Range Not Satisfiable',
# 417: 'Expectation Failed',
# 418: 'I\'m a teapot', # see RFC 2324
# 422: 'Unprocessable Entity',
# 423: 'Locked',
# 424: 'Failed Dependency',
# 426: 'Upgrade Required',
# 428: 'Precondition Required', # see RFC 6585
# 429: 'Too Many Requests',
# 431: 'Request Header Fields Too Large',
# 449: 'Retry With', # proprietary MS extension
# 451: 'Unavailable For Legal Reasons',
# 500: 'Internal Server Error',
# 501: 'Not Implemented',
# 502: 'Bad Gateway',
# 503: 'Service Unavailable',
# 504: 'Gateway Timeout',
# 505: 'HTTP Version Not Supported',
# 507: 'Insufficient Storage',
# 510: 'Not Extended'
# }
#
# Path: bustard/utils.py
# def json_dumps_default(obj):
# if isinstance(obj, collections.UserDict):
# return obj.to_dict()
# return obj
#
# class MultiDict(collections.UserDict):
#
# def getlist(self, key):
# return self.data[key]
#
# def to_dict(self):
# return self.data
#
# def __getitem__(self, key):
# return self.data[key][-1]
#
# def __setitem__(self, key, value):
# if isinstance(value, (list, tuple)):
# self.data[key] = list(value)
# else:
# self.data[key] = [value]
#
# def __repr__(self):
# return '{}({})'.format(self.__class__.__name__, self.data)
#
# def parse_query_string(query_string, encoding='utf-8'):
# query_dict = collections.defaultdict(list)
# for query_item in query_string.split('&'):
# if '=' not in query_item:
# continue
# keyword, value = query_item.split('=', 1)
# value = urllib.parse.unquote_plus(value)
# query_dict[keyword].append(to_text(value, encoding=encoding))
# return query_dict
#
# def to_header_key(key):
# return '-'.join(x.capitalize() for x in key.split('-'))
#
# def to_text(st, encoding='utf-8'):
# if isinstance(st, str):
# return st
# elif isinstance(st, collections.ByteString):
# return st.decode(encoding)
# else:
# return str(st)
#
# def to_bytes(bt, encoding='utf-8'):
# if isinstance(bt, collections.ByteString):
# return bt
# elif isinstance(bt, str):
# return bt.encode(encoding)
# else:
# return bytes(bt)
#
# def parse_basic_auth_header(value):
# try:
# auth_type, auth_info = to_bytes(value).split(None, 1)
# except ValueError:
# return
# auth_type = auth_type.lower()
#
# if auth_type == b'basic':
# try:
# username, password = base64.b64decode(auth_info).split(b':', 1)
# except (binascii.Error, ValueError):
# return
#
# return Authorization(
# to_text(auth_type),
# username=to_text(username),
# password=to_text(password)
# )
which might include code, classes, or functions. Output only the next line. | def method(self): |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
class Request:
def __init__(self, environ):
self.environ = environ
<|code_end|>
, predict the immediate next line with the help of imports:
import cgi
import io
import json
from http.cookies import SimpleCookie
from .constants import HTTP_STATUS_CODES
from .utils import (
json_dumps_default, MultiDict, parse_query_string,
to_header_key, to_text, to_bytes, parse_basic_auth_header
)
and context (classes, functions, sometimes code) from other files:
# Path: bustard/constants.py
# HTTP_STATUS_CODES = {
# 100: 'Continue',
# 101: 'Switching Protocols',
# 102: 'Processing',
# 200: 'OK',
# 201: 'Created',
# 202: 'Accepted',
# 203: 'Non Authoritative Information',
# 204: 'No Content',
# 205: 'Reset Content',
# 206: 'Partial Content',
# 207: 'Multi Status',
# 226: 'IM Used', # see RFC 3229
# 300: 'Multiple Choices',
# 301: 'Moved Permanently',
# 302: 'Found',
# 303: 'See Other',
# 304: 'Not Modified',
# 305: 'Use Proxy',
# 307: 'Temporary Redirect',
# 400: 'Bad Request',
# 401: 'Unauthorized',
# 402: 'Payment Required', # unused
# 403: 'Forbidden',
# 404: 'Not Found',
# 405: 'Method Not Allowed',
# 406: 'Not Acceptable',
# 407: 'Proxy Authentication Required',
# 408: 'Request Timeout',
# 409: 'Conflict',
# 410: 'Gone',
# 411: 'Length Required',
# 412: 'Precondition Failed',
# 413: 'Request Entity Too Large',
# 414: 'Request URI Too Long',
# 415: 'Unsupported Media Type',
# 416: 'Requested Range Not Satisfiable',
# 417: 'Expectation Failed',
# 418: 'I\'m a teapot', # see RFC 2324
# 422: 'Unprocessable Entity',
# 423: 'Locked',
# 424: 'Failed Dependency',
# 426: 'Upgrade Required',
# 428: 'Precondition Required', # see RFC 6585
# 429: 'Too Many Requests',
# 431: 'Request Header Fields Too Large',
# 449: 'Retry With', # proprietary MS extension
# 451: 'Unavailable For Legal Reasons',
# 500: 'Internal Server Error',
# 501: 'Not Implemented',
# 502: 'Bad Gateway',
# 503: 'Service Unavailable',
# 504: 'Gateway Timeout',
# 505: 'HTTP Version Not Supported',
# 507: 'Insufficient Storage',
# 510: 'Not Extended'
# }
#
# Path: bustard/utils.py
# def json_dumps_default(obj):
# if isinstance(obj, collections.UserDict):
# return obj.to_dict()
# return obj
#
# class MultiDict(collections.UserDict):
#
# def getlist(self, key):
# return self.data[key]
#
# def to_dict(self):
# return self.data
#
# def __getitem__(self, key):
# return self.data[key][-1]
#
# def __setitem__(self, key, value):
# if isinstance(value, (list, tuple)):
# self.data[key] = list(value)
# else:
# self.data[key] = [value]
#
# def __repr__(self):
# return '{}({})'.format(self.__class__.__name__, self.data)
#
# def parse_query_string(query_string, encoding='utf-8'):
# query_dict = collections.defaultdict(list)
# for query_item in query_string.split('&'):
# if '=' not in query_item:
# continue
# keyword, value = query_item.split('=', 1)
# value = urllib.parse.unquote_plus(value)
# query_dict[keyword].append(to_text(value, encoding=encoding))
# return query_dict
#
# def to_header_key(key):
# return '-'.join(x.capitalize() for x in key.split('-'))
#
# def to_text(st, encoding='utf-8'):
# if isinstance(st, str):
# return st
# elif isinstance(st, collections.ByteString):
# return st.decode(encoding)
# else:
# return str(st)
#
# def to_bytes(bt, encoding='utf-8'):
# if isinstance(bt, collections.ByteString):
# return bt
# elif isinstance(bt, str):
# return bt.encode(encoding)
# else:
# return bytes(bt)
#
# def parse_basic_auth_header(value):
# try:
# auth_type, auth_info = to_bytes(value).split(None, 1)
# except ValueError:
# return
# auth_type = auth_type.lower()
#
# if auth_type == b'basic':
# try:
# username, password = base64.b64decode(auth_info).split(b':', 1)
# except (binascii.Error, ValueError):
# return
#
# return Authorization(
# to_text(auth_type),
# username=to_text(username),
# password=to_text(password)
# )
. Output only the next line. | @property |
Using the snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
class WSGIServer:
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
allow_reuse_address = True
default_request_version = 'HTTP/1.1'
server_version = 'WSGIServer/0.1'
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
<|code_end|>
, determine the next line of code. You have imports:
import datetime
import io
import socket
import sys
import time
import urllib
from .utils import to_text, to_bytes
and context (class names, function names, or code) available:
# Path: bustard/utils.py
# def to_text(st, encoding='utf-8'):
# if isinstance(st, str):
# return st
# elif isinstance(st, collections.ByteString):
# return st.decode(encoding)
# else:
# return str(st)
#
# def to_bytes(bt, encoding='utf-8'):
# if isinstance(bt, collections.ByteString):
# return bt
# elif isinstance(bt, str):
# return bt.encode(encoding)
# else:
# return bytes(bt)
. Output only the next line. | monthname = [None, |
Predict the next line for this snippet: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
class WSGIServer:
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
allow_reuse_address = True
default_request_version = 'HTTP/1.1'
server_version = 'WSGIServer/0.1'
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def __init__(self, server_address):
# 创建 socket
self.socket = socket.socket(self.address_family, self.socket_type)
# 绑定
self.server_bind(server_address)
# 监听
self.server_activate()
# 基本的 environ
self.setup_environ()
self.headers_set = []
<|code_end|>
with the help of current file imports:
import datetime
import io
import socket
import sys
import time
import urllib
from .utils import to_text, to_bytes
and context from other files:
# Path: bustard/utils.py
# def to_text(st, encoding='utf-8'):
# if isinstance(st, str):
# return st
# elif isinstance(st, collections.ByteString):
# return st.decode(encoding)
# else:
# return str(st)
#
# def to_bytes(bt, encoding='utf-8'):
# if isinstance(bt, collections.ByteString):
# return bt
# elif isinstance(bt, str):
# return bt.encode(encoding)
# else:
# return bytes(bt)
, which may contain function names, class names, or code. Output only the next line. | def server_bind(self, server_address): |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
http_methods = ('get', 'post', 'head', 'options',
'delete', 'put', 'trace', 'patch')
class View:
decorators = ()
def dispatch_request(self, request, *args, **kwargs):
method = request.method.lower()
if method == 'head' and not hasattr(self, 'head'):
method = 'get'
view_func = getattr(self, method)
return view_func(request, *args, **kwargs)
@classmethod
def as_view(cls, name=None, *class_args, **class_kwargs):
def view(request, *args, **kwargs):
instance = view.view_class(*class_args, **class_kwargs)
return instance.dispatch_request(request, *args, **kwargs)
for decorator in cls.decorators:
view = decorator(view)
view.view_class = cls
view.__name__ = name or cls.__name__
<|code_end|>
. Write the next line using the current file imports:
import mimetypes
import os
from .exceptions import NotFound
from .http import Response
and context from other files:
# Path: bustard/exceptions.py
# class NotFound(HTTPException):
# def __init__(self):
# self.response = Response(NOTFOUND_HTML, status_code=404)
#
# Path: bustard/http.py
# class Response:
#
# def __init__(self, content=b'', status_code=200,
# content_type='text/html; charset=utf-8',
# headers=None):
# self._content = content
# self._status_code = status_code
# _headers = headers or {}
# _headers.setdefault('Content-Type', content_type)
# if isinstance(_headers, Headers):
# self._headers = _headers
# else:
# self._headers = Headers(_headers)
# self._cookies = SimpleCookie()
# self._load_cookies_from_headers()
#
# def _load_cookies_from_headers(self):
# cookies = self._headers.to_dict().pop('Set-Cookie', [])
# for cookie in cookies:
# self._cookies.load(cookie)
#
# @property
# def content(self):
# return self._content
#
# @content.setter
# def content(self, value):
# if isinstance(value, str):
# value = value.encode('utf-8')
# self._content = value
# body = data = content
#
# def get_data(self):
# return self._content
#
# @property
# def content_type(self, value):
# return self.headers.get('Content-Type', '')
#
# @content_type.setter
# def content_type(self, value):
# self.headers['Content-Type'] = value
#
# @property
# def content_length(self):
# return int(self.headers.get('Content-Length', '0'))
#
# @property
# def status_code(self):
# return self._status_code
#
# @status_code.setter
# def status_code(self, value):
# self._status_code = value
#
# @property
# def status(self):
# code = self._status_code
# return response_status_string(code)
#
# @property
# def headers(self):
# return self._headers
#
# @headers.setter
# def headers(self, value):
# self._headers = Headers(value)
#
# @property
# def content_type(self):
# return self._headers.get('Content-Type', '')
#
# @content_type.setter
# def content_type(self, value):
# self._headers['Content-Type'] = value
#
# @property
# def cookies(self):
# return self._cookies
#
# def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
# domain=None, secure=False, httponly=False):
# cookie = cookie_dump(
# key, value=value, max_age=max_age, expires=expires, path=path,
# domain=domain, secure=secure, httponly=httponly
# )
# self._cookies.load(cookie)
#
# def delete_cookie(self, key, max_age=0,
# expires='Thu, 01-Jan-1970 00:00:00 GMT'):
# self.set_cookie(key, value='', max_age=max_age, expires=expires)
#
# @property
# def headers_list(self):
# # normal headers
# headers_list = list(self.headers.to_list())
#
# # set-cookies
# headers_list.extend(
# ('Set-Cookie', value.OutputString())
# for value in self.cookies.values()
# )
# return headers_list
#
# def json(self):
# return json.loads(to_text(self.data))
#
# def __repr__(self):
# return '<{} [{}]>'.format(self.__class__.__name__, self.status_code)
, which may include functions, classes, or code. Output only the next line. | methods = [] |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
http_methods = ('get', 'post', 'head', 'options',
'delete', 'put', 'trace', 'patch')
class View:
decorators = ()
def dispatch_request(self, request, *args, **kwargs):
method = request.method.lower()
if method == 'head' and not hasattr(self, 'head'):
method = 'get'
view_func = getattr(self, method)
return view_func(request, *args, **kwargs)
@classmethod
def as_view(cls, name=None, *class_args, **class_kwargs):
def view(request, *args, **kwargs):
instance = view.view_class(*class_args, **class_kwargs)
return instance.dispatch_request(request, *args, **kwargs)
for decorator in cls.decorators:
<|code_end|>
, determine the next line of code. You have imports:
import mimetypes
import os
from .exceptions import NotFound
from .http import Response
and context (class names, function names, or code) available:
# Path: bustard/exceptions.py
# class NotFound(HTTPException):
# def __init__(self):
# self.response = Response(NOTFOUND_HTML, status_code=404)
#
# Path: bustard/http.py
# class Response:
#
# def __init__(self, content=b'', status_code=200,
# content_type='text/html; charset=utf-8',
# headers=None):
# self._content = content
# self._status_code = status_code
# _headers = headers or {}
# _headers.setdefault('Content-Type', content_type)
# if isinstance(_headers, Headers):
# self._headers = _headers
# else:
# self._headers = Headers(_headers)
# self._cookies = SimpleCookie()
# self._load_cookies_from_headers()
#
# def _load_cookies_from_headers(self):
# cookies = self._headers.to_dict().pop('Set-Cookie', [])
# for cookie in cookies:
# self._cookies.load(cookie)
#
# @property
# def content(self):
# return self._content
#
# @content.setter
# def content(self, value):
# if isinstance(value, str):
# value = value.encode('utf-8')
# self._content = value
# body = data = content
#
# def get_data(self):
# return self._content
#
# @property
# def content_type(self, value):
# return self.headers.get('Content-Type', '')
#
# @content_type.setter
# def content_type(self, value):
# self.headers['Content-Type'] = value
#
# @property
# def content_length(self):
# return int(self.headers.get('Content-Length', '0'))
#
# @property
# def status_code(self):
# return self._status_code
#
# @status_code.setter
# def status_code(self, value):
# self._status_code = value
#
# @property
# def status(self):
# code = self._status_code
# return response_status_string(code)
#
# @property
# def headers(self):
# return self._headers
#
# @headers.setter
# def headers(self, value):
# self._headers = Headers(value)
#
# @property
# def content_type(self):
# return self._headers.get('Content-Type', '')
#
# @content_type.setter
# def content_type(self, value):
# self._headers['Content-Type'] = value
#
# @property
# def cookies(self):
# return self._cookies
#
# def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
# domain=None, secure=False, httponly=False):
# cookie = cookie_dump(
# key, value=value, max_age=max_age, expires=expires, path=path,
# domain=domain, secure=secure, httponly=httponly
# )
# self._cookies.load(cookie)
#
# def delete_cookie(self, key, max_age=0,
# expires='Thu, 01-Jan-1970 00:00:00 GMT'):
# self.set_cookie(key, value='', max_age=max_age, expires=expires)
#
# @property
# def headers_list(self):
# # normal headers
# headers_list = list(self.headers.to_list())
#
# # set-cookies
# headers_list.extend(
# ('Set-Cookie', value.OutputString())
# for value in self.cookies.values()
# )
# return headers_list
#
# def json(self):
# return json.loads(to_text(self.data))
#
# def __repr__(self):
# return '<{} [{}]>'.format(self.__class__.__name__, self.status_code)
. Output only the next line. | view = decorator(view) |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
app = Bustard()
@app.route('/set/<value>')
def set_session(request, value):
request.session['name'] = value
return 'hello {}'.format(value)
@app.route('/')
def get_session(request):
<|code_end|>
, generate the next line using the imports in this file:
from bustard.app import Bustard
and context (functions, classes, or occasionally code) from other files:
# Path: bustard/app.py
# class Bustard:
# session_class = sessions.MemorySession
# before_request_hooks = (sessions.before_request_hook,)
# after_request_hooks = (sessions.after_request_hook,)
#
# def __init__(self, name='', template_dir='',
# template_default_context=None):
# self.name = name
# self._router = Router()
# self.template_dir = template_dir
# if template_default_context is not None:
# self.template_default_context = template_default_context
# else:
# self.template_default_context = {}
# self.template_default_context.setdefault('url_for', self.url_for)
#
# self._before_request_hooks = []
# self._before_request_hooks.extend(self.before_request_hooks)
# self._after_request_hooks = []
# self._after_request_hooks.extend(self.after_request_hooks)
#
# self._config = {}
# self._config.update(CONFIGURE)
#
# @property
# def config(self):
# return self._config
#
# def render_template(self, template_name, **kwargs):
# return render_template(
# template_name, template_dir=self.template_dir,
# default_context=self.template_default_context,
# context=kwargs
# ).encode('utf-8')
#
# def url_for(self, func_name, _request=None, _external=False, **kwargs):
# url = self._router.url_for(func_name, **kwargs)
# if _external:
# request = _request
# url = '{}://{}{}'.format(request.scheme, request.host, url)
# return url
#
# def url_resolve(self, path):
# """url -> view
#
# :return: (func, methods, func_kwargs)
# """
# return self._router.get_func(path)
#
# def __call__(self, environ, start_response):
# """for wsgi server"""
# self.start_response = start_response
# path = environ['PATH_INFO']
# method = environ['REQUEST_METHOD']
# func, methods, func_kwargs = self.url_resolve(path)
#
# try:
# if func is None:
# self.notfound()
# if method not in methods:
# self.abort(405)
# request = Request(environ)
# result = self.handle_before_request_hooks(request, view_func=func)
# if isinstance(result, Response):
# response = result
# else:
# response = self.handle_view(request, func, func_kwargs)
# self.handle_after_request_hooks(request, response, view_func=func)
# except HTTPException as ex:
# response = ex.response
#
# return self._start_response(response)
#
# def handle_view(self, request, view_func, func_kwargs):
# result = view_func(request, **func_kwargs)
# if isinstance(result, (list, tuple)):
# response = Response(content=result[0],
# status_code=result[1],
# headers=result[2])
# elif isinstance(result, Response):
# response = result
# else:
# response = Response(result)
# return response
#
# def _start_response(self, response):
# body = response.body
# status_code = response.status
# headers_list = response.headers_list
# self.start_response(status_code, headers_list)
#
# if isinstance(body, collections.Iterator):
# return (to_bytes(x) for x in body)
# else:
# return [to_bytes(body)]
#
# def route(self, path, methods=None):
#
# def wrapper(view_func):
# self._router.register(path, view_func, methods)
# return view_func
#
# return wrapper
#
# def add_url_rule(self, path, view_func):
# methods = view_func.methods
# self.route(path, methods=methods)(view_func)
#
# def before_request(self, func):
# self._before_request_hooks.append(func)
# return func
#
# def handle_before_request_hooks(self, request, view_func):
# hooks = self._before_request_hooks
# for hook in hooks:
# if len(inspect.signature(hook).parameters) > 1:
# result = hook(request, view_func, self)
# else:
# result = hook(request)
# if isinstance(result, Response):
# return result
#
# def after_request(self, func):
# self._after_request_hooks.append(func)
# return func
#
# def handle_after_request_hooks(self, request, response, view_func):
# hooks = self._after_request_hooks
# for hook in hooks:
# if len(inspect.signature(hook).parameters) > 2:
# hook(request, response, view_func, self)
# else:
# hook(request, response)
#
# def notfound(self):
# raise NotFound()
#
# def abort(self, code):
# raise HTTPException(Response(status_code=code))
#
# def make_response(self, content=b'', **kwargs):
# if isinstance(content, Response):
# return content
# return Response(content, **kwargs)
#
# def test_client(self):
# return Client(self)
#
# def run(self, host='127.0.0.1', port=5000):
# address = (host, port)
# httpd = WSGIRefServer(host, port)
# print('WSGIServer: Serving HTTP on %s ...\n' % str(address))
# httpd.run(self)
. Output only the next line. | value = request.session.get('name', '') |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
app = Bustard()
@app.route('/')
def helloword(request):
return 'hello world'
if __name__ == '__main__':
<|code_end|>
, determine the next line of code. You have imports:
from bustard.app import Bustard
and context (class names, function names, or code) available:
# Path: bustard/app.py
# class Bustard:
# session_class = sessions.MemorySession
# before_request_hooks = (sessions.before_request_hook,)
# after_request_hooks = (sessions.after_request_hook,)
#
# def __init__(self, name='', template_dir='',
# template_default_context=None):
# self.name = name
# self._router = Router()
# self.template_dir = template_dir
# if template_default_context is not None:
# self.template_default_context = template_default_context
# else:
# self.template_default_context = {}
# self.template_default_context.setdefault('url_for', self.url_for)
#
# self._before_request_hooks = []
# self._before_request_hooks.extend(self.before_request_hooks)
# self._after_request_hooks = []
# self._after_request_hooks.extend(self.after_request_hooks)
#
# self._config = {}
# self._config.update(CONFIGURE)
#
# @property
# def config(self):
# return self._config
#
# def render_template(self, template_name, **kwargs):
# return render_template(
# template_name, template_dir=self.template_dir,
# default_context=self.template_default_context,
# context=kwargs
# ).encode('utf-8')
#
# def url_for(self, func_name, _request=None, _external=False, **kwargs):
# url = self._router.url_for(func_name, **kwargs)
# if _external:
# request = _request
# url = '{}://{}{}'.format(request.scheme, request.host, url)
# return url
#
# def url_resolve(self, path):
# """url -> view
#
# :return: (func, methods, func_kwargs)
# """
# return self._router.get_func(path)
#
# def __call__(self, environ, start_response):
# """for wsgi server"""
# self.start_response = start_response
# path = environ['PATH_INFO']
# method = environ['REQUEST_METHOD']
# func, methods, func_kwargs = self.url_resolve(path)
#
# try:
# if func is None:
# self.notfound()
# if method not in methods:
# self.abort(405)
# request = Request(environ)
# result = self.handle_before_request_hooks(request, view_func=func)
# if isinstance(result, Response):
# response = result
# else:
# response = self.handle_view(request, func, func_kwargs)
# self.handle_after_request_hooks(request, response, view_func=func)
# except HTTPException as ex:
# response = ex.response
#
# return self._start_response(response)
#
# def handle_view(self, request, view_func, func_kwargs):
# result = view_func(request, **func_kwargs)
# if isinstance(result, (list, tuple)):
# response = Response(content=result[0],
# status_code=result[1],
# headers=result[2])
# elif isinstance(result, Response):
# response = result
# else:
# response = Response(result)
# return response
#
# def _start_response(self, response):
# body = response.body
# status_code = response.status
# headers_list = response.headers_list
# self.start_response(status_code, headers_list)
#
# if isinstance(body, collections.Iterator):
# return (to_bytes(x) for x in body)
# else:
# return [to_bytes(body)]
#
# def route(self, path, methods=None):
#
# def wrapper(view_func):
# self._router.register(path, view_func, methods)
# return view_func
#
# return wrapper
#
# def add_url_rule(self, path, view_func):
# methods = view_func.methods
# self.route(path, methods=methods)(view_func)
#
# def before_request(self, func):
# self._before_request_hooks.append(func)
# return func
#
# def handle_before_request_hooks(self, request, view_func):
# hooks = self._before_request_hooks
# for hook in hooks:
# if len(inspect.signature(hook).parameters) > 1:
# result = hook(request, view_func, self)
# else:
# result = hook(request)
# if isinstance(result, Response):
# return result
#
# def after_request(self, func):
# self._after_request_hooks.append(func)
# return func
#
# def handle_after_request_hooks(self, request, response, view_func):
# hooks = self._after_request_hooks
# for hook in hooks:
# if len(inspect.signature(hook).parameters) > 2:
# hook(request, response, view_func, self)
# else:
# hook(request, response)
#
# def notfound(self):
# raise NotFound()
#
# def abort(self, code):
# raise HTTPException(Response(status_code=code))
#
# def make_response(self, content=b'', **kwargs):
# if isinstance(content, Response):
# return content
# return Response(content, **kwargs)
#
# def test_client(self):
# return Client(self)
#
# def run(self, host='127.0.0.1', port=5000):
# address = (host, port)
# httpd = WSGIRefServer(host, port)
# print('WSGIServer: Serving HTTP on %s ...\n' % str(address))
# httpd.run(self)
. Output only the next line. | app.run() |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
pg_uri = os.environ.get(
'BUSTARD_TEST_PG_URI',
'postgresql://dbuser:password@localhost/exampledb'
)
@pytest.yield_fixture
def model():
yield orm.Model
orm.MetaData.tables = {}
orm.MetaData.indexes = []
@pytest.mark.parametrize('fieldclass, data_type', [
('CharField', 'varchar'),
('IntegerField', 'integer'),
('DateField', 'date'),
<|code_end|>
, predict the next line using imports from the current file:
import os
import pytest
from bustard import orm
and context including class names, function names, and sometimes code from other files:
# Path: bustard/orm.py
# class MetaData:
# class Field(metaclass=abc.ABCMeta):
# class ModelMetaClass(type):
# class Model(metaclass=ModelMetaClass):
# class TextField(Field):
# class CharField(TextField):
# class IntegerField(Field):
# class DateField(Field):
# class DateTimeField(Field):
# class BooleanField(Field):
# class UUIDField(Field):
# class JSONField(Field):
# class AutoField(Field):
# class ForeignKey:
# class Index:
# class Engine:
# class Session:
# class QuerySet:
# def index_sqls(cls):
# def create_all(cls, bind):
# def drop_all(cls, bind):
# def __init__(self, name=None, max_length=None, default=None,
# server_default=None, unique=False, nullable=True,
# index=False, primary_key=False, foreign_key=None):
# def __get__(self, instance, owner):
# def __set__(self, instance, value):
# def __lt__(self, value):
# def __le__(self, value):
# def __eq__(self, value):
# def __ne__(self, value):
# def __gt__(self, value):
# def __ge__(self, value):
# def like(self, value):
# def desc(self):
# def is_(self, value):
# def is_not(self, value):
# def not_in(self, value):
# def in_(self, value):
# def to_sql(self):
# def default_value(self):
# def name_sql(self):
# def _collect_fields(attr_dict, model):
# def _get_table_name(attr_dict):
# def _auto_column_name(attr_dict):
# def _collect_indexes(table_name, attr_dict):
# def __init__(cls, name, bases, attr_dict):
# def __prepare__(cls, name, bases):
# def __init__(self, **kwargs):
# def default_dict(self):
# def table_sql(cls):
# def sql_values(self):
# def __init__(self, max_length=None, **kwargs):
# def __init__(self, column, onupdate=None, ondelete=None):
# def to_sql(self):
# def __init__(self, name, table_name, column_name, unique=False):
# def to_sql(self):
# def __init__(self, uri):
# def connect(self):
# def close(self):
# def __init__(self, bind=None):
# def configure(cls, bind):
# def connect(self):
# def execute(self, sql, args):
# def fetchone(self):
# def fetchmany(self, size=None):
# def fetchall(self):
# def commit(self):
# def rollback(self):
# def close(self):
# def insert(self, instance):
# def update(self, instance):
# def delete(self, instance):
# def query(self, model):
# def transaction(self):
# def __init__(self, session, model):
# def clone(self):
# def limit(self, number):
# def offset(self, number):
# def order_by(self, *args):
# def filter(self, *args, **kwargs):
# def count(self):
# def update(self, **kwargs):
# def delete(self):
# def _build_where_sql(self):
# def _build_limit_sql(self):
# def _build_offset_sql(self):
# def _build_order_by_sql(self):
# def _build_select_sql(self, count=False):
# def _build_update_sql(self, sql_values):
# def _build_delete_sql(self):
# def _execute(self):
# def _clean_data(self):
# def __len__(self):
# def __getitem__(self, index):
# def __iter__(self):
. Output only the next line. | ('DateTimeField', 'timestamp'), |
Here is a snippet: <|code_start|>
headers['foo'] = ['v1', 'v2']
assert headers['foo'] == 'v2'
assert headers.get_all('Foo') == ['v1', 'v2']
@pytest.mark.parametrize('url, code', [
('http://a.com', None),
('/a/b/c', 301),
])
def test_redirect(url, code):
kwargs = {'url': url}
if code:
kwargs['code'] = code
response = redirect(**kwargs)
assert response.status_code == (code or 302)
assert response.headers['location'] == url
@pytest.mark.parametrize('obj', [
{'a': 1, 'b': 2},
{'a': 'b', 'headers': Headers({'a': 'b'})},
{},
])
def test_jsonify(obj):
response = jsonify(obj)
assert response.json() is not None
@pytest.mark.parametrize('code, result', [
<|code_end|>
. Write the next line using the current file imports:
import pytest
from bustard.http import jsonify, Headers, redirect, response_status_string
and context from other files:
# Path: bustard/http.py
# def jsonify(*args, **kwargs):
# data = json.dumps(dict(*args, **kwargs), indent=2, sort_keys=True,
# separators=(', ', ': '), default=json_dumps_default)
# data = data.encode('utf-8')
# response = Response(data + b'\n', content_type='application/json')
# response.headers['Content-Length'] = str(len(response.data))
# return response
#
# class Headers(MultiDict):
#
# def add(self, key, value):
# key = to_text(to_header_key(key))
# if isinstance(value, (tuple, list)):
# self.data.setdefault(key, []).extend(map(to_text, value))
# else:
# self.data.setdefault(key, []).append(to_text(value))
#
# def set(self, key, value):
# self.__setitem__(key, value)
#
# def get_all(self, key):
# key = to_header_key(key)
# return self.data[key]
#
# @classmethod
# def from_list(cls, headers_list):
# headers = cls()
# for (k, v) in headers_list:
# headers.add(k, v)
# return headers
#
# def to_list(self):
# return [
# (k, v)
# for k, values in self.to_dict().items()
# for v in values
# ]
#
# def __getitem__(self, key):
# key = to_header_key(key)
# return super(Headers, self).__getitem__(key)
#
# def __setitem__(self, key, value):
# key = to_text(to_header_key(key))
# if isinstance(value, (list, tuple)):
# value = list(map(to_text, value))
# else:
# value = to_text(value)
# super(Headers, self).__setitem__(key, value)
#
# def redirect(url, code=302):
# response = Response(status_code=code)
# response.headers['Location'] = url
# return response
#
# def response_status_string(code):
# """e.g. ``200 OK`` """
# mean = HTTP_STATUS_CODES.get(code, 'unknown').upper()
# return '{code} {mean}'.format(code=code, mean=mean)
, which may include functions, classes, or code. Output only the next line. | (200, '200 OK'), |
Given snippet: <|code_start|> headers.set('Name', 'v')
assert headers.get_all('Name') == ['v']
headers['a'] = 'b'
assert headers['a'] == 'b'
assert headers.get_all('a') == ['b']
def test_value_list(self):
headers = Headers()
headers.add('name', ['value', 'v2'])
assert headers.get_all('name') == ['value', 'v2']
assert set(headers.to_list()) == {('Name', 'value'), ('Name', 'v2')}
h2 = Headers.from_list(
[('name', 'v1'), ('Name', 'v2'), ('key', 'value')]
)
assert set(h2.to_list()) == {
('Name', 'v1'), ('Name', 'v2'), ('Key', 'value')
}
headers['foo'] = ['v1', 'v2']
assert headers['foo'] == 'v2'
assert headers.get_all('Foo') == ['v1', 'v2']
@pytest.mark.parametrize('url, code', [
('http://a.com', None),
('/a/b/c', 301),
])
def test_redirect(url, code):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pytest
from bustard.http import jsonify, Headers, redirect, response_status_string
and context:
# Path: bustard/http.py
# def jsonify(*args, **kwargs):
# data = json.dumps(dict(*args, **kwargs), indent=2, sort_keys=True,
# separators=(', ', ': '), default=json_dumps_default)
# data = data.encode('utf-8')
# response = Response(data + b'\n', content_type='application/json')
# response.headers['Content-Length'] = str(len(response.data))
# return response
#
# class Headers(MultiDict):
#
# def add(self, key, value):
# key = to_text(to_header_key(key))
# if isinstance(value, (tuple, list)):
# self.data.setdefault(key, []).extend(map(to_text, value))
# else:
# self.data.setdefault(key, []).append(to_text(value))
#
# def set(self, key, value):
# self.__setitem__(key, value)
#
# def get_all(self, key):
# key = to_header_key(key)
# return self.data[key]
#
# @classmethod
# def from_list(cls, headers_list):
# headers = cls()
# for (k, v) in headers_list:
# headers.add(k, v)
# return headers
#
# def to_list(self):
# return [
# (k, v)
# for k, values in self.to_dict().items()
# for v in values
# ]
#
# def __getitem__(self, key):
# key = to_header_key(key)
# return super(Headers, self).__getitem__(key)
#
# def __setitem__(self, key, value):
# key = to_text(to_header_key(key))
# if isinstance(value, (list, tuple)):
# value = list(map(to_text, value))
# else:
# value = to_text(value)
# super(Headers, self).__setitem__(key, value)
#
# def redirect(url, code=302):
# response = Response(status_code=code)
# response.headers['Location'] = url
# return response
#
# def response_status_string(code):
# """e.g. ``200 OK`` """
# mean = HTTP_STATUS_CODES.get(code, 'unknown').upper()
# return '{code} {mean}'.format(code=code, mean=mean)
which might include code, classes, or functions. Output only the next line. | kwargs = {'url': url} |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
class TestHeaders:
def test_normal(self):
headers = Headers({'User-Agent': 'firefox/34'})
assert headers['User-Agent'] == 'firefox/34'
assert headers['user-agent'] == headers['User-Agent']
headers.add('name', 'value')
headers.add('name', 'value2')
assert headers['name'] == headers['Name'] == 'value2'
assert headers.get_all('Name') == ['value', 'value2']
headers.set('Name', 'v')
assert headers.get_all('Name') == ['v']
headers['a'] = 'b'
assert headers['a'] == 'b'
assert headers.get_all('a') == ['b']
def test_value_list(self):
headers = Headers()
<|code_end|>
with the help of current file imports:
import pytest
from bustard.http import jsonify, Headers, redirect, response_status_string
and context from other files:
# Path: bustard/http.py
# def jsonify(*args, **kwargs):
# data = json.dumps(dict(*args, **kwargs), indent=2, sort_keys=True,
# separators=(', ', ': '), default=json_dumps_default)
# data = data.encode('utf-8')
# response = Response(data + b'\n', content_type='application/json')
# response.headers['Content-Length'] = str(len(response.data))
# return response
#
# class Headers(MultiDict):
#
# def add(self, key, value):
# key = to_text(to_header_key(key))
# if isinstance(value, (tuple, list)):
# self.data.setdefault(key, []).extend(map(to_text, value))
# else:
# self.data.setdefault(key, []).append(to_text(value))
#
# def set(self, key, value):
# self.__setitem__(key, value)
#
# def get_all(self, key):
# key = to_header_key(key)
# return self.data[key]
#
# @classmethod
# def from_list(cls, headers_list):
# headers = cls()
# for (k, v) in headers_list:
# headers.add(k, v)
# return headers
#
# def to_list(self):
# return [
# (k, v)
# for k, values in self.to_dict().items()
# for v in values
# ]
#
# def __getitem__(self, key):
# key = to_header_key(key)
# return super(Headers, self).__getitem__(key)
#
# def __setitem__(self, key, value):
# key = to_text(to_header_key(key))
# if isinstance(value, (list, tuple)):
# value = list(map(to_text, value))
# else:
# value = to_text(value)
# super(Headers, self).__setitem__(key, value)
#
# def redirect(url, code=302):
# response = Response(status_code=code)
# response.headers['Location'] = url
# return response
#
# def response_status_string(code):
# """e.g. ``200 OK`` """
# mean = HTTP_STATUS_CODES.get(code, 'unknown').upper()
# return '{code} {mean}'.format(code=code, mean=mean)
, which may contain function names, class names, or code. Output only the next line. | headers.add('name', ['value', 'v2']) |
Predict the next line for this snippet: <|code_start|>class Template:
TOKEN_VARIABLE_START = '{{'
TOKEN_VARIABLE_END = '}}'
TOKEN_TAG_START = '{%'
TOKEN_TAG_END = '%}'
TOKEN_COMMENT_START = '{#'
TOKEN_COMMENT_END = '#}'
FUNC_WHITELIST = TEMPLATE_BUILTIN_FUNC_WHITELIST
def __init__(self, text, default_context=None,
pre_compile=True,
indent=0, template_dir='',
func_name='__render_function',
result_var='__result',
auto_escape=True
):
self.re_tokens = re.compile(r'''(?x)(
(?:{token_variable_start} .+? {token_variable_end})
|(?:{token_tag_start} .+? {token_tag_end})
|(?:{token_comment_start}.*?{token_comment_end})
)'''.format(token_variable_start=re.escape(self.TOKEN_VARIABLE_START),
token_variable_end=re.escape(self.TOKEN_VARIABLE_END),
token_tag_start=re.escape(self.TOKEN_TAG_START),
token_tag_end=re.escape(self.TOKEN_TAG_END),
token_comment_start=re.escape(self.TOKEN_COMMENT_START),
token_comment_end=re.escape(self.TOKEN_COMMENT_END),
)
)
# {{ variable }}
self.re_variable = re.compile(r'''
<|code_end|>
with the help of current file imports:
import builtins
import os
import re
from .constants import TEMPLATE_BUILTIN_FUNC_WHITELIST
from .utils import to_text
and context from other files:
# Path: bustard/constants.py
# TEMPLATE_BUILTIN_FUNC_WHITELIST = (
# 'all',
# 'unicode',
# 'isinstance',
# 'dict',
# 'format',
# 'repr',
# 'sorted',
# 'list',
# 'iter',
# 'round',
# 'cmp',
# 'set',
# 'bytes',
# 'reduce',
# 'slice',
# 'sum',
# 'getattr',
# 'abs',
# 'hash',
# 'len',
# 'ord',
# 'filter',
# 'range',
# 'pow',
# 'float',
# 'divmod',
# 'enumerate',
# 'basestring',
# 'zip',
# 'hex',
# 'long',
# 'next',
# 'chr',
# 'xrange',
# 'type',
# 'tuple',
# 'reversed',
# 'hasattr',
# 'delattr',
# 'setattr',
# 'str',
# 'int',
# 'unichr',
# 'min',
# 'any',
# 'complex',
# 'bool',
# 'map',
# 'max',
# 'object',
# 'callable',
# )
#
# Path: bustard/utils.py
# def to_text(st, encoding='utf-8'):
# if isinstance(st, str):
# return st
# elif isinstance(st, collections.ByteString):
# return st.decode(encoding)
# else:
# return str(st)
, which may contain function names, class names, or code. Output only the next line. | {token_variable_start} .+? {token_variable_end} |
Predict the next line for this snippet: <|code_start|> auto_escape=True
):
self.re_tokens = re.compile(r'''(?x)(
(?:{token_variable_start} .+? {token_variable_end})
|(?:{token_tag_start} .+? {token_tag_end})
|(?:{token_comment_start}.*?{token_comment_end})
)'''.format(token_variable_start=re.escape(self.TOKEN_VARIABLE_START),
token_variable_end=re.escape(self.TOKEN_VARIABLE_END),
token_tag_start=re.escape(self.TOKEN_TAG_START),
token_tag_end=re.escape(self.TOKEN_TAG_END),
token_comment_start=re.escape(self.TOKEN_COMMENT_START),
token_comment_end=re.escape(self.TOKEN_COMMENT_END),
)
)
# {{ variable }}
self.re_variable = re.compile(r'''
{token_variable_start} .+? {token_variable_end}
'''.format(
token_variable_start=re.escape(self.TOKEN_VARIABLE_START),
token_variable_end=re.escape(self.TOKEN_VARIABLE_END)
), re.VERBOSE)
# {# comment #}
self.re_comment = re.compile(r'''
{token_comment_start}.*?{token_comment_end}
'''.format(
token_comment_start=re.escape(self.TOKEN_COMMENT_START),
token_comment_end=re.escape(self.TOKEN_COMMENT_END)
), re.VERBOSE)
# {% tag %}
self.re_tag = re.compile(r'''
<|code_end|>
with the help of current file imports:
import builtins
import os
import re
from .constants import TEMPLATE_BUILTIN_FUNC_WHITELIST
from .utils import to_text
and context from other files:
# Path: bustard/constants.py
# TEMPLATE_BUILTIN_FUNC_WHITELIST = (
# 'all',
# 'unicode',
# 'isinstance',
# 'dict',
# 'format',
# 'repr',
# 'sorted',
# 'list',
# 'iter',
# 'round',
# 'cmp',
# 'set',
# 'bytes',
# 'reduce',
# 'slice',
# 'sum',
# 'getattr',
# 'abs',
# 'hash',
# 'len',
# 'ord',
# 'filter',
# 'range',
# 'pow',
# 'float',
# 'divmod',
# 'enumerate',
# 'basestring',
# 'zip',
# 'hex',
# 'long',
# 'next',
# 'chr',
# 'xrange',
# 'type',
# 'tuple',
# 'reversed',
# 'hasattr',
# 'delattr',
# 'setattr',
# 'str',
# 'int',
# 'unichr',
# 'min',
# 'any',
# 'complex',
# 'bool',
# 'map',
# 'max',
# 'object',
# 'callable',
# )
#
# Path: bustard/utils.py
# def to_text(st, encoding='utf-8'):
# if isinstance(st, str):
# return st
# elif isinstance(st, collections.ByteString):
# return st.decode(encoding)
# else:
# return str(st)
, which may contain function names, class names, or code. Output only the next line. | {token_tag_start}.*?{token_tag_end} |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
class HTTPException(Exception):
def __init__(self, response):
self.response = response
class NotFound(HTTPException):
<|code_end|>
using the current file's imports:
from .constants import NOTFOUND_HTML
from .http import Response
and any relevant context from other files:
# Path: bustard/constants.py
# NOTFOUND_HTML = b"""
# <html>
# <h1>404 Not Found</h1>
# </html>
# """
#
# Path: bustard/http.py
# class Response:
#
# def __init__(self, content=b'', status_code=200,
# content_type='text/html; charset=utf-8',
# headers=None):
# self._content = content
# self._status_code = status_code
# _headers = headers or {}
# _headers.setdefault('Content-Type', content_type)
# if isinstance(_headers, Headers):
# self._headers = _headers
# else:
# self._headers = Headers(_headers)
# self._cookies = SimpleCookie()
# self._load_cookies_from_headers()
#
# def _load_cookies_from_headers(self):
# cookies = self._headers.to_dict().pop('Set-Cookie', [])
# for cookie in cookies:
# self._cookies.load(cookie)
#
# @property
# def content(self):
# return self._content
#
# @content.setter
# def content(self, value):
# if isinstance(value, str):
# value = value.encode('utf-8')
# self._content = value
# body = data = content
#
# def get_data(self):
# return self._content
#
# @property
# def content_type(self, value):
# return self.headers.get('Content-Type', '')
#
# @content_type.setter
# def content_type(self, value):
# self.headers['Content-Type'] = value
#
# @property
# def content_length(self):
# return int(self.headers.get('Content-Length', '0'))
#
# @property
# def status_code(self):
# return self._status_code
#
# @status_code.setter
# def status_code(self, value):
# self._status_code = value
#
# @property
# def status(self):
# code = self._status_code
# return response_status_string(code)
#
# @property
# def headers(self):
# return self._headers
#
# @headers.setter
# def headers(self, value):
# self._headers = Headers(value)
#
# @property
# def content_type(self):
# return self._headers.get('Content-Type', '')
#
# @content_type.setter
# def content_type(self, value):
# self._headers['Content-Type'] = value
#
# @property
# def cookies(self):
# return self._cookies
#
# def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
# domain=None, secure=False, httponly=False):
# cookie = cookie_dump(
# key, value=value, max_age=max_age, expires=expires, path=path,
# domain=domain, secure=secure, httponly=httponly
# )
# self._cookies.load(cookie)
#
# def delete_cookie(self, key, max_age=0,
# expires='Thu, 01-Jan-1970 00:00:00 GMT'):
# self.set_cookie(key, value='', max_age=max_age, expires=expires)
#
# @property
# def headers_list(self):
# # normal headers
# headers_list = list(self.headers.to_list())
#
# # set-cookies
# headers_list.extend(
# ('Set-Cookie', value.OutputString())
# for value in self.cookies.values()
# )
# return headers_list
#
# def json(self):
# return json.loads(to_text(self.data))
#
# def __repr__(self):
# return '<{} [{}]>'.format(self.__class__.__name__, self.status_code)
. Output only the next line. | def __init__(self): |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.