Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Continue the code snippet: <|code_start|>
# show depots in organization
class DepotInline(admin.TabularInline):
model = Depot
extra = 0
can_delete = False
show_change_link = True
<|code_end|>
. Use current file imports:
from depot.models import Depot, Organization
from django.contrib import admin
and context (classes, functions, or code) from other files:
# Path: depot/models.py
# class Depot(models.Model):
# """
# A depot has a name and many depot managers.
#
# :author: Leo Tappe
# :author: Benedikt Seidl
# """
#
# name = models.CharField(max_length=32)
# description = models.CharField(max_length=256, blank=True)
# organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
# manager_users = models.ManyToManyField(User, blank=True)
# manager_groups = models.ManyToManyField(Group, blank=True)
# active = models.BooleanField(default=True)
#
# def managed_by(self, user):
# """
# Depots are managed by superusers, the organization's managers,
# any manager user and any user in a manager group.
# """
#
# return (user.is_superuser
# or self.organization.managed_by(user)
# or self.manager_users.filter(id=user.id).exists()
# or self.manager_groups.filter(id__in=user.groups.all()).exists())
#
# def show_internal_items(self, user):
# """
# Internal items can be seen by superusers and organization members.
# """
#
# return user.is_superuser or self.organization.is_member(user)
#
# def visible_items(self, user):
# """
# Return the list of items the user is allowed to see in this depot.
# """
#
# if self.show_internal_items(user):
# return self.active_items.all()
# else:
# return self.public_items.all()
#
# @property
# def managers(self):
# """
# The list of users explicitly listed as managers of this depot.
# Does not include any organization managers or superusers which are
# not added to the depot.
# """
#
# return User.objects.filter(
# models.Q(id__in=self.manager_users.all())
# | models.Q(groups__in=self.manager_groups.all())
# ).distinct()
#
# @property
# def public_items(self):
# """
# List all items with the visibility set to public.
# """
#
# return self.item_set.filter(visibility=Item.VISIBILITY_PUBLIC)
#
# @property
# def active_items(self):
# return self.item_set.filter(
# models.Q(visibility=Item.VISIBILITY_PUBLIC)
# | models.Q(visibility=Item.VISIBILITY_INTERNAL)
# )
#
# @staticmethod
# def filter_by_user(user):
# """
# Filter for depots managed by the given user
# """
#
# return (models.Q(organization__managers__id=user.id)
# | models.Q(manager_users__id=user.id)
# | models.Q(manager_groups__id__in=user.groups.all()))
#
# def __str__(self):
# return self.name
#
# class Organization(models.Model):
# """
# Representation of an organization,
# such as FSMPI, FSMB or ASTA.
#
# An organization is defined by a list of user groups,
# in our case LDAP groups. It is managed by a list of
# users and has a list of depots.
#
# :author: Leo Tappe
# :author: Benedikt Seidl
# """
#
# name = models.CharField(max_length=32)
# groups = models.ManyToManyField(Group, blank=True)
# managers = models.ManyToManyField(User, blank=True)
#
# def managed_by(self, user):
# """
# Organizations are managed by superusers and organization managers.
# """
#
# return user.is_superuser or self.managers.filter(id=user.id).exists()
#
# def is_member(self, user):
# """
# Checks if the user is in one of the groups defined in this organization.
# """
#
# return self.groups.filter(id__in=user.groups.all()).exists()
#
# @property
# def active_depots(self):
# """
# Returns all depots in this organization which have the active flag set.
# """
#
# return self.depot_set.filter(active=True)
#
# @staticmethod
# def filter_by_user(user):
# """
# Filter for organizations managed by the given user
# """
#
# return models.Q(managers__id=user.id)
#
# def __str__(self):
# return self.name
. Output only the next line. | fields = ('name', 'description', 'active') |
Using the snippet: <|code_start|>
def increase_header_level(elem, doc):
if type(elem)==Header:
if elem.level < 6:
elem.level += 1
else:
return []
<|code_end|>
, determine the next line of code. You have imports:
from panflute import run_filter, Header
and context (class names, function names, or code) available:
# Path: panflute/elements.py
# class Header(Block):
# """Header
#
# :param args: contents of the header
# :type args: :class:`Inline`
# :param level: level of the header (1 is the largest and 6 the smallest)
# :type level: ``int``
# :param identifier: element identifier (usually unique)
# :type identifier: :class:`str`
# :param classes: class names of the element
# :type classes: :class:`list` of :class:`str`
# :param attributes: additional attributes
# :type attributes: :class:`dict`
# :Base: :class:`Block`
#
# :Example:
#
# >>> title = [Str('Monty'), Space, Str('Python')]
# >>> header = Header(*title, level=2, identifier='toc')
# >>> header.level += 1
# """
# __slots__ = ['level', '_content', 'identifier', 'classes', 'attributes']
# _children = ['content']
#
# def __init__(self, *args, level=1,
# identifier='', classes=[], attributes={}):
# self.level = check_type(level, int)
# if not 0 < self.level <= 10:
# raise TypeError('Header level not between 1 and 10')
# self._set_ica(identifier, classes, attributes)
# self._set_content(args, Inline)
#
# def _slots_to_json(self):
# return [self.level, self._ica_to_json(), self.content.to_json()]
#
# Path: panflute/io.py
# def run_filter(action, *args, **kwargs):
# """
# Wrapper for :func:`.run_filters`
#
# Receive a Pandoc document from stdin, apply the *action* function to each element, and write it back to stdout.
#
# See :func:`.run_filters`
# """
# return run_filters([action], *args, **kwargs)
. Output only the next line. | def main(doc=None): |
Given the following code snippet before the placeholder: <|code_start|>
def increase_header_level(elem, doc):
if type(elem)==Header:
if elem.level < 6:
<|code_end|>
, predict the next line using imports from the current file:
from panflute import run_filter, Header
and context including class names, function names, and sometimes code from other files:
# Path: panflute/elements.py
# class Header(Block):
# """Header
#
# :param args: contents of the header
# :type args: :class:`Inline`
# :param level: level of the header (1 is the largest and 6 the smallest)
# :type level: ``int``
# :param identifier: element identifier (usually unique)
# :type identifier: :class:`str`
# :param classes: class names of the element
# :type classes: :class:`list` of :class:`str`
# :param attributes: additional attributes
# :type attributes: :class:`dict`
# :Base: :class:`Block`
#
# :Example:
#
# >>> title = [Str('Monty'), Space, Str('Python')]
# >>> header = Header(*title, level=2, identifier='toc')
# >>> header.level += 1
# """
# __slots__ = ['level', '_content', 'identifier', 'classes', 'attributes']
# _children = ['content']
#
# def __init__(self, *args, level=1,
# identifier='', classes=[], attributes={}):
# self.level = check_type(level, int)
# if not 0 < self.level <= 10:
# raise TypeError('Header level not between 1 and 10')
# self._set_ica(identifier, classes, attributes)
# self._set_content(args, Inline)
#
# def _slots_to_json(self):
# return [self.level, self._ica_to_json(), self.content.to_json()]
#
# Path: panflute/io.py
# def run_filter(action, *args, **kwargs):
# """
# Wrapper for :func:`.run_filters`
#
# Receive a Pandoc document from stdin, apply the *action* function to each element, and write it back to stdout.
#
# See :func:`.run_filters`
# """
# return run_filters([action], *args, **kwargs)
. Output only the next line. | elem.level += 1 |
Next line prediction: <|code_start|>
def myChordFunctionAirliner(Epsilon):
"""User-defined function describing the variation of chord as a function of
the leading edge coordinate"""
ChordLengths = np.array([0.5, 0.3792, 0.2867, 0.232, 0.1763, 0.1393, 0.1155,
0.093, 0.0713, 0.055, 0.007])
EpsArray = np.linspace(0, 1, 11)
return np.interp(Epsilon, EpsArray, ChordLengths)
def myAirfoilFunctionAirliner(Epsilon, LEPoint, ChordFunct, ChordFactor,
DihedralFunct, TwistFunct):
"""Defines the variation of cross section as a function of Epsilon"""
AfChord = ((ChordFactor*ChordFunct(Epsilon)) /
np.cos(np.radians(TwistFunct(Epsilon))))
Af = primitives.Airfoil(LEPoint, ChordLength=AfChord,
Rotation=DihedralFunct(Epsilon),
Twist=TwistFunct(Epsilon),
CRMProfile=True, CRM_Epsilon=Epsilon)
return Af
def mySweepAngleFunctionAirliner(Epsilon):
"""User-defined function describing the variation of sweep angle as a function
of the leading edge coordinate"""
SweepAngles = np.array([90, 87, 35, 35, 35, 35, 35, 35, 35, 35, 80])
EpsArray = np.linspace(0, 1, 11)
<|code_end|>
. Use current file imports:
(import numpy as np
import airconics
from airconics import primitives
from airconics import liftingsurface
from OCC.Display.SimpleGui import init_display)
and context including class names, function names, or small code snippets from other files:
# Path: airconics/primitives.py
# class Airfoil(object):
# def __init__(self,
# LeadingEdgePoint=[0., 0., 0.],
# ChordLength=1,
# Rotation=0,
# Twist=0,
# SeligProfile=None,
# Naca4Profile=None,
# Naca5Profile=None,
# CRMProfile=None,
# CRM_Epsilon=0.,
# InterpProfile=None,
# Epsilon=0, Af1=None, Af2=None, Eps1=0, Eps2=1,
# EnforceSharpTE=False):
# def points(self):
# def points(self, newpoints):
# def _make_airfoil(self, SeligProfile, Naca4Profile, Naca5Profile,
# CRMProfile, CRM_Epsilon,
# InterpProfile, Epsilon, Af1, Af2, Eps1, Eps2):
# def _fitAirfoiltoPoints(self):
# def _AirfoilPointsSeligFormat(self, SeligProfile):
# def _NACA4cambercurve(self, MaxCamberLocTenthChord, MaxCamberPercChord):
# def _NACA4halfthickness(self, ChordCoord, MaxThicknessPercChord):
# def _camberplusthickness(self, ChordCoord, zcam, dzcamdx, t):
# def _mergesurfaces(self, xu, zu, xl, zl):
# def _NACA4digitPnts(self, MaxCamberPercChord, MaxCamberLocTenthChord,
# MaxThicknessPercChord):
# def _TransformAirfoil(self):
# def AddAirfoilFromSeligFile(self, SeligProfile, Smoothing=1):
# def AddNACA4(self, Naca4Profile, Smoothing=1):
# def AddCRMLinear(self, CRM_Epsilon, Smoothing=1):
# def AddLinear2(self, Eps, Af1, Af2, Eps1=0, Eps2=1):
# N = np.shape(self._points)[0]
# N = len(data)
# RLE = 1.1019*(MaxThicknessPercChord/100.0)**2.0
#
# Path: airconics/liftingsurface.py
# def airfoilfunct(ProfileFunct):
# def AirfoilFunct(Epsilon, LEPoint, ChordFunct, ChordFactor,
# DihedralFunct, TwistFunct):
# def uniform_parametric_function(epsilon):
# def __init__(self, ApexPoint=gp_Pnt(0, 0, 0),
# SweepFunct=False,
# DihedralFunct=False,
# TwistFunct=False,
# ChordFunct=False,
# AirfoilFunct=False,
# ChordFactor=1,
# ScaleFactor=1,
# OptimizeChordScale=0,
# LooseSurf=1,
# SegmentNo=11,
# TipRequired=False,
# max_degree=8,
# continuity=GeomAbs_C2,
# construct_geometry=True,
# ):
# def ApexPoint(self):
# def ApexPoint(self, newApexPoint):
# def SweepFunct(self):
# def SweepFunct(self, newSweepFunct):
# def DihedralFunct(self):
# def DihedralFunct(self, newDihedralFunct):
# def TwistFunct(self):
# def TwistFunct(self, newTwistFunct):
# def ChordFunct(self):
# def ChordFunct(self, newChordFunct):
# def AirfoilFunct(self):
# def AirfoilFunct(self, newAirfoilFunct):
# def NSegments(self):
# def NSegments(self, newNSegments):
# def ChordFactor(self):
# def ChordFactor(self, newChordFactor):
# def ScaleFactor(self):
# def ScaleFactor(self, newScaleFactor):
# def Sections(self):
# def CreateConstructionGeometry(self):
# def Build(self):
# def GenerateLeadingEdge(self):
# def GenerateSectionCurves(self):
# def GenerateLiftingSurface(self):
# def CalculateProjectedArea(self):
# def CalculateSemiSpan(self):
# def CalculateAspectRatio(self):
# def Fit_BlendedTipDevice(self, rootchord_norm, spanfraction=0.1, cant=40,
# transition=0.1, sweep=40, taper=0.7):
# def AirfoilFunctWinglet(Epsilon):
# class LiftingSurface(AirconicsShape):
# LS = act.AddSurfaceLoft(self._Sections,
# max_degree=self.max_degree,
# continuity=self.Cont,
# solid=False)
# SA = act.CalculateSurfaceArea(LSPsegment)
# BB = self.Extents(as_vec=True)
# AR = ((ActualSemiSpan) ** 2.0) / (LSP_area)
. Output only the next line. | return np.interp(Epsilon, EpsArray, SweepAngles) |
Given the following code snippet before the placeholder: <|code_start|># (planform similar to that of the Boeing 787 family)
# ==============================================================================
def myDihedralFunctionAirliner(Epsilon):
"""User-defined function describing the variation of dihedral as a function
of the leading edge coordinate"""
BaseDihedral = 7
# A simple model of a loaded wing shape:
return BaseDihedral + Epsilon*Epsilon*10
def myTwistFunctionAirliner(Epsilon):
"""User-defined function describing the variation of twist as a function
of the leading edge coordinate. The coefficients of the polynomial below
come from the following twist values taken off the CRM (used for the AIAA
drag prediction workshops):
Epsilon = 0: twist = 4.24
Epsilon =0.3: twist = 0.593
Epsilon = 1: twist = -3.343"""
return -(6.53*Epsilon*Epsilon - 14.1*Epsilon + 4.24)
def myChordFunctionAirliner(Epsilon):
"""User-defined function describing the variation of chord as a function of
the leading edge coordinate"""
ChordLengths = np.array([0.5, 0.3792, 0.2867, 0.232, 0.1763, 0.1393, 0.1155,
0.093, 0.0713, 0.055, 0.007])
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import airconics
from airconics import primitives
from airconics import liftingsurface
from OCC.Display.SimpleGui import init_display
and context including class names, function names, and sometimes code from other files:
# Path: airconics/primitives.py
# class Airfoil(object):
# def __init__(self,
# LeadingEdgePoint=[0., 0., 0.],
# ChordLength=1,
# Rotation=0,
# Twist=0,
# SeligProfile=None,
# Naca4Profile=None,
# Naca5Profile=None,
# CRMProfile=None,
# CRM_Epsilon=0.,
# InterpProfile=None,
# Epsilon=0, Af1=None, Af2=None, Eps1=0, Eps2=1,
# EnforceSharpTE=False):
# def points(self):
# def points(self, newpoints):
# def _make_airfoil(self, SeligProfile, Naca4Profile, Naca5Profile,
# CRMProfile, CRM_Epsilon,
# InterpProfile, Epsilon, Af1, Af2, Eps1, Eps2):
# def _fitAirfoiltoPoints(self):
# def _AirfoilPointsSeligFormat(self, SeligProfile):
# def _NACA4cambercurve(self, MaxCamberLocTenthChord, MaxCamberPercChord):
# def _NACA4halfthickness(self, ChordCoord, MaxThicknessPercChord):
# def _camberplusthickness(self, ChordCoord, zcam, dzcamdx, t):
# def _mergesurfaces(self, xu, zu, xl, zl):
# def _NACA4digitPnts(self, MaxCamberPercChord, MaxCamberLocTenthChord,
# MaxThicknessPercChord):
# def _TransformAirfoil(self):
# def AddAirfoilFromSeligFile(self, SeligProfile, Smoothing=1):
# def AddNACA4(self, Naca4Profile, Smoothing=1):
# def AddCRMLinear(self, CRM_Epsilon, Smoothing=1):
# def AddLinear2(self, Eps, Af1, Af2, Eps1=0, Eps2=1):
# N = np.shape(self._points)[0]
# N = len(data)
# RLE = 1.1019*(MaxThicknessPercChord/100.0)**2.0
#
# Path: airconics/liftingsurface.py
# def airfoilfunct(ProfileFunct):
# def AirfoilFunct(Epsilon, LEPoint, ChordFunct, ChordFactor,
# DihedralFunct, TwistFunct):
# def uniform_parametric_function(epsilon):
# def __init__(self, ApexPoint=gp_Pnt(0, 0, 0),
# SweepFunct=False,
# DihedralFunct=False,
# TwistFunct=False,
# ChordFunct=False,
# AirfoilFunct=False,
# ChordFactor=1,
# ScaleFactor=1,
# OptimizeChordScale=0,
# LooseSurf=1,
# SegmentNo=11,
# TipRequired=False,
# max_degree=8,
# continuity=GeomAbs_C2,
# construct_geometry=True,
# ):
# def ApexPoint(self):
# def ApexPoint(self, newApexPoint):
# def SweepFunct(self):
# def SweepFunct(self, newSweepFunct):
# def DihedralFunct(self):
# def DihedralFunct(self, newDihedralFunct):
# def TwistFunct(self):
# def TwistFunct(self, newTwistFunct):
# def ChordFunct(self):
# def ChordFunct(self, newChordFunct):
# def AirfoilFunct(self):
# def AirfoilFunct(self, newAirfoilFunct):
# def NSegments(self):
# def NSegments(self, newNSegments):
# def ChordFactor(self):
# def ChordFactor(self, newChordFactor):
# def ScaleFactor(self):
# def ScaleFactor(self, newScaleFactor):
# def Sections(self):
# def CreateConstructionGeometry(self):
# def Build(self):
# def GenerateLeadingEdge(self):
# def GenerateSectionCurves(self):
# def GenerateLiftingSurface(self):
# def CalculateProjectedArea(self):
# def CalculateSemiSpan(self):
# def CalculateAspectRatio(self):
# def Fit_BlendedTipDevice(self, rootchord_norm, spanfraction=0.1, cant=40,
# transition=0.1, sweep=40, taper=0.7):
# def AirfoilFunctWinglet(Epsilon):
# class LiftingSurface(AirconicsShape):
# LS = act.AddSurfaceLoft(self._Sections,
# max_degree=self.max_degree,
# continuity=self.Cont,
# solid=False)
# SA = act.CalculateSurfaceArea(LSPsegment)
# BB = self.Extents(as_vec=True)
# AR = ((ActualSemiSpan) ** 2.0) / (LSP_area)
. Output only the next line. | EpsArray = np.linspace(0, 1, 11) |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
# @Author: p-chambers
# @Date: 2016-07-26 17:37:52
# @Last Modified by: p-chambers
# @Last Modified time: 2016-10-03 12:21:03
def SimpleDihedralFunction(Epsilon):
"""User-defined function describing the variation of dihedral as a function
of the leading edge coordinate"""
return np.zeros_like(Epsilon)
def SimpleTwistFunction(Epsilon):
"""User-defined function describing the variation of twist as a function
of the leading edge coordinate. The coefficients of the polynomial below
come from the following twist values taken off the CRM (used for the AIAA
drag prediction workshops):
Epsilon = 0: twist = 4.24
Epsilon =0.3: twist = 0.593
Epsilon = 1: twist = -3.343"""
return np.zeros_like(Epsilon)
<|code_end|>
. Use current file imports:
(import numpy as np
import airconics
from airconics import primitives
from airconics import liftingsurface
from OCC.Display.SimpleGui import init_display)
and context including class names, function names, or small code snippets from other files:
# Path: airconics/primitives.py
# class Airfoil(object):
# def __init__(self,
# LeadingEdgePoint=[0., 0., 0.],
# ChordLength=1,
# Rotation=0,
# Twist=0,
# SeligProfile=None,
# Naca4Profile=None,
# Naca5Profile=None,
# CRMProfile=None,
# CRM_Epsilon=0.,
# InterpProfile=None,
# Epsilon=0, Af1=None, Af2=None, Eps1=0, Eps2=1,
# EnforceSharpTE=False):
# def points(self):
# def points(self, newpoints):
# def _make_airfoil(self, SeligProfile, Naca4Profile, Naca5Profile,
# CRMProfile, CRM_Epsilon,
# InterpProfile, Epsilon, Af1, Af2, Eps1, Eps2):
# def _fitAirfoiltoPoints(self):
# def _AirfoilPointsSeligFormat(self, SeligProfile):
# def _NACA4cambercurve(self, MaxCamberLocTenthChord, MaxCamberPercChord):
# def _NACA4halfthickness(self, ChordCoord, MaxThicknessPercChord):
# def _camberplusthickness(self, ChordCoord, zcam, dzcamdx, t):
# def _mergesurfaces(self, xu, zu, xl, zl):
# def _NACA4digitPnts(self, MaxCamberPercChord, MaxCamberLocTenthChord,
# MaxThicknessPercChord):
# def _TransformAirfoil(self):
# def AddAirfoilFromSeligFile(self, SeligProfile, Smoothing=1):
# def AddNACA4(self, Naca4Profile, Smoothing=1):
# def AddCRMLinear(self, CRM_Epsilon, Smoothing=1):
# def AddLinear2(self, Eps, Af1, Af2, Eps1=0, Eps2=1):
# N = np.shape(self._points)[0]
# N = len(data)
# RLE = 1.1019*(MaxThicknessPercChord/100.0)**2.0
#
# Path: airconics/liftingsurface.py
# def airfoilfunct(ProfileFunct):
# def AirfoilFunct(Epsilon, LEPoint, ChordFunct, ChordFactor,
# DihedralFunct, TwistFunct):
# def uniform_parametric_function(epsilon):
# def __init__(self, ApexPoint=gp_Pnt(0, 0, 0),
# SweepFunct=False,
# DihedralFunct=False,
# TwistFunct=False,
# ChordFunct=False,
# AirfoilFunct=False,
# ChordFactor=1,
# ScaleFactor=1,
# OptimizeChordScale=0,
# LooseSurf=1,
# SegmentNo=11,
# TipRequired=False,
# max_degree=8,
# continuity=GeomAbs_C2,
# construct_geometry=True,
# ):
# def ApexPoint(self):
# def ApexPoint(self, newApexPoint):
# def SweepFunct(self):
# def SweepFunct(self, newSweepFunct):
# def DihedralFunct(self):
# def DihedralFunct(self, newDihedralFunct):
# def TwistFunct(self):
# def TwistFunct(self, newTwistFunct):
# def ChordFunct(self):
# def ChordFunct(self, newChordFunct):
# def AirfoilFunct(self):
# def AirfoilFunct(self, newAirfoilFunct):
# def NSegments(self):
# def NSegments(self, newNSegments):
# def ChordFactor(self):
# def ChordFactor(self, newChordFactor):
# def ScaleFactor(self):
# def ScaleFactor(self, newScaleFactor):
# def Sections(self):
# def CreateConstructionGeometry(self):
# def Build(self):
# def GenerateLeadingEdge(self):
# def GenerateSectionCurves(self):
# def GenerateLiftingSurface(self):
# def CalculateProjectedArea(self):
# def CalculateSemiSpan(self):
# def CalculateAspectRatio(self):
# def Fit_BlendedTipDevice(self, rootchord_norm, spanfraction=0.1, cant=40,
# transition=0.1, sweep=40, taper=0.7):
# def AirfoilFunctWinglet(Epsilon):
# class LiftingSurface(AirconicsShape):
# LS = act.AddSurfaceLoft(self._Sections,
# max_degree=self.max_degree,
# continuity=self.Cont,
# solid=False)
# SA = act.CalculateSurfaceArea(LSPsegment)
# BB = self.Extents(as_vec=True)
# AR = ((ActualSemiSpan) ** 2.0) / (LSP_area)
. Output only the next line. | def SimpleChordFunction(Epsilon): |
Next line prediction: <|code_start|> Rotation=DihedralFunct(Epsilon),
Twist=TwistFunct(Epsilon),
Naca4Profile='0012')
return Af
def SimpleSweepFunction(Epsilon):
"""User-defined function describing the variation of sweep angle as a function
of the leading edge coordinate"""
return np.zeros_like(Epsilon)
if __name__ == "__main__":
# Initialise the display
display, start_display, add_menu, add_function_to_menu = init_display()
# Position of the apex of the wing
P = (0,0,0)
# Class definition
NSeg = 10
# Instantiate the class
ChordFactor = 1
ScaleFactor = 50
# First try a standard CRM airfoil:
# Af_crm = airconics.primitives.Airfoil([0., 6., 1.], CRMProfile=True, CRM_Epsilon=0.8)
# display.DisplayShape(Af_crm.Curve, update=True, color='GREEN');
Wing = liftingsurface.LiftingSurface(P, SimpleSweepFunction,
<|code_end|>
. Use current file imports:
(import numpy as np
import airconics
from airconics import primitives
from airconics import liftingsurface
from OCC.Display.SimpleGui import init_display)
and context including class names, function names, or small code snippets from other files:
# Path: airconics/primitives.py
# class Airfoil(object):
# def __init__(self,
# LeadingEdgePoint=[0., 0., 0.],
# ChordLength=1,
# Rotation=0,
# Twist=0,
# SeligProfile=None,
# Naca4Profile=None,
# Naca5Profile=None,
# CRMProfile=None,
# CRM_Epsilon=0.,
# InterpProfile=None,
# Epsilon=0, Af1=None, Af2=None, Eps1=0, Eps2=1,
# EnforceSharpTE=False):
# def points(self):
# def points(self, newpoints):
# def _make_airfoil(self, SeligProfile, Naca4Profile, Naca5Profile,
# CRMProfile, CRM_Epsilon,
# InterpProfile, Epsilon, Af1, Af2, Eps1, Eps2):
# def _fitAirfoiltoPoints(self):
# def _AirfoilPointsSeligFormat(self, SeligProfile):
# def _NACA4cambercurve(self, MaxCamberLocTenthChord, MaxCamberPercChord):
# def _NACA4halfthickness(self, ChordCoord, MaxThicknessPercChord):
# def _camberplusthickness(self, ChordCoord, zcam, dzcamdx, t):
# def _mergesurfaces(self, xu, zu, xl, zl):
# def _NACA4digitPnts(self, MaxCamberPercChord, MaxCamberLocTenthChord,
# MaxThicknessPercChord):
# def _TransformAirfoil(self):
# def AddAirfoilFromSeligFile(self, SeligProfile, Smoothing=1):
# def AddNACA4(self, Naca4Profile, Smoothing=1):
# def AddCRMLinear(self, CRM_Epsilon, Smoothing=1):
# def AddLinear2(self, Eps, Af1, Af2, Eps1=0, Eps2=1):
# N = np.shape(self._points)[0]
# N = len(data)
# RLE = 1.1019*(MaxThicknessPercChord/100.0)**2.0
#
# Path: airconics/liftingsurface.py
# def airfoilfunct(ProfileFunct):
# def AirfoilFunct(Epsilon, LEPoint, ChordFunct, ChordFactor,
# DihedralFunct, TwistFunct):
# def uniform_parametric_function(epsilon):
# def __init__(self, ApexPoint=gp_Pnt(0, 0, 0),
# SweepFunct=False,
# DihedralFunct=False,
# TwistFunct=False,
# ChordFunct=False,
# AirfoilFunct=False,
# ChordFactor=1,
# ScaleFactor=1,
# OptimizeChordScale=0,
# LooseSurf=1,
# SegmentNo=11,
# TipRequired=False,
# max_degree=8,
# continuity=GeomAbs_C2,
# construct_geometry=True,
# ):
# def ApexPoint(self):
# def ApexPoint(self, newApexPoint):
# def SweepFunct(self):
# def SweepFunct(self, newSweepFunct):
# def DihedralFunct(self):
# def DihedralFunct(self, newDihedralFunct):
# def TwistFunct(self):
# def TwistFunct(self, newTwistFunct):
# def ChordFunct(self):
# def ChordFunct(self, newChordFunct):
# def AirfoilFunct(self):
# def AirfoilFunct(self, newAirfoilFunct):
# def NSegments(self):
# def NSegments(self, newNSegments):
# def ChordFactor(self):
# def ChordFactor(self, newChordFactor):
# def ScaleFactor(self):
# def ScaleFactor(self, newScaleFactor):
# def Sections(self):
# def CreateConstructionGeometry(self):
# def Build(self):
# def GenerateLeadingEdge(self):
# def GenerateSectionCurves(self):
# def GenerateLiftingSurface(self):
# def CalculateProjectedArea(self):
# def CalculateSemiSpan(self):
# def CalculateAspectRatio(self):
# def Fit_BlendedTipDevice(self, rootchord_norm, spanfraction=0.1, cant=40,
# transition=0.1, sweep=40, taper=0.7):
# def AirfoilFunctWinglet(Epsilon):
# class LiftingSurface(AirconicsShape):
# LS = act.AddSurfaceLoft(self._Sections,
# max_degree=self.max_degree,
# continuity=self.Cont,
# solid=False)
# SA = act.CalculateSurfaceArea(LSPsegment)
# BB = self.Extents(as_vec=True)
# AR = ((ActualSemiSpan) ** 2.0) / (LSP_area)
. Output only the next line. | SimpleDihedralFunction, |
Predict the next line after this snippet: <|code_start|> """User-defined function describing the variation of the fin sweep angle as
a function of the leading edge coordinate"""
# Data for SweepAngles is included in the airconics distribution:
# load using pkg_resources
rawdata = resource_string(__name__, 'airliner_fin_sweep.dat')
SweepAngles = np.fromstring(rawdata, sep='\n')
EpsArray = np.linspace(0, 1, np.size(SweepAngles))
return np.interp(Epsilon, EpsArray, SweepAngles)
# Tailplane spaniwse definition functions
def myDihedralFunctionTP(Epsilon):
return 7.6
def myTwistFunctionTP(Epsilon):
return 0
def myChordFunctionTP(Epsilon):
"""User-defined function describing the variation of the tailplane chord as
a function of the leading edge coordinate"""
# Data for Chordlengths is included in the airconics distribution:
# load using pkg_resources
rawdata = resource_string(__name__, 'airliner_TP_chords.dat')
ChordLengths = np.fromstring(rawdata, sep='\n')
EpsArray = np.linspace(0, 1, np.size(ChordLengths))
<|code_end|>
using the current file's imports:
import numpy as np
from airconics import primitives
from pkg_resources import resource_string
and any relevant context from other files:
# Path: airconics/primitives.py
# class Airfoil(object):
# def __init__(self,
# LeadingEdgePoint=[0., 0., 0.],
# ChordLength=1,
# Rotation=0,
# Twist=0,
# SeligProfile=None,
# Naca4Profile=None,
# Naca5Profile=None,
# CRMProfile=None,
# CRM_Epsilon=0.,
# InterpProfile=None,
# Epsilon=0, Af1=None, Af2=None, Eps1=0, Eps2=1,
# EnforceSharpTE=False):
# def points(self):
# def points(self, newpoints):
# def _make_airfoil(self, SeligProfile, Naca4Profile, Naca5Profile,
# CRMProfile, CRM_Epsilon,
# InterpProfile, Epsilon, Af1, Af2, Eps1, Eps2):
# def _fitAirfoiltoPoints(self):
# def _AirfoilPointsSeligFormat(self, SeligProfile):
# def _NACA4cambercurve(self, MaxCamberLocTenthChord, MaxCamberPercChord):
# def _NACA4halfthickness(self, ChordCoord, MaxThicknessPercChord):
# def _camberplusthickness(self, ChordCoord, zcam, dzcamdx, t):
# def _mergesurfaces(self, xu, zu, xl, zl):
# def _NACA4digitPnts(self, MaxCamberPercChord, MaxCamberLocTenthChord,
# MaxThicknessPercChord):
# def _TransformAirfoil(self):
# def AddAirfoilFromSeligFile(self, SeligProfile, Smoothing=1):
# def AddNACA4(self, Naca4Profile, Smoothing=1):
# def AddCRMLinear(self, CRM_Epsilon, Smoothing=1):
# def AddLinear2(self, Eps, Af1, Af2, Eps1=0, Eps2=1):
# N = np.shape(self._points)[0]
# N = len(data)
# RLE = 1.1019*(MaxThicknessPercChord/100.0)**2.0
. Output only the next line. | return np.interp(Epsilon, EpsArray, ChordLengths) |
Given the code snippet: <|code_start|>
# A deliberate copy of GitAdapter's error message:
ERR_MSG = "fatal: Not a hg repository (or any of the parent directories): .hg"
def get_hlib_client_and_path():
try:
client = hglib.open()
repopath = client.root()
return client, repopath
except Exception:
raise NoVCSError(ERR_MSG)
class HgAdapter(VcsAdapter):
def __init__(self):
get_hlib_client_and_path()
def get_modified_notebooks(self):
# initialize the mercurial client:
client, repopath = get_hlib_client_and_path()
# Gather unmerged files:
unmerged = [path for (status, path) in client.resolve(listfiles=True)
if status == 'U']
nb_diff = []
for status, path in client.status(all=True):
# if the file has been modified since last commit:
<|code_end|>
, generate the next line using the imports in this file:
import hglib
import os
import StringIO
from .vcs_adapter import VcsAdapter
from .vcs_adapter import NoVCSError
and context (functions, classes, or occasionally code) from other files:
# Path: nbdiff/adapter/vcs_adapter.py
# class VcsAdapter(object):
#
# def get_modified_notebooks(self):
# raise NotImplementedError("Subclass must implement abstract method")
#
# def filter_modified_notebooks(self, file_hooks):
# modified_notebooks = []
# for item in file_hooks:
# if re.search('.ipynb$', item[2]):
# modified_notebooks.append(item)
#
# return modified_notebooks
#
# def get_unmerged_notebooks(self):
# raise NotImplementedError("Subclass must implement abstract method")
#
# def filter_unmerged_notebooks(self, file_hooks):
# unmerged_notebooks = []
# for item in file_hooks:
# if re.search('.ipynb$', item[3]):
# unmerged_notebooks.append(item)
#
# return unmerged_notebooks
#
# def stage_file(self, file, contents=None):
# raise NotImplementedError("Subclass must implement abstract method")
#
# Path: nbdiff/adapter/vcs_adapter.py
# class NoVCSError(Exception):
# def __init__(self, value):
# self.value = value
#
# def __str__(self):
# return repr(self.value)
. Output only the next line. | if status == 'M' and path not in unmerged: |
Given snippet: <|code_start|> )
output_array = [line.split() for line in output.splitlines()]
if not output_array:
return []
if len(output_array) % 3 != 0: # TODO should be something else
sys.stderr.write(
"Can't find the conflicting notebook. Quitting.\n")
sys.exit(-1)
hash_list = []
for index in xrange(0, len(output_array), 3):
local_hash = output_array[index + 1][1]
base_hash = output_array[index][1]
remote_hash = output_array[index + 2][1]
file_name = output_array[index][3]
hash_list.append((local_hash, base_hash, remote_hash, file_name))
file_hooks = []
git_root_path = subprocess.check_output(
"git rev-parse --show-toplevel".split()
).splitlines()[0]
for hash in hash_list:
local = subprocess.Popen(
['git', 'show', hash[0]],
stdout=subprocess.PIPE
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import subprocess
import os
from .vcs_adapter import VcsAdapter
from .vcs_adapter import NoVCSError
and context:
# Path: nbdiff/adapter/vcs_adapter.py
# class VcsAdapter(object):
#
# def get_modified_notebooks(self):
# raise NotImplementedError("Subclass must implement abstract method")
#
# def filter_modified_notebooks(self, file_hooks):
# modified_notebooks = []
# for item in file_hooks:
# if re.search('.ipynb$', item[2]):
# modified_notebooks.append(item)
#
# return modified_notebooks
#
# def get_unmerged_notebooks(self):
# raise NotImplementedError("Subclass must implement abstract method")
#
# def filter_unmerged_notebooks(self, file_hooks):
# unmerged_notebooks = []
# for item in file_hooks:
# if re.search('.ipynb$', item[3]):
# unmerged_notebooks.append(item)
#
# return unmerged_notebooks
#
# def stage_file(self, file, contents=None):
# raise NotImplementedError("Subclass must implement abstract method")
#
# Path: nbdiff/adapter/vcs_adapter.py
# class NoVCSError(Exception):
# def __init__(self, value):
# self.value = value
#
# def __str__(self):
# return repr(self.value)
which might include code, classes, or functions. Output only the next line. | ) |
Continue the code snippet: <|code_start|> # ignore unmerged files, get unique names
fnames = list(set(fnames) - set(unmerged_array_names))
git_root_path = subprocess.check_output(
"git rev-parse --show-toplevel".split()
).splitlines()[0]
nb_diff = []
for name in fnames:
head_version_show = subprocess.Popen(
['git', 'show', 'HEAD:' + name],
stdout=subprocess.PIPE
)
absolute_file_path = os.path.join(git_root_path, name)
if os.path.exists(absolute_file_path):
current_local_notebook = open(absolute_file_path)
committed_notebook = head_version_show.stdout
nb_diff.append((current_local_notebook,
committed_notebook,
name))
return super(GitAdapter, self).filter_modified_notebooks(nb_diff)
def get_unmerged_notebooks(self):
output = subprocess.check_output(
"git ls-files --unmerged --full-name".split()
)
<|code_end|>
. Use current file imports:
import sys
import subprocess
import os
from .vcs_adapter import VcsAdapter
from .vcs_adapter import NoVCSError
and context (classes, functions, or code) from other files:
# Path: nbdiff/adapter/vcs_adapter.py
# class VcsAdapter(object):
#
# def get_modified_notebooks(self):
# raise NotImplementedError("Subclass must implement abstract method")
#
# def filter_modified_notebooks(self, file_hooks):
# modified_notebooks = []
# for item in file_hooks:
# if re.search('.ipynb$', item[2]):
# modified_notebooks.append(item)
#
# return modified_notebooks
#
# def get_unmerged_notebooks(self):
# raise NotImplementedError("Subclass must implement abstract method")
#
# def filter_unmerged_notebooks(self, file_hooks):
# unmerged_notebooks = []
# for item in file_hooks:
# if re.search('.ipynb$', item[3]):
# unmerged_notebooks.append(item)
#
# return unmerged_notebooks
#
# def stage_file(self, file, contents=None):
# raise NotImplementedError("Subclass must implement abstract method")
#
# Path: nbdiff/adapter/vcs_adapter.py
# class NoVCSError(Exception):
# def __init__(self, value):
# self.value = value
#
# def __str__(self):
# return repr(self.value)
. Output only the next line. | output_array = [line.split() for line in output.splitlines()] |
Given the following code snippet before the placeholder: <|code_start|>
def test_home():
client = app.test_client()
result = client.get('/1')
assert result.status_code == 200
def test_notebookjson():
client = app.test_client()
app.add_notebook({'metadata': {}}, 'foo.ipynb')
result = client.get('/notebooks/test_notebook0')
assert result.status_code == 200
def fake_callback(contents, filename):
fake_callback.called = True
app.shutdown_callback(fake_callback)
contents = ''
<|code_end|>
, predict the next line using imports from the current file:
from nbdiff.server.local_server import (
app,
)
and context including class names, function names, and sometimes code from other files:
# Path: nbdiff/server/local_server.py
# class NbFlask(Flask):
# def shutdown_callback(self, callback):
# def add_notebook(self, nb, fname):
# def nbdiff_static(filename):
# def home(notebookid):
# def notebookjson(notebookid):
# def notebook(notebookid):
# def shutdown():
# def static_url(path, **kwargs):
. Output only the next line. | result = client.put('/notebooks/test_notebook0', contents) |
Given snippet: <|code_start|> file = open(path, 'w')
file.write("modified")
file.close()
self.assertEqual(d.read('first/second/1.ipynb'), 'modified')
os.chdir(os.path.join(d.path, 'first'))
os.remove(path)
adapter = HgAdapter()
result = adapter.get_modified_notebooks()
self.assertTrue(len(result) == 0)
@tempdir()
def test_get_modified_notebooks(self, d):
os.chdir(d.path)
client = hglib.init()
client.open()
d.makedir('first')
d.makedir('first/second')
path = d.write('first/second/1.ipynb', 'initial')
self.assertEqual(d.read('first/second/1.ipynb'), 'initial')
client.add('first/second/1.ipynb')
client.commit("message")
file = open(path, 'w')
file.write("modified")
file.close()
self.assertEqual(d.read('first/second/1.ipynb'), 'modified')
os.chdir(os.path.join(d.path, 'first'))
adapter = HgAdapter()
result = adapter.get_modified_notebooks()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import hglib
import os
import unittest
from nbdiff.adapter.hg_adapter import HgAdapter
from testfixtures import tempdir
and context:
# Path: nbdiff/adapter/hg_adapter.py
# class HgAdapter(VcsAdapter):
#
# def __init__(self):
# get_hlib_client_and_path()
#
# def get_modified_notebooks(self):
# # initialize the mercurial client:
# client, repopath = get_hlib_client_and_path()
# # Gather unmerged files:
# unmerged = [path for (status, path) in client.resolve(listfiles=True)
# if status == 'U']
# nb_diff = []
# for status, path in client.status(all=True):
# # if the file has been modified since last commit:
# if status == 'M' and path not in unmerged:
# # returned by client.status is relative to the repo location
# abspath = os.path.join(repopath, path)
# if os.path.exists(abspath):
# current_local_notebook = open(abspath)
# # Unlike 'git ls-files', client.cat returns the file
#                 # contents as a plain string. To maintain compatibility
# # with GitAdapter, we have to supply the string as a
# # file-like stream. A StringIO object behaves as a file
# # handle and can be used for this purpose.
# notebook = client.cat([abspath])
# committed_notebook = StringIO.StringIO(notebook)
#
# nb_diff.append((current_local_notebook,
# committed_notebook,
# path))
#
# return super(HgAdapter, self).filter_modified_notebooks(nb_diff)
#
# def get_unmerged_notebooks(self):
# client, repopath = get_hlib_client_and_path()
# # Gather unmerged files:
# unmerged = [path for (status, path) in client.resolve(listfiles=True)
# if status == 'U']
# if not unmerged:
# return []
#
# nb_diff = []
#
# local_remote_hash = client.identify(id=True).split('+')
# local_hash = local_remote_hash[0]
# remote_hash = local_remote_hash[1]
# base_hash = client.log("ancestor('" + local_hash +
# "', '" + remote_hash + "')")[0][1]
#
# for status, path in client.status(all=True):
# if path in unmerged:
# abspath = os.path.join(repopath, path)
# name = os.path.basename(abspath)
#
# local_nb_str = client.cat([name], rev=local_hash)
# remote_nb_str = client.cat([name], rev=remote_hash)
# base_nb_str = client.cat([name], rev=base_hash)
#
# local_notebook = StringIO.StringIO(local_nb_str)
# remote_notebook = StringIO.StringIO(remote_nb_str)
# base = StringIO.StringIO(base_nb_str)
#
# nb_diff.append((local_notebook,
# base,
# remote_notebook,
# abspath))
# return super(HgAdapter, self).filter_unmerged_notebooks(nb_diff)
#
# def stage_file(self, file, contents=None):
# pass
which might include code, classes, or functions. Output only the next line. | self.assertTrue(len(result) == 1) |
Continue the code snippet: <|code_start|> assert isinstance(
rs.get_class(classname).newInstance(),
ducmd.DiffURLCommand
)
def test_process(self):
ducmd.redirect = mock_redirect
ducmd.render_template = mock_render_template
mainurl = "https://raw.githubusercontent.com/"
mainurl = mainurl + "tarmstrong/nbdiff/master/scripts/"
before = mainurl+"example-notebooks/diff/0/before.ipynb"
after = mainurl+"example-notebooks/diff/0/after.ipynb"
session = db.db_session()
request = stub(form={'beforeURL': before, 'afterURL': after})
response = ducmd.DiffURLCommand().process(request, None, session)
assert "/Comparison/" in response
split = str.split(response, "/")
assert split[-1].isdigit()
class MergeCommandTest(unittest.TestCase):
def test_newInstance(self):
classname = "nbdiff.server.command.MergeCommand"
assert isinstance(
rs.get_class(classname).newInstance(),
mcmd.MergeCommand
)
def test_process(self):
<|code_end|>
. Use current file imports:
import os
import unittest
import nbdiff.server.command.AboutUsCommand as aucmd
import nbdiff.server.command.ComparisonCommand as ccmd
import nbdiff.server.command.ContactUsCommand as cucmd
import nbdiff.server.command.DiffCommand as dcmd
import nbdiff.server.command.DiffURLCommand as ducmd
import nbdiff.server.command.FaqCommand as fcmd
import nbdiff.server.command.MergeCommand as mcmd
import nbdiff.server.command.MergeURLCommand as mucmd
import nbdiff.server.command.NotebookRequestCommand as nrcmd
import nbdiff.server.command.ResourceRequestCommand as rrcmd
import nbdiff.server.command.SaveNotebookCommand as sncmd
import nbdiff.server.command.UploadCommand as ucmd
import nbdiff.server.remote_server as rs
import nbdiff.server.database as db
import bitarray
from pretend import stub
from nbdiff.server.database.nbdiffModel import nbdiffModel
and context (classes, functions, or code) from other files:
# Path: nbdiff/server/database/nbdiffModel.py
# class nbdiffModel(Base):
# __tablename__ = 'nbdiffResult'
# id = Column(Integer, primary_key=True)
# notebook = Column('notebook', Binary)
#
# def __init__(self, notebook):
# self.notebook = notebook
#
# def __repr__(self):
# return '<Notebook %r>' % (self.notebook)
. Output only the next line. | mcmd.render_template = mock_render_template |
Continue the code snippet: <|code_start|> elif args.model == 'ssadgm':
X_train_lbl, y_train_lbl, X_train_unl, y_train_unl \
= data.split_semisup(X_train, y_train, n_lbl=args.n_labeled)
model = models.SSADGM(X_labeled=X_train_lbl, y_labeled=y_train_lbl, n_out=n_out,
n_superbatch=args.n_superbatch, opt_alg=args.alg, opt_params=p)
X_train, y_train = X_train_unl, y_train_unl
else:
raise ValueError('Invalid model')
# train model
model.fit(X_train, y_train, X_val, y_val,
n_epoch=args.epochs, n_batch=args.n_batch,
logname=args.logname)
def plot(args):
curves = []
for f in args.logfiles:
x, y = fig.parselog(f, yi=args.col)
curves.append( (x,y) )
if args.type == 'two':
fig.plot_many(args.out, curves, names=[], double=args.double)
elif args.type == 'many':
fig.plot_many(args.out, curves, args.logfiles, double=args.double)
elif args.type == 'one-vs-many':
main_curve = curves[0]
other_curves = curves[1:]
double_main = True if args.double else False
fig.plot_one_vs_many(args.out, main_curve, other_curves, double_main)
elif args.type == 'many-vs-many':
<|code_end|>
. Use current file imports:
import argparse
import models
import numpy as np
from util import data, fig, launch
and context (classes, functions, or code) from other files:
# Path: util/data.py
# def whiten(X_train, X_valid):
# def load_cifar10():
# def _progress(count, block_size, total_size):
# def load_CIFAR_batch(filename):
# def load_mnist():
# def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
# def load_mnist_images(filename):
# def load_mnist_labels(filename):
# def load_svhn():
# def download(filename, source="https://github.com/smlaine2/tempens/raw/master/data/svhn/"):
# def load_svhn_files(filenames):
# def split_semisup(X, y, n_lbl):
# def load_digits():
# def load_noise(n=100,d=5):
# def load_h5(h5_path):
# def nudge_dataset(X, Y):
# def prepare_dataset(X_train, y_train, X_test, y_test, aug_translation=0, zca=False):
# def whiten_norm(x):
# def __init__(self, regularization=1e-5, x=None):
# def fit(self, x):
# def apply(self, x):
# def invert(self, x):
# DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# X = datadict['data']
# Y = datadict['labels']
# X = X.reshape(10000, 3, 32, 32).astype("float32")
# Y = np.array(Y, dtype=np.uint8)
# X, Y = load_CIFAR_batch(f)
# X = np.asarray(digits.data, 'float32')
# X, Y = nudge_dataset(X, digits.target)
# X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
# X = X.reshape((n,1,d,d))
# Y = np.array(Y, dtype=np.uint8)
# X = np.random.randint(2,size=(n,1,d,d)).astype('float32')
# Y = np.random.randint(2,size=(n,)).astype(np.uint8)
# X = np.array(hf.get('data'))
# Y = np.array(hf.get('label'))
# X = np.concatenate([X] +
# [np.apply_along_axis(shift, 1, X, vector)
# for vector in direction_vectors])
# Y = np.concatenate([Y for _ in range(5)], axis=0)
# U, S, V = np.linalg.svd(sigma)
# class ZCA(object):
#
# Path: util/fig.py
# def parselog(fname, xi=0, yi=2):
# def plot_many(fname, curves, names=None, double=[]):
# def plot_one_vs_many(fname, main_curve, curves, double_main=False):
# def plot_many_vs_many(fname, curves1, curves2, double1=False, double2=False):
# def _flip_and_stretch(curves, idx=[]):
#
# Path: util/launch.py
# def print_grid(args):
. Output only the next line. | curves2 = [] |
Predict the next line for this snippet: <|code_start|> elif args.model == 'resnet':
model = models.Resnet(n_dim=n_dim, n_out=n_out, n_chan=n_channels,
n_superbatch=args.n_superbatch, opt_alg=args.alg, opt_params=p)
elif args.model == 'vae':
model = models.VAE(n_dim=n_dim, n_out=n_out, n_chan=n_channels, n_batch=args.n_batch,
n_superbatch=args.n_superbatch, opt_alg=args.alg, opt_params=p,
model='bernoulli' if args.dataset in ('digits', 'mnist')
else 'gaussian')
elif args.model == 'convvae':
model = models.ConvVAE(n_dim=n_dim, n_out=n_out, n_chan=n_channels, n_batch=args.n_batch,
n_superbatch=args.n_superbatch, opt_alg=args.alg, opt_params=p,
model='bernoulli' if args.dataset in ('digits', 'mnist')
else 'gaussian')
elif args.model == 'convadgm':
model = models.ConvADGM(n_dim=n_dim, n_out=n_out, n_chan=n_channels, n_batch=args.n_batch,
n_superbatch=args.n_superbatch, opt_alg=args.alg, opt_params=p,
model='bernoulli' if args.dataset in ('digits', 'mnist')
else 'gaussian')
elif args.model == 'sbn':
model = models.SBN(n_dim=n_dim, n_out=n_out, n_chan=n_channels,
n_superbatch=args.n_superbatch, opt_alg=args.alg, opt_params=p)
elif args.model == 'adgm':
model = models.ADGM(n_dim=n_dim, n_out=n_out, n_chan=n_channels, n_batch=args.n_batch,
n_superbatch=args.n_superbatch, opt_alg=args.alg, opt_params=p,
model='bernoulli' if args.dataset in ('digits', 'mnist')
else 'gaussian')
elif args.model == 'hdgm':
model = models.HDGM(n_dim=n_dim, n_out=n_out, n_chan=n_channels, n_batch=args.n_batch,
n_superbatch=args.n_superbatch, opt_alg=args.alg, opt_params=p)
elif args.model == 'dadgm':
<|code_end|>
with the help of current file imports:
import argparse
import models
import numpy as np
from util import data, fig, launch
and context from other files:
# Path: util/data.py
# def whiten(X_train, X_valid):
# def load_cifar10():
# def _progress(count, block_size, total_size):
# def load_CIFAR_batch(filename):
# def load_mnist():
# def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
# def load_mnist_images(filename):
# def load_mnist_labels(filename):
# def load_svhn():
# def download(filename, source="https://github.com/smlaine2/tempens/raw/master/data/svhn/"):
# def load_svhn_files(filenames):
# def split_semisup(X, y, n_lbl):
# def load_digits():
# def load_noise(n=100,d=5):
# def load_h5(h5_path):
# def nudge_dataset(X, Y):
# def prepare_dataset(X_train, y_train, X_test, y_test, aug_translation=0, zca=False):
# def whiten_norm(x):
# def __init__(self, regularization=1e-5, x=None):
# def fit(self, x):
# def apply(self, x):
# def invert(self, x):
# DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# X = datadict['data']
# Y = datadict['labels']
# X = X.reshape(10000, 3, 32, 32).astype("float32")
# Y = np.array(Y, dtype=np.uint8)
# X, Y = load_CIFAR_batch(f)
# X = np.asarray(digits.data, 'float32')
# X, Y = nudge_dataset(X, digits.target)
# X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
# X = X.reshape((n,1,d,d))
# Y = np.array(Y, dtype=np.uint8)
# X = np.random.randint(2,size=(n,1,d,d)).astype('float32')
# Y = np.random.randint(2,size=(n,)).astype(np.uint8)
# X = np.array(hf.get('data'))
# Y = np.array(hf.get('label'))
# X = np.concatenate([X] +
# [np.apply_along_axis(shift, 1, X, vector)
# for vector in direction_vectors])
# Y = np.concatenate([Y for _ in range(5)], axis=0)
# U, S, V = np.linalg.svd(sigma)
# class ZCA(object):
#
# Path: util/fig.py
# def parselog(fname, xi=0, yi=2):
# def plot_many(fname, curves, names=None, double=[]):
# def plot_one_vs_many(fname, main_curve, curves, double_main=False):
# def plot_many_vs_many(fname, curves1, curves2, double1=False, double2=False):
# def _flip_and_stretch(curves, idx=[]):
#
# Path: util/launch.py
# def print_grid(args):
, which may contain function names, class names, or code. Output only the next line. | model = models.DADGM(n_dim=n_dim, n_out=n_out, n_chan=n_channels, |
Based on the snippet: <|code_start|> choices=['two', 'many', 'one-vs-many', 'many-vs-many'])
plot_parser.add_argument('--out', required=True)
plot_parser.add_argument('--double', nargs='+', default=[0], type=int)
plot_parser.add_argument('--col', type=int, default=2)
plot_parser.add_argument('--log2', nargs='+')
# grid
grid_parser = subparsers.add_parser('grid',
help='Print command for hyperparameter grid search')
grid_parser.set_defaults(func=grid)
grid_parser.add_argument('--dataset', default='mnist')
grid_parser.add_argument('--model', default='softmax')
grid_parser.add_argument('-e', '--epochs', type=int, default=10)
grid_parser.add_argument('-l', '--logname', default='mnist-run')
grid_parser.add_argument('--alg', nargs='+')
grid_parser.add_argument('--lr', type=float, default=[1e-3], nargs='+')
grid_parser.add_argument('--b1', type=float, default=[0.9], nargs='+')
grid_parser.add_argument('--b2', type=float, default=[0.999], nargs='+')
grid_parser.add_argument('--n_batch', type=int, default=[100], nargs='+')
return parser
# ----------------------------------------------------------------------------
def train(args):
np.random.seed(1234)
if args.dataset == 'digits':
<|code_end|>
, predict the immediate next line with the help of imports:
import argparse
import models
import numpy as np
from util import data, fig, launch
and context (classes, functions, sometimes code) from other files:
# Path: util/data.py
# def whiten(X_train, X_valid):
# def load_cifar10():
# def _progress(count, block_size, total_size):
# def load_CIFAR_batch(filename):
# def load_mnist():
# def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
# def load_mnist_images(filename):
# def load_mnist_labels(filename):
# def load_svhn():
# def download(filename, source="https://github.com/smlaine2/tempens/raw/master/data/svhn/"):
# def load_svhn_files(filenames):
# def split_semisup(X, y, n_lbl):
# def load_digits():
# def load_noise(n=100,d=5):
# def load_h5(h5_path):
# def nudge_dataset(X, Y):
# def prepare_dataset(X_train, y_train, X_test, y_test, aug_translation=0, zca=False):
# def whiten_norm(x):
# def __init__(self, regularization=1e-5, x=None):
# def fit(self, x):
# def apply(self, x):
# def invert(self, x):
# DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# X = datadict['data']
# Y = datadict['labels']
# X = X.reshape(10000, 3, 32, 32).astype("float32")
# Y = np.array(Y, dtype=np.uint8)
# X, Y = load_CIFAR_batch(f)
# X = np.asarray(digits.data, 'float32')
# X, Y = nudge_dataset(X, digits.target)
# X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
# X = X.reshape((n,1,d,d))
# Y = np.array(Y, dtype=np.uint8)
# X = np.random.randint(2,size=(n,1,d,d)).astype('float32')
# Y = np.random.randint(2,size=(n,)).astype(np.uint8)
# X = np.array(hf.get('data'))
# Y = np.array(hf.get('label'))
# X = np.concatenate([X] +
# [np.apply_along_axis(shift, 1, X, vector)
# for vector in direction_vectors])
# Y = np.concatenate([Y for _ in range(5)], axis=0)
# U, S, V = np.linalg.svd(sigma)
# class ZCA(object):
#
# Path: util/fig.py
# def parselog(fname, xi=0, yi=2):
# def plot_many(fname, curves, names=None, double=[]):
# def plot_one_vs_many(fname, main_curve, curves, double_main=False):
# def plot_many_vs_many(fname, curves1, curves2, double1=False, double2=False):
# def _flip_and_stretch(curves, idx=[]):
#
# Path: util/launch.py
# def print_grid(args):
. Output only the next line. | n_dim, n_out, n_channels = 8, 10, 1 |
Given snippet: <|code_start|>
limit = list(range(10))
stop = list(range(3,7))
target = list(range(10, 20))
sets = itertools.product(limit, stop, target)
sets = list(sets)
N = len(sets)
ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
class TestMultiIndexGetter(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
mi = pd.MultiIndex.from_tuples(sets)
df = pd.DataFrame(np.random.randn(N, N), index=ind)
df.columns = mi
df.columns.names = ['limit', 'stop', 'target']
m = MultiIndexGetter(df, attr='columns')
self.df = df
self.m = m
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from unittest import TestCase
from trtools.core.columns import MultiIndexGetter, ObjectIndexGetter, LevelWrapper
import itertools
import pandas as pd
import numpy as np
import trtools.util.testing as tm
import nose
and context:
# Path: trtools/core/columns.py
# class MultiIndexGetter(IndexGetter):
# """
# Handles MultiIndex.
#     Requires that the levels be named.
# """
# def sub_column(self, name):
# return self._index.get_level_values(name)
#
# def level_name(self, name):
# """
# Get the .levels value by name
# """
# ind = self._index.names.index(name)
# return self._index.levels[ind]
#
# @property
# def names(self):
# """
# Complete based off of MultiIndex.names
# """
# return [c for c in self._index.names]
#
# class ObjectIndexGetter(IndexGetter):
# """
# Handles an Index of objects and treats the attributes like un-ordered levels.
# """
# def sub_column(self, name):
# return pd.Index([_get_val(col, name) for col in self._index])
#
# def level_name(self, name):
# """
# Note that the only way to get the equivalent of MultiIndex.levels is to get all
# values and then run unique. There should be caching done somewhere here
# """
# vals = self.sub_column(name)
# ind = vals.unique()
# ind.sort()
# ind = pd.Index(ind)
# return ind
#
# @property
# def names(self):
# """
# Try to grab the proper attrs for the Columns
# Best case is that the object has a keys method.
# """
# test = self._index[0]
# try:
# names = list(test.keys())
# return names
# except:
# names = list(test.__dict__.keys())
# return names
#
# class LevelWrapper(object):
# def __init__(self, name, getter):
# self.name = name
# self.getter = getter
#
# def __getitem__(self, key):
# # get level value by .levels which are the distinct monotonic values
# if isinstance(key, int):
# return self.labels[key]
# raise KeyError(key)
#
# @property
# def labels(self):
# """
#         Return the actual labels. Equivalent of MultiIndex.levels[x]
# """
# return self.getter.level_name(self.name)
#
# @property
# def values(self):
# """ Returns the actual values """
# vals = self.getter.sub_column(self.name)
# return vals
#
# def __array__(self):
# # TODO
# # Reset dtypes to proper dtype. Other than the numerics, things like bools
# # come out of here as dtype == object.
# vals = self.values
# # vals is pd.Index
#         # pd.Index promotes floats to objects, we demote to float if it's numeric
# if vals.is_numeric() and not isinstance(vals, pd.Int64Index):
# vals = vals.values.astype(float)
# else:
# # always return an np.ndarray and not index
# vals = vals.values
# return vals
#
# #----------------------------------------------------------------------
# # Arithmetic operators
#
# __add__ = _sub_method(operator.add, '__add__')
# __sub__ = _sub_method(operator.sub, '__sub__')
# __mul__ = _sub_method(operator.mul, '__mul__')
# __truediv__ = _sub_method(operator.truediv, '__truediv__')
# __floordiv__ = _sub_method(operator.floordiv, '__floordiv__')
# __pow__ = _sub_method(operator.pow, '__pow__')
#
# #__radd__ = _sub_method(_radd_compat, '__add__')
# __rmul__ = _sub_method(operator.mul, '__mul__')
# __rsub__ = _sub_method(lambda x, y: y - x, '__sub__')
# __rtruediv__ = _sub_method(lambda x, y: y / x, '__truediv__')
# __rfloordiv__ = _sub_method(lambda x, y: y // x, '__floordiv__')
# __rpow__ = _sub_method(lambda x, y: y ** x, '__pow__')
#
# # comparisons
# __gt__ = _sub_method(operator.gt, '__gt__')
# __ge__ = _sub_method(operator.ge, '__ge__')
# __lt__ = _sub_method(operator.lt, '__lt__')
# __le__ = _sub_method(operator.le, '__le__')
#
# def __eq__(self, other):
# vals = np.array(self)
# try:
# return np.isclose(vals, other)
# except TypeError:
# # likely here because vals/other is not numeric
# return operator.eq(vals, other)
#
# __ne__ = _sub_method(operator.ne, '__ne__')
#
# # Python 2 division operators
# if not compat.PY3:
# __div__ = _sub_method(operator.div, '__div__')
# __rdiv__ = _sub_method(lambda x, y: y / x, '__div__')
# __idiv__ = __div__
which might include code, classes, or functions. Output only the next line. | def test_multiindexgetter(self): |
Here is a snippet: <|code_start|>
limit = list(range(10))
stop = list(range(3,7))
target = list(range(10, 20))
sets = itertools.product(limit, stop, target)
sets = list(sets)
N = len(sets)
ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
class TestMultiIndexGetter(TestCase):
<|code_end|>
. Write the next line using the current file imports:
from unittest import TestCase
from trtools.core.columns import MultiIndexGetter, ObjectIndexGetter, LevelWrapper
import itertools
import pandas as pd
import numpy as np
import trtools.util.testing as tm
import nose
and context from other files:
# Path: trtools/core/columns.py
# class MultiIndexGetter(IndexGetter):
# """
# Handles MultiIndex.
#     Requires that the levels be named.
# """
# def sub_column(self, name):
# return self._index.get_level_values(name)
#
# def level_name(self, name):
# """
# Get the .levels value by name
# """
# ind = self._index.names.index(name)
# return self._index.levels[ind]
#
# @property
# def names(self):
# """
# Complete based off of MultiIndex.names
# """
# return [c for c in self._index.names]
#
# class ObjectIndexGetter(IndexGetter):
# """
# Handles an Index of objects and treats the attributes like un-ordered levels.
# """
# def sub_column(self, name):
# return pd.Index([_get_val(col, name) for col in self._index])
#
# def level_name(self, name):
# """
# Note that the only way to get the equivalent of MultiIndex.levels is to get all
# values and then run unique. There should be caching done somewhere here
# """
# vals = self.sub_column(name)
# ind = vals.unique()
# ind.sort()
# ind = pd.Index(ind)
# return ind
#
# @property
# def names(self):
# """
# Try to grab the proper attrs for the Columns
# Best case is that the object has a keys method.
# """
# test = self._index[0]
# try:
# names = list(test.keys())
# return names
# except:
# names = list(test.__dict__.keys())
# return names
#
# class LevelWrapper(object):
# def __init__(self, name, getter):
# self.name = name
# self.getter = getter
#
# def __getitem__(self, key):
# # get level value by .levels which are the distinct monotonic values
# if isinstance(key, int):
# return self.labels[key]
# raise KeyError(key)
#
# @property
# def labels(self):
# """
#         Return the actual labels. Equivalent of MultiIndex.levels[x]
# """
# return self.getter.level_name(self.name)
#
# @property
# def values(self):
# """ Returns the actual values """
# vals = self.getter.sub_column(self.name)
# return vals
#
# def __array__(self):
# # TODO
# # Reset dtypes to proper dtype. Other than the numerics, things like bools
# # come out of here as dtype == object.
# vals = self.values
# # vals is pd.Index
#         # pd.Index promotes floats to objects, we demote to float if it's numeric
# if vals.is_numeric() and not isinstance(vals, pd.Int64Index):
# vals = vals.values.astype(float)
# else:
# # always return an np.ndarray and not index
# vals = vals.values
# return vals
#
# #----------------------------------------------------------------------
# # Arithmetic operators
#
# __add__ = _sub_method(operator.add, '__add__')
# __sub__ = _sub_method(operator.sub, '__sub__')
# __mul__ = _sub_method(operator.mul, '__mul__')
# __truediv__ = _sub_method(operator.truediv, '__truediv__')
# __floordiv__ = _sub_method(operator.floordiv, '__floordiv__')
# __pow__ = _sub_method(operator.pow, '__pow__')
#
# #__radd__ = _sub_method(_radd_compat, '__add__')
# __rmul__ = _sub_method(operator.mul, '__mul__')
# __rsub__ = _sub_method(lambda x, y: y - x, '__sub__')
# __rtruediv__ = _sub_method(lambda x, y: y / x, '__truediv__')
# __rfloordiv__ = _sub_method(lambda x, y: y // x, '__floordiv__')
# __rpow__ = _sub_method(lambda x, y: y ** x, '__pow__')
#
# # comparisons
# __gt__ = _sub_method(operator.gt, '__gt__')
# __ge__ = _sub_method(operator.ge, '__ge__')
# __lt__ = _sub_method(operator.lt, '__lt__')
# __le__ = _sub_method(operator.le, '__le__')
#
# def __eq__(self, other):
# vals = np.array(self)
# try:
# return np.isclose(vals, other)
# except TypeError:
# # likely here because vals/other is not numeric
# return operator.eq(vals, other)
#
# __ne__ = _sub_method(operator.ne, '__ne__')
#
# # Python 2 division operators
# if not compat.PY3:
# __div__ = _sub_method(operator.div, '__div__')
# __rdiv__ = _sub_method(lambda x, y: y / x, '__div__')
# __idiv__ = __div__
, which may include functions, classes, or code. Output only the next line. | def __init__(self, *args, **kwargs): |
Based on the snippet: <|code_start|>
td = TemporaryDirectory()
class TestClass(object):
def __init__(self):
self.count = 0
@cacher(td.name + '/test/test_meth', method=True)
def test_meth(self):
self.count += 1
return self.count
def test_meth2(self):
self.count += 1
return self.count
class TestCaching(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
<|code_end|>
, predict the immediate next line with the help of imports:
import os.path
import nose
from unittest import TestCase
from trtools.io.cacher import cacher
from trtools.util.tempdir import TemporaryDirectory
and context (classes, functions, sometimes code) from other files:
# Path: trtools/io/cacher.py
# class cacher(object):
# '''Decorator. Caches a function's return value each time it is called.
# If called later with the same arguments, the cached value is returned
# (not reevaluated).
# '''
# def __init__(self, filepath=None, method=False):
# self.func = None
# self.filepath = filepath
# self.method = method
# self.cache = {}
# self.first_run = False
#
# def check_dirs(self):
# dir, _ = os.path.split(self.filepath)
# if not os.path.exists(dir):
# print(("Making dirs: %s" % dir))
# os.makedirs(dir)
#
# def __call__(self, func):
# def wrapper(*args, **kwargs):
# if self.filepath and not self.first_run:
# self.check_dirs()
# self.load()
# self.first_run = True
#
# if not isinstance(args, collections.Hashable):
# print('not cacheable')
# # uncacheable. a list, for instance.
# # better to not cache than blow up.
# return func(*args)
#
# if self.method:
# key = hash(args[1:])
# else:
# key = hash(args)
#
# if key in self.cache:
# print('return from cache')
# return self.cache[key]
# else:
# value = func(*args)
# self.cache[key] = value
# if self.filepath:
# self.save()
# return value
# wrapper.cacher = self
# self.func = func
# return wrapper
#
# def save(self):
# with open(self.filepath, 'wb') as f:
# pickle.dump(self.cache, f)
#
# def load(self):
# try:
# print(('Loading %s' % self.filepath))
# with open(self.filepath) as f:
# self.cache = pickle.load(f)
# except IOError:
# print('Cache file does not exist')
#
# def __repr__(self):
# '''Return the function's docstring.'''
# return self.func.__doc__
#
# def __get__(self, obj, objtype):
# '''Support instance methods.'''
# return functools.partial(self.__call__, obj)
#
# def clear(self):
# self.cache.clear()
# if os.path.isfile(self.filepath):
# os.remove(self.filepath)
. Output only the next line. | pass |
Predict the next line after this snippet: <|code_start|># !/usr/bin/env python3
# -*- coding: utf-8 -*-
def main():
t1 = TextField()
t1.x = t1.y = 50
t1.size = 30
t1.text = "Hello World!"
addChild(t1)
t2 = TextField()
t2.x = 50
t2.y = 150
<|code_end|>
using the current file's imports:
from pylash.core import init, addChild
from pylash.display import TextField, TextFormatWeight
and any relevant context from other files:
# Path: pylash/core.py
# def init(speed, title, width, height, callback):
# '''
# The initialization function of `pylash`. This function will create a game window, set its
# size to \\(width \\times height\\) and window title to `title`. The game window will repaint
# per `speed` milliseconds. After the setup of the game window, `callback` will be
# invoked, which is the entrance function of your game.
#
# Parameters
# ----------
# speed : float
# The window repainting rate. Generally, it is supposed to be \\(1000 / FPS\\).
# title : str
# The window title.
# width : int
# The window's width.
# height : int
# The window's height.
# callback : funtion
# The callback function invoked after the setup of game window.
#
# Example
# -------
# ```
# def main():
# print("Hello, world!")
#
# init(1000 / 60, "Init Test", 100, 100, main)
# ```
# '''
#
# stage.app = QtWidgets.QApplication(sys.argv)
#
# stage._setCanvas(speed, title, width, height)
#
# if not hasattr(callback, "__call__"):
# raise TypeError("init(speed, title, width, height, callback): parameter 'callback' must be a function.")
#
# callback()
#
# sys.exit(stage.app.exec_())
#
# def addChild(self, child):
# '''
# Appends `child` to the `stage`'s display list, then the `child` object will be rendered
# on the game window.
#
# Parameters
# ----------
# child : pylash.display.DisplayObject
# The display object to be added to the stage.
# '''
#
# if child is not None:
# child.parent = self
#
# self.childList.append(child)
# else:
# raise TypeError("Stage.addChild(child): parameter 'child' must be a display object.")
#
# Path: pylash/display.py
# class TextField(DisplayObject):
# def __init__(self):
# super(TextField, self).__init__()
#
# self.text = ""
# self.font = "Arial"
# self.size = 15
# self.textColor = "#000000"
# self.backgroundColor = None
# self.italic = False
# self.weight = TextFormatWeight.NORMAL
# self.textAlign = TextFormatAlign.LEFT
# self.textBaseline = TextFormatBaseline.TOP
#
# def _getOriginalWidth(self):
# font = self.__getFont()
# fontMetrics = QtGui.QFontMetrics(font)
#
# return fontMetrics.width(str(self.text))
#
# def _getOriginalHeight(self):
# font = self.__getFont()
# fontMetrics = QtGui.QFontMetrics(font)
#
# return fontMetrics.height()
#
# def __getFont(self):
# weight = self.weight
#
# if self.weight == TextFormatWeight.NORMAL:
# weight = QtGui.QFont.Normal
# elif self.weight == TextFormatWeight.BOLD:
# weight = QtGui.QFont.Bold
# elif self.weight == TextFormatWeight.BOLDER:
# weight = QtGui.QFont.Black
# elif self.weight == TextFormatWeight.LIGHTER:
# weight = QtGui.QFont.Light
#
# font = QtGui.QFont()
# font.setFamily(self.font)
# font.setPixelSize(self.size)
# font.setWeight(weight)
# font.setItalic(self.italic)
#
# return font
#
# def __getTextStartX(self):
# w = self._getOriginalWidth()
#
# if self.textAlign == TextFormatAlign.END or self.textAlign == TextFormatAlign.RIGHT:
# return -w
# elif self.textAlign == TextFormatAlign.CENTER:
# return -w / 2
# else:
# return 0
#
# def __getTextStartY(self):
# h = self._getOriginalHeight()
#
# if self.textBaseline == TextFormatBaseline.ALPHABETIC or self.textBaseline == TextFormatBaseline.MIDDLE:
# return -h
# elif self.textBaseline == TextFormatBaseline.MIDDLE:
# return -h / 2
# else:
# return 0
#
# def _loopDraw(self, c):
# font = self.__getFont()
# flags = QtCore.Qt.AlignCenter
# startX = self.__getTextStartX()
# startY = self.__getTextStartY()
# width = self._getOriginalWidth()
# height = self._getOriginalHeight()
#
# pen = QtGui.QPen()
# pen.setBrush(QtGui.QBrush(getColor(self.textColor)))
#
# if self.backgroundColor:
# brush = QtGui.QBrush()
# brush.setColor(getColor(self.backgroundColor))
# brush.setStyle(QtCore.Qt.SolidPattern)
# c.setBrush(brush)
#
# c.setPen(getColor("transparent"))
#
# c.drawRect(startX, startY, width, height)
#
# c.setFont(font)
# c.setPen(pen)
# c.drawText(startX, startY, width, height, flags, str(self.text))
#
# class TextFormatWeight(object):
# NORMAL = "normal"
# BOLD = "bold"
# BOLDER = "bolder"
# LIGHTER = "lighter"
#
# def __init__(self):
# raise Exception("TextFormatWeight cannot be instantiated.")
. Output only the next line. | t2.text = "Hello Pylash~" |
Given the code snippet: <|code_start|># !/usr/bin/env python3
# -*- coding: utf-8 -*-
def main():
t1 = TextField()
t1.x = t1.y = 50
t1.size = 30
t1.text = "Hello World!"
addChild(t1)
t2 = TextField()
t2.x = 50
t2.y = 150
t2.text = "Hello Pylash~"
t2.rotation = 30
t2.size = 50
<|code_end|>
, generate the next line using the imports in this file:
from pylash.core import init, addChild
from pylash.display import TextField, TextFormatWeight
and context (functions, classes, or occasionally code) from other files:
# Path: pylash/core.py
# def init(speed, title, width, height, callback):
# '''
# The initialization function of `pylash`. This function will create a game window, set its
# size to \\(width \\times height\\) and window title to `title`. The game window will repaint
# per `speed` milliseconds. After the setup of the game window, `callback` will be
# invoked, which is the entrance function of your game.
#
# Parameters
# ----------
# speed : float
# The window repainting rate. Generally, it is supposed to be \\(1000 / FPS\\).
# title : str
# The window title.
# width : int
# The window's width.
# height : int
# The window's height.
# callback : funtion
# The callback function invoked after the setup of game window.
#
# Example
# -------
# ```
# def main():
# print("Hello, world!")
#
# init(1000 / 60, "Init Test", 100, 100, main)
# ```
# '''
#
# stage.app = QtWidgets.QApplication(sys.argv)
#
# stage._setCanvas(speed, title, width, height)
#
# if not hasattr(callback, "__call__"):
# raise TypeError("init(speed, title, width, height, callback): parameter 'callback' must be a function.")
#
# callback()
#
# sys.exit(stage.app.exec_())
#
# def addChild(self, child):
# '''
# Appends `child` to the `stage`'s display list, then the `child` object will be rendered
# on the game window.
#
# Parameters
# ----------
# child : pylash.display.DisplayObject
# The display object to be added to the stage.
# '''
#
# if child is not None:
# child.parent = self
#
# self.childList.append(child)
# else:
# raise TypeError("Stage.addChild(child): parameter 'child' must be a display object.")
#
# Path: pylash/display.py
# class TextField(DisplayObject):
# def __init__(self):
# super(TextField, self).__init__()
#
# self.text = ""
# self.font = "Arial"
# self.size = 15
# self.textColor = "#000000"
# self.backgroundColor = None
# self.italic = False
# self.weight = TextFormatWeight.NORMAL
# self.textAlign = TextFormatAlign.LEFT
# self.textBaseline = TextFormatBaseline.TOP
#
# def _getOriginalWidth(self):
# font = self.__getFont()
# fontMetrics = QtGui.QFontMetrics(font)
#
# return fontMetrics.width(str(self.text))
#
# def _getOriginalHeight(self):
# font = self.__getFont()
# fontMetrics = QtGui.QFontMetrics(font)
#
# return fontMetrics.height()
#
# def __getFont(self):
# weight = self.weight
#
# if self.weight == TextFormatWeight.NORMAL:
# weight = QtGui.QFont.Normal
# elif self.weight == TextFormatWeight.BOLD:
# weight = QtGui.QFont.Bold
# elif self.weight == TextFormatWeight.BOLDER:
# weight = QtGui.QFont.Black
# elif self.weight == TextFormatWeight.LIGHTER:
# weight = QtGui.QFont.Light
#
# font = QtGui.QFont()
# font.setFamily(self.font)
# font.setPixelSize(self.size)
# font.setWeight(weight)
# font.setItalic(self.italic)
#
# return font
#
# def __getTextStartX(self):
# w = self._getOriginalWidth()
#
# if self.textAlign == TextFormatAlign.END or self.textAlign == TextFormatAlign.RIGHT:
# return -w
# elif self.textAlign == TextFormatAlign.CENTER:
# return -w / 2
# else:
# return 0
#
# def __getTextStartY(self):
# h = self._getOriginalHeight()
#
# if self.textBaseline == TextFormatBaseline.ALPHABETIC or self.textBaseline == TextFormatBaseline.MIDDLE:
# return -h
# elif self.textBaseline == TextFormatBaseline.MIDDLE:
# return -h / 2
# else:
# return 0
#
# def _loopDraw(self, c):
# font = self.__getFont()
# flags = QtCore.Qt.AlignCenter
# startX = self.__getTextStartX()
# startY = self.__getTextStartY()
# width = self._getOriginalWidth()
# height = self._getOriginalHeight()
#
# pen = QtGui.QPen()
# pen.setBrush(QtGui.QBrush(getColor(self.textColor)))
#
# if self.backgroundColor:
# brush = QtGui.QBrush()
# brush.setColor(getColor(self.backgroundColor))
# brush.setStyle(QtCore.Qt.SolidPattern)
# c.setBrush(brush)
#
# c.setPen(getColor("transparent"))
#
# c.drawRect(startX, startY, width, height)
#
# c.setFont(font)
# c.setPen(pen)
# c.drawText(startX, startY, width, height, flags, str(self.text))
#
# class TextFormatWeight(object):
# NORMAL = "normal"
# BOLD = "bold"
# BOLDER = "bolder"
# LIGHTER = "lighter"
#
# def __init__(self):
# raise Exception("TextFormatWeight cannot be instantiated.")
. Output only the next line. | t2.textColor = "#FF4500" |
Predict the next line for this snippet: <|code_start|> self._onComplete = None
def load(self, loadList, onUpdate = None, onComplete = None):
self._loadNum = len(loadList)
self._onUpdate = onUpdate
self._onComplete = onComplete
for o in loadList:
path = None
fileType = None
if "path" in o:
path = o["path"]
else:
raise ValueError("LoadManage.load(loadList, onUpdate = None, onComplete = None): parameter 'loadList' has a wrong item which dosen't have a key named 'path'.")
if not "type" in o:
extension = getExtension(path)
if extension in LoadManage.IMAGE_EXTENSION:
fileType = "image"
elif extension in LoadManage.MEDIA_EXTENSION:
fileType = "media"
else:
fileType = o["type"]
if not path:
continue
if fileType == "image":
<|code_end|>
with the help of current file imports:
import threading, time, socket
from PySide2 import QtCore, QtGui, QtMultimedia
from .core import Object, stage
from .events import Event, EventDispatcher
and context from other files:
# Path: pylash/core.py
# class Object(object):
# class CanvasWidget(QtWidgets.QWidget):
# class Stage(Object):
# class KeyCode(object):
# class UnityOfDictAndClass(object):
# def __init__(self):
# def _nonCopyableAttrs(self):
# def copyFrom(self, source):
# def __init__(self):
# def paintEvent(self, event):
# def mousePressEvent(self, event):
# def mouseMoveEvent(self, event):
# def mouseReleaseEvent(self, event):
# def mouseDoubleClickEvent(self, event):
# def keyPressEvent(self, event):
# def keyReleaseEvent(self, event):
# def __enterKeyboardEvent(self, event, eventType):
# def __enterMouseEvent(self, event, eventType):
# def __init__(self):
# def copyFrom(self, source):
# def _setCanvas(self, speed, title, width, height):
# def _onShow(self):
# def _showDisplayList(self, childList):
# def _enterMouseEvent(self, event, cd):
# def setFrameRate(self, speed):
# def addChild(self, child):
# def removeChild(self, child):
# def addEventListener(self, e, listener):
# def removeEventListener(self, e, listener = None):
# def __init__(self):
# def __init__(self):
# def set(obj, key, value):
# def get(obj, key):
# def has(obj, key):
# def init(speed, title, width, height, callback):
# def addChild(child):
# def removeChild(child):
# def getColor(color):
# def removeItemsInList(theList, condition):
# PARENT = "stage_parent_root"
#
# Path: pylash/events.py
# class Event(Object):
# def __init__(self, e):
# super(Event, self).__init__()
#
# if isinstance(e, Event):
# self.copyFrom(e)
# elif isinstance(e, str):
# self.eventType = e
# self.currentTarget = None
# self.target = None
# else:
# raise TypeError("Event.__init__(e): parameter 'e' is either a str or an Event object.")
#
# class EventDispatcher(Object):
# def __init__(self):
# super(EventDispatcher, self).__init__()
#
# self._eventList = []
#
# def __isEventTypeEqual(self, e1, e2):
# e1 = Event(e1)
# e2 = Event(e2)
# return e1.eventType == e2.eventType
#
# def _addEventListenerInList(self, e, listener, eventList):
# eventList.append({
# "eventType" : e,
# "listener" : listener
# })
#
# def _removeEventListenerInList(self, e, listener, eventList):
# def condition(o):
# t = o["eventType"]
# l = o["listener"]
#
# return (l == listener and self.__isEventTypeEqual(e, t))
#
# removeItemsInList(eventList, condition)
#
# def _dispatchEventInList(self, e, eventList):
# for o in eventList:
# t = o["eventType"]
# l = o["listener"]
#
# if self.__isEventTypeEqual(e, t):
# e = Event(e)
#
# if e.currentTarget is None:
# e.currentTarget = self
# if e.target is None:
# e.target = self
#
# l(e)
#
# def _hasEventListenerInList(self, e, listener, eventList):
# for o in eventList:
# t = o["eventType"]
# l = o["listener"]
#
# if l == listener:
# if self.__isEventTypeEqual(e, t):
# return True
# else:
# return False
#
# return False
#
# def addEventListener(self, e, listener):
# self._addEventListenerInList(e, listener, self._eventList)
#
# def removeEventListener(self, e, listener):
# self._removeEventListenerInList(e, listener, self._eventList)
#
# def removeAllEventListeners(self):
# self._eventList = []
#
# def dispatchEvent(self, e):
# self._dispatchEventInList(e, self._eventList)
#
# def hasEventListener(self, e, listener):
# return self._hasEventListenerInList(e, listener, self._eventList)
, which may contain function names, class names, or code. Output only the next line. | loader = ImageLoader() |
Predict the next line for this snippet: <|code_start|>
e = Event(LoaderEvent.COMPLETE)
e.target = self.content
self.dispatchEvent(e)
def _onError(self, err):
e = Event(LoaderEvent.ERROR)
e.target = Exception("MediaLoader: cannot load file in the given path (%s)." % self.content.errorString())
self.dispatchEvent(e)
def load(self, path):
fullpath = QtCore.QDir.current().absoluteFilePath(path)
mediaContent = QtMultimedia.QMediaContent(QtCore.QUrl.fromLocalFile(fullpath))
self.content = QtMultimedia.QMediaPlayer(stage.canvasWidget)
self.content._mediaStatusChangedSlot = lambda status: self._onMediaStatusChanged(status)
self.content.mediaStatusChanged.connect(self.content._mediaStatusChangedSlot)
self.content._errorSlot = lambda err: self_onError(err)
self.content.error.connect(self.content._errorSlot)
self.content.setMedia(mediaContent)
self.content.stop()
class LoadManageWorker(Object):
def __init__(self):
super(LoadManageWorker, self).__init__()
self._resultList = {}
<|code_end|>
with the help of current file imports:
import threading, time, socket
from PySide2 import QtCore, QtGui, QtMultimedia
from .core import Object, stage
from .events import Event, EventDispatcher
and context from other files:
# Path: pylash/core.py
# class Object(object):
# class CanvasWidget(QtWidgets.QWidget):
# class Stage(Object):
# class KeyCode(object):
# class UnityOfDictAndClass(object):
# def __init__(self):
# def _nonCopyableAttrs(self):
# def copyFrom(self, source):
# def __init__(self):
# def paintEvent(self, event):
# def mousePressEvent(self, event):
# def mouseMoveEvent(self, event):
# def mouseReleaseEvent(self, event):
# def mouseDoubleClickEvent(self, event):
# def keyPressEvent(self, event):
# def keyReleaseEvent(self, event):
# def __enterKeyboardEvent(self, event, eventType):
# def __enterMouseEvent(self, event, eventType):
# def __init__(self):
# def copyFrom(self, source):
# def _setCanvas(self, speed, title, width, height):
# def _onShow(self):
# def _showDisplayList(self, childList):
# def _enterMouseEvent(self, event, cd):
# def setFrameRate(self, speed):
# def addChild(self, child):
# def removeChild(self, child):
# def addEventListener(self, e, listener):
# def removeEventListener(self, e, listener = None):
# def __init__(self):
# def __init__(self):
# def set(obj, key, value):
# def get(obj, key):
# def has(obj, key):
# def init(speed, title, width, height, callback):
# def addChild(child):
# def removeChild(child):
# def getColor(color):
# def removeItemsInList(theList, condition):
# PARENT = "stage_parent_root"
#
# Path: pylash/events.py
# class Event(Object):
# def __init__(self, e):
# super(Event, self).__init__()
#
# if isinstance(e, Event):
# self.copyFrom(e)
# elif isinstance(e, str):
# self.eventType = e
# self.currentTarget = None
# self.target = None
# else:
# raise TypeError("Event.__init__(e): parameter 'e' is either a str or an Event object.")
#
# class EventDispatcher(Object):
# def __init__(self):
# super(EventDispatcher, self).__init__()
#
# self._eventList = []
#
# def __isEventTypeEqual(self, e1, e2):
# e1 = Event(e1)
# e2 = Event(e2)
# return e1.eventType == e2.eventType
#
# def _addEventListenerInList(self, e, listener, eventList):
# eventList.append({
# "eventType" : e,
# "listener" : listener
# })
#
# def _removeEventListenerInList(self, e, listener, eventList):
# def condition(o):
# t = o["eventType"]
# l = o["listener"]
#
# return (l == listener and self.__isEventTypeEqual(e, t))
#
# removeItemsInList(eventList, condition)
#
# def _dispatchEventInList(self, e, eventList):
# for o in eventList:
# t = o["eventType"]
# l = o["listener"]
#
# if self.__isEventTypeEqual(e, t):
# e = Event(e)
#
# if e.currentTarget is None:
# e.currentTarget = self
# if e.target is None:
# e.target = self
#
# l(e)
#
# def _hasEventListenerInList(self, e, listener, eventList):
# for o in eventList:
# t = o["eventType"]
# l = o["listener"]
#
# if l == listener:
# if self.__isEventTypeEqual(e, t):
# return True
# else:
# return False
#
# return False
#
# def addEventListener(self, e, listener):
# self._addEventListenerInList(e, listener, self._eventList)
#
# def removeEventListener(self, e, listener):
# self._removeEventListenerInList(e, listener, self._eventList)
#
# def removeAllEventListeners(self):
# self._eventList = []
#
# def dispatchEvent(self, e):
# self._dispatchEventInList(e, self._eventList)
#
# def hasEventListener(self, e, listener):
# return self._hasEventListenerInList(e, listener, self._eventList)
, which may contain function names, class names, or code. Output only the next line. | self._loadIndex = 0 |
Predict the next line for this snippet: <|code_start|>
class LoaderEvent(object):
COMPLETE = Event("loader_complete")
ERROR = Event("loader_error")
def __init__(self):
raise Exception("LoaderEvent cannot be instantiated.")
class Loader(EventDispatcher):
def __init__(self):
super(Loader, self).__init__()
self.content = None
self.resourceName = None
def load(self, path):
pass
class ImageLoader(Loader):
class _ImageLoaderWorker(QtCore.QObject):
resultReady = QtCore.Signal(QtGui.QImage)
def __init__(self, path):
super(ImageLoader._ImageLoaderWorker, self).__init__()
self.path = path
<|code_end|>
with the help of current file imports:
import threading, time, socket
from PySide2 import QtCore, QtGui, QtMultimedia
from .core import Object, stage
from .events import Event, EventDispatcher
and context from other files:
# Path: pylash/core.py
# class Object(object):
# class CanvasWidget(QtWidgets.QWidget):
# class Stage(Object):
# class KeyCode(object):
# class UnityOfDictAndClass(object):
# def __init__(self):
# def _nonCopyableAttrs(self):
# def copyFrom(self, source):
# def __init__(self):
# def paintEvent(self, event):
# def mousePressEvent(self, event):
# def mouseMoveEvent(self, event):
# def mouseReleaseEvent(self, event):
# def mouseDoubleClickEvent(self, event):
# def keyPressEvent(self, event):
# def keyReleaseEvent(self, event):
# def __enterKeyboardEvent(self, event, eventType):
# def __enterMouseEvent(self, event, eventType):
# def __init__(self):
# def copyFrom(self, source):
# def _setCanvas(self, speed, title, width, height):
# def _onShow(self):
# def _showDisplayList(self, childList):
# def _enterMouseEvent(self, event, cd):
# def setFrameRate(self, speed):
# def addChild(self, child):
# def removeChild(self, child):
# def addEventListener(self, e, listener):
# def removeEventListener(self, e, listener = None):
# def __init__(self):
# def __init__(self):
# def set(obj, key, value):
# def get(obj, key):
# def has(obj, key):
# def init(speed, title, width, height, callback):
# def addChild(child):
# def removeChild(child):
# def getColor(color):
# def removeItemsInList(theList, condition):
# PARENT = "stage_parent_root"
#
# Path: pylash/events.py
# class Event(Object):
# def __init__(self, e):
# super(Event, self).__init__()
#
# if isinstance(e, Event):
# self.copyFrom(e)
# elif isinstance(e, str):
# self.eventType = e
# self.currentTarget = None
# self.target = None
# else:
# raise TypeError("Event.__init__(e): parameter 'e' is either a str or an Event object.")
#
# class EventDispatcher(Object):
# def __init__(self):
# super(EventDispatcher, self).__init__()
#
# self._eventList = []
#
# def __isEventTypeEqual(self, e1, e2):
# e1 = Event(e1)
# e2 = Event(e2)
# return e1.eventType == e2.eventType
#
# def _addEventListenerInList(self, e, listener, eventList):
# eventList.append({
# "eventType" : e,
# "listener" : listener
# })
#
# def _removeEventListenerInList(self, e, listener, eventList):
# def condition(o):
# t = o["eventType"]
# l = o["listener"]
#
# return (l == listener and self.__isEventTypeEqual(e, t))
#
# removeItemsInList(eventList, condition)
#
# def _dispatchEventInList(self, e, eventList):
# for o in eventList:
# t = o["eventType"]
# l = o["listener"]
#
# if self.__isEventTypeEqual(e, t):
# e = Event(e)
#
# if e.currentTarget is None:
# e.currentTarget = self
# if e.target is None:
# e.target = self
#
# l(e)
#
# def _hasEventListenerInList(self, e, listener, eventList):
# for o in eventList:
# t = o["eventType"]
# l = o["listener"]
#
# if l == listener:
# if self.__isEventTypeEqual(e, t):
# return True
# else:
# return False
#
# return False
#
# def addEventListener(self, e, listener):
# self._addEventListenerInList(e, listener, self._eventList)
#
# def removeEventListener(self, e, listener):
# self._removeEventListenerInList(e, listener, self._eventList)
#
# def removeAllEventListeners(self):
# self._eventList = []
#
# def dispatchEvent(self, e):
# self._dispatchEventInList(e, self._eventList)
#
# def hasEventListener(self, e, listener):
# return self._hasEventListenerInList(e, listener, self._eventList)
, which may contain function names, class names, or code. Output only the next line. | def doLoad(self): |
Given snippet: <|code_start|> e.target = self.content
self.dispatchEvent(e)
def _onError(self, err):
e = Event(LoaderEvent.ERROR)
e.target = Exception("MediaLoader: cannot load file in the given path (%s)." % self.content.errorString())
self.dispatchEvent(e)
def load(self, path):
fullpath = QtCore.QDir.current().absoluteFilePath(path)
mediaContent = QtMultimedia.QMediaContent(QtCore.QUrl.fromLocalFile(fullpath))
self.content = QtMultimedia.QMediaPlayer(stage.canvasWidget)
self.content._mediaStatusChangedSlot = lambda status: self._onMediaStatusChanged(status)
self.content.mediaStatusChanged.connect(self.content._mediaStatusChangedSlot)
self.content._errorSlot = lambda err: self_onError(err)
self.content.error.connect(self.content._errorSlot)
self.content.setMedia(mediaContent)
self.content.stop()
class LoadManageWorker(Object):
def __init__(self):
super(LoadManageWorker, self).__init__()
self._resultList = {}
self._loadIndex = 0
self._loadNum = 0
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import threading, time, socket
from PySide2 import QtCore, QtGui, QtMultimedia
from .core import Object, stage
from .events import Event, EventDispatcher
and context:
# Path: pylash/core.py
# class Object(object):
# class CanvasWidget(QtWidgets.QWidget):
# class Stage(Object):
# class KeyCode(object):
# class UnityOfDictAndClass(object):
# def __init__(self):
# def _nonCopyableAttrs(self):
# def copyFrom(self, source):
# def __init__(self):
# def paintEvent(self, event):
# def mousePressEvent(self, event):
# def mouseMoveEvent(self, event):
# def mouseReleaseEvent(self, event):
# def mouseDoubleClickEvent(self, event):
# def keyPressEvent(self, event):
# def keyReleaseEvent(self, event):
# def __enterKeyboardEvent(self, event, eventType):
# def __enterMouseEvent(self, event, eventType):
# def __init__(self):
# def copyFrom(self, source):
# def _setCanvas(self, speed, title, width, height):
# def _onShow(self):
# def _showDisplayList(self, childList):
# def _enterMouseEvent(self, event, cd):
# def setFrameRate(self, speed):
# def addChild(self, child):
# def removeChild(self, child):
# def addEventListener(self, e, listener):
# def removeEventListener(self, e, listener = None):
# def __init__(self):
# def __init__(self):
# def set(obj, key, value):
# def get(obj, key):
# def has(obj, key):
# def init(speed, title, width, height, callback):
# def addChild(child):
# def removeChild(child):
# def getColor(color):
# def removeItemsInList(theList, condition):
# PARENT = "stage_parent_root"
#
# Path: pylash/events.py
# class Event(Object):
# def __init__(self, e):
# super(Event, self).__init__()
#
# if isinstance(e, Event):
# self.copyFrom(e)
# elif isinstance(e, str):
# self.eventType = e
# self.currentTarget = None
# self.target = None
# else:
# raise TypeError("Event.__init__(e): parameter 'e' is either a str or an Event object.")
#
# class EventDispatcher(Object):
# def __init__(self):
# super(EventDispatcher, self).__init__()
#
# self._eventList = []
#
# def __isEventTypeEqual(self, e1, e2):
# e1 = Event(e1)
# e2 = Event(e2)
# return e1.eventType == e2.eventType
#
# def _addEventListenerInList(self, e, listener, eventList):
# eventList.append({
# "eventType" : e,
# "listener" : listener
# })
#
# def _removeEventListenerInList(self, e, listener, eventList):
# def condition(o):
# t = o["eventType"]
# l = o["listener"]
#
# return (l == listener and self.__isEventTypeEqual(e, t))
#
# removeItemsInList(eventList, condition)
#
# def _dispatchEventInList(self, e, eventList):
# for o in eventList:
# t = o["eventType"]
# l = o["listener"]
#
# if self.__isEventTypeEqual(e, t):
# e = Event(e)
#
# if e.currentTarget is None:
# e.currentTarget = self
# if e.target is None:
# e.target = self
#
# l(e)
#
# def _hasEventListenerInList(self, e, listener, eventList):
# for o in eventList:
# t = o["eventType"]
# l = o["listener"]
#
# if l == listener:
# if self.__isEventTypeEqual(e, t):
# return True
# else:
# return False
#
# return False
#
# def addEventListener(self, e, listener):
# self._addEventListenerInList(e, listener, self._eventList)
#
# def removeEventListener(self, e, listener):
# self._removeEventListenerInList(e, listener, self._eventList)
#
# def removeAllEventListeners(self):
# self._eventList = []
#
# def dispatchEvent(self, e):
# self._dispatchEventInList(e, self._eventList)
#
# def hasEventListener(self, e, listener):
# return self._hasEventListenerInList(e, listener, self._eventList)
which might include code, classes, or functions. Output only the next line. | self._onUpdate = None |
Based on the snippet: <|code_start|> def skew(self, kx, ky):
mtx = Matrix(1, ky, kx, 1, 0, 0, 0, 0, 1)
self.add(mtx)
return self
def add(self, mtx):
a = self.a * mtx.a + self.b * mtx.c + self.u * mtx.tx
b = self.a * mtx.b + self.b * mtx.d + self.u * mtx.ty
u = self.a * mtx.u + self.b * mtx.v + self.u * mtx.w
c = self.c * mtx.a + self.d * mtx.c + self.v * mtx.tx
d = self.c * mtx.b + self.d * mtx.d + self.v * mtx.ty
v = self.c * mtx.u + self.d * mtx.v + self.v * mtx.w
tx = self.tx * mtx.a + self.ty * mtx.c + self.w * mtx.tx
ty = self.tx * mtx.b + self.ty * mtx.d + self.w * mtx.ty
w = self.tx * mtx.u + self.ty * mtx.v + self.w * mtx.w
self.setTo(a, b, c, d, tx, ty, u, v, w)
def toArray(self, mtx):
if isinstance(mtx, list) and len(mtx) == 3:
m = mtx[0] * self.a + mtx[1] * self.c + mtx[2] * self.tx
n = mtx[0] * self.b + mtx[1] * self.d + mtx[2] * self.ty
k = mtx[0] * self.u + mtx[1] * self.v + mtx[2] * self.w
return [m, n, k]
else:
a = self.a * mtx.a + self.b * mtx.c + self.u * mtx.tx
b = self.a * mtx.b + self.b * mtx.d + self.u * mtx.ty
u = self.a * mtx.u + self.b * mtx.v + self.u * mtx.w
<|code_end|>
, predict the immediate next line with the help of imports:
import math
from PySide2 import QtGui
from .core import Object
and context (classes, functions, sometimes code) from other files:
# Path: pylash/core.py
# class Object(object):
# '''
# Base class of other classes in `pylash`, providing fundamental interfaces for `pylash` objects.
# '''
#
# latestObjectIndex = 0
# '''
# The ID number of the last instantiated `pylash` object. It also represents the number of
# instantiated `pylash` objects. Note: it is .
#
# Type: `int`, read-only
# '''
#
# def __init__(self):
# self.objectIndex = Object.latestObjectIndex
# '''
# The unique ID number of the object.
#
# Type: `int`, read-only
# '''
#
# self.name = "instance" + str(self.objectIndex)
# '''
# The name of the object. Default: `"instance" + str(self.objectIndex)`.
#
# Type: `str`
# '''
#
# Object.latestObjectIndex += 1
#
# def _nonCopyableAttrs(self):
# return ["objectIndex", "name"]
#
# def copyFrom(self, source):
# '''
# Copies all instance attributes from `source` to self. The `source` should have the same
# type with selves, or be an object instantiated from the parent classes of the self class.
#
# Parameters
# ----------
# source : pylash.core.Object
# The source object to be copied from.
#
# '''
#
# if not source or not isinstance(self, source.__class__):
# raise TypeError("Object.copyFrom(source): cannot copy from the parameter 'source'.")
#
# noncopyable = self._nonCopyableAttrs()
# attrs = source.__dict__
# for attr_name in attrs:
# if attr_name in noncopyable:
# continue
#
# setattr(self, attr_name, attrs[attr_name])
. Output only the next line. | c = self.c * mtx.a + self.d * mtx.c + self.v * mtx.tx |
Next line prediction: <|code_start|> if (t / 2) < 1:
return c / 2 * math.pow(2, 10 * (t - 1)) + b
t -= 1
return c / 2 * (-math.pow(2, -10 * t) + 2) + b
class Circ(object):
def __init__(self):
raise Exception("Circ cannot be instantiated.")
def easeIn(t, b, c, d):
t /= d
return -c * (math.sqrt(1 - t * t) - 1) + b
def easeOut(t, b, c, d):
t = t / d - 1
return c * math.sqrt(1 - t * t) + b
def easeInOut(t, b, c, d):
t /= d
if (t / 2) < 1:
return -c / 2 * (math.sqrt(1 - t * t) - 1) + b
t -= 2
<|code_end|>
. Use current file imports:
(import math, time
from .core import Object, stage, UnityOfDictAndClass)
and context including class names, function names, or small code snippets from other files:
# Path: pylash/core.py
# class Object(object):
# class CanvasWidget(QtWidgets.QWidget):
# class Stage(Object):
# class KeyCode(object):
# class UnityOfDictAndClass(object):
# def __init__(self):
# def _nonCopyableAttrs(self):
# def copyFrom(self, source):
# def __init__(self):
# def paintEvent(self, event):
# def mousePressEvent(self, event):
# def mouseMoveEvent(self, event):
# def mouseReleaseEvent(self, event):
# def mouseDoubleClickEvent(self, event):
# def keyPressEvent(self, event):
# def keyReleaseEvent(self, event):
# def __enterKeyboardEvent(self, event, eventType):
# def __enterMouseEvent(self, event, eventType):
# def __init__(self):
# def copyFrom(self, source):
# def _setCanvas(self, speed, title, width, height):
# def _onShow(self):
# def _showDisplayList(self, childList):
# def _enterMouseEvent(self, event, cd):
# def setFrameRate(self, speed):
# def addChild(self, child):
# def removeChild(self, child):
# def addEventListener(self, e, listener):
# def removeEventListener(self, e, listener = None):
# def __init__(self):
# def __init__(self):
# def set(obj, key, value):
# def get(obj, key):
# def has(obj, key):
# def init(speed, title, width, height, callback):
# def addChild(child):
# def removeChild(child):
# def getColor(color):
# def removeItemsInList(theList, condition):
# PARENT = "stage_parent_root"
. Output only the next line. | return c / 2 * (math.sqrt(1 - t * t) + 1) + b |
Predict the next line for this snippet: <|code_start|>
def easeOut(t, b, c, d, s = None):
if not s:
s = 1.70158
t = t / d - 1
return c * (t * t * ((s + 1) * t + s) + 1) + b
def easeInOut(t, b, c, d, s = None):
if not s:
s = 1.70158
t /= d
if (t / 2) < 1:
s *= 1.525
return c / 2 * (t * t * ((s + 1) * t - s)) + b
t -= 2
s *= 1.525
return c / 2 * (t * t * ((s + 1) * t + s) + 2) + b
class Bounce(object):
def __init__(self):
raise Exception("Bounce cannot be instantiated.")
<|code_end|>
with the help of current file imports:
import math, time
from .core import Object, stage, UnityOfDictAndClass
and context from other files:
# Path: pylash/core.py
# class Object(object):
# class CanvasWidget(QtWidgets.QWidget):
# class Stage(Object):
# class KeyCode(object):
# class UnityOfDictAndClass(object):
# def __init__(self):
# def _nonCopyableAttrs(self):
# def copyFrom(self, source):
# def __init__(self):
# def paintEvent(self, event):
# def mousePressEvent(self, event):
# def mouseMoveEvent(self, event):
# def mouseReleaseEvent(self, event):
# def mouseDoubleClickEvent(self, event):
# def keyPressEvent(self, event):
# def keyReleaseEvent(self, event):
# def __enterKeyboardEvent(self, event, eventType):
# def __enterMouseEvent(self, event, eventType):
# def __init__(self):
# def copyFrom(self, source):
# def _setCanvas(self, speed, title, width, height):
# def _onShow(self):
# def _showDisplayList(self, childList):
# def _enterMouseEvent(self, event, cd):
# def setFrameRate(self, speed):
# def addChild(self, child):
# def removeChild(self, child):
# def addEventListener(self, e, listener):
# def removeEventListener(self, e, listener = None):
# def __init__(self):
# def __init__(self):
# def set(obj, key, value):
# def get(obj, key):
# def has(obj, key):
# def init(speed, title, width, height, callback):
# def addChild(child):
# def removeChild(child):
# def getColor(color):
# def removeItemsInList(theList, condition):
# PARENT = "stage_parent_root"
, which may contain function names, class names, or code. Output only the next line. | def easeIn(t, b, c, d): |
Given the following code snippet before the placeholder: <|code_start|>setattr(Easing, "Strong", Strong)
setattr(Easing, "Expo", Expo)
setattr(Easing, "Circ", Circ)
setattr(Easing, "Elastic", Elastic)
setattr(Easing, "Back", Back)
setattr(Easing, "Bounce", Bounce)
class TweenLiteChild(Object):
def __init__(self, target, duration, tranVars):
super(TweenLiteChild, self).__init__()
self.__toNew = []
self._initTween(target, duration, tranVars)
def _initTween(self, target, duration, tranVars):
if not duration:
duration = 0.001
self.__target = target
self.__duration = duration
self.__vars = tranVars
if not "delay" in self.__vars:
self.__vars["delay"] = 0
self.__delay = self.__vars["delay"]
del self.__vars["delay"]
self.__currentTime = 0
<|code_end|>
, predict the next line using imports from the current file:
import math, time
from .core import Object, stage, UnityOfDictAndClass
and context including class names, function names, and sometimes code from other files:
# Path: pylash/core.py
# class Object(object):
# class CanvasWidget(QtWidgets.QWidget):
# class Stage(Object):
# class KeyCode(object):
# class UnityOfDictAndClass(object):
# def __init__(self):
# def _nonCopyableAttrs(self):
# def copyFrom(self, source):
# def __init__(self):
# def paintEvent(self, event):
# def mousePressEvent(self, event):
# def mouseMoveEvent(self, event):
# def mouseReleaseEvent(self, event):
# def mouseDoubleClickEvent(self, event):
# def keyPressEvent(self, event):
# def keyReleaseEvent(self, event):
# def __enterKeyboardEvent(self, event, eventType):
# def __enterMouseEvent(self, event, eventType):
# def __init__(self):
# def copyFrom(self, source):
# def _setCanvas(self, speed, title, width, height):
# def _onShow(self):
# def _showDisplayList(self, childList):
# def _enterMouseEvent(self, event, cd):
# def setFrameRate(self, speed):
# def addChild(self, child):
# def removeChild(self, child):
# def addEventListener(self, e, listener):
# def removeEventListener(self, e, listener = None):
# def __init__(self):
# def __init__(self):
# def set(obj, key, value):
# def get(obj, key):
# def has(obj, key):
# def init(speed, title, width, height, callback):
# def addChild(child):
# def removeChild(child):
# def getColor(color):
# def removeItemsInList(theList, condition):
# PARENT = "stage_parent_root"
. Output only the next line. | self.__duration *= 1000 |
Using the snippet: <|code_start|> self.copyFrom(e)
elif isinstance(e, str):
self.eventType = e
self.currentTarget = None
self.target = None
else:
raise TypeError("Event.__init__(e): parameter 'e' is either a str or an Event object.")
class LoopEvent(object):
ENTER_FRAME = Event("loop_enter_frame")
EXIT_FRAME = Event("loop_exit_frame")
class MouseEvent(object):
MOUSE_DOWN = Event("mouse_down")
MOUSE_UP = Event("mouse_up")
MOUSE_MOVE = Event("mouse_move")
MOUSE_OVER = Event("mouse_over")
MOUSE_OUT = Event("mouse_out")
DOUBLE_CLICK = Event("mouse_dbclick")
def __init__(self):
raise Exception("MouseEvent cannot be instantiated.")
class KeyboardEvent(object):
KEY_DOWN = Event("key_down")
KEY_UP = Event("key_up")
<|code_end|>
, determine the next line of code. You have imports:
from .core import Object, removeItemsInList
and context (class names, function names, or code) available:
# Path: pylash/core.py
# class Object(object):
# '''
# Base class of other classes in `pylash`, providing fundamental interfaces for `pylash` objects.
# '''
#
# latestObjectIndex = 0
# '''
# The ID number of the last instantiated `pylash` object. It also represents the number of
# instantiated `pylash` objects. Note: it is .
#
# Type: `int`, read-only
# '''
#
# def __init__(self):
# self.objectIndex = Object.latestObjectIndex
# '''
# The unique ID number of the object.
#
# Type: `int`, read-only
# '''
#
# self.name = "instance" + str(self.objectIndex)
# '''
# The name of the object. Default: `"instance" + str(self.objectIndex)`.
#
# Type: `str`
# '''
#
# Object.latestObjectIndex += 1
#
# def _nonCopyableAttrs(self):
# return ["objectIndex", "name"]
#
# def copyFrom(self, source):
# '''
# Copies all instance attributes from `source` to self. The `source` should have the same
# type with selves, or be an object instantiated from the parent classes of the self class.
#
# Parameters
# ----------
# source : pylash.core.Object
# The source object to be copied from.
#
# '''
#
# if not source or not isinstance(self, source.__class__):
# raise TypeError("Object.copyFrom(source): cannot copy from the parameter 'source'.")
#
# noncopyable = self._nonCopyableAttrs()
# attrs = source.__dict__
# for attr_name in attrs:
# if attr_name in noncopyable:
# continue
#
# setattr(self, attr_name, attrs[attr_name])
#
# def removeItemsInList(theList, condition):
# if not hasattr(condition, "__call__") or not isinstance(theList, list):
# return
#
# targetList = []
#
# for o in theList:
# if condition(o):
# targetList.append(o)
#
# for i in targetList:
# theList.remove(i)
#
# return targetList
. Output only the next line. | def __init__(self): |
Given snippet: <|code_start|> MOUSE_DOWN = Event("mouse_down")
MOUSE_UP = Event("mouse_up")
MOUSE_MOVE = Event("mouse_move")
MOUSE_OVER = Event("mouse_over")
MOUSE_OUT = Event("mouse_out")
DOUBLE_CLICK = Event("mouse_dbclick")
def __init__(self):
raise Exception("MouseEvent cannot be instantiated.")
class KeyboardEvent(object):
KEY_DOWN = Event("key_down")
KEY_UP = Event("key_up")
def __init__(self):
raise Exception("KeyboardEvent cannot be instantiated.")
class EventDispatcher(Object):
def __init__(self):
super(EventDispatcher, self).__init__()
self._eventList = []
def __isEventTypeEqual(self, e1, e2):
e1 = Event(e1)
e2 = Event(e2)
return e1.eventType == e2.eventType
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .core import Object, removeItemsInList
and context:
# Path: pylash/core.py
# class Object(object):
# '''
# Base class of other classes in `pylash`, providing fundamental interfaces for `pylash` objects.
# '''
#
# latestObjectIndex = 0
# '''
# The ID number of the last instantiated `pylash` object. It also represents the number of
# instantiated `pylash` objects. Note: it is .
#
# Type: `int`, read-only
# '''
#
# def __init__(self):
# self.objectIndex = Object.latestObjectIndex
# '''
# The unique ID number of the object.
#
# Type: `int`, read-only
# '''
#
# self.name = "instance" + str(self.objectIndex)
# '''
# The name of the object. Default: `"instance" + str(self.objectIndex)`.
#
# Type: `str`
# '''
#
# Object.latestObjectIndex += 1
#
# def _nonCopyableAttrs(self):
# return ["objectIndex", "name"]
#
# def copyFrom(self, source):
# '''
# Copies all instance attributes from `source` to self. The `source` should have the same
# type with selves, or be an object instantiated from the parent classes of the self class.
#
# Parameters
# ----------
# source : pylash.core.Object
# The source object to be copied from.
#
# '''
#
# if not source or not isinstance(self, source.__class__):
# raise TypeError("Object.copyFrom(source): cannot copy from the parameter 'source'.")
#
# noncopyable = self._nonCopyableAttrs()
# attrs = source.__dict__
# for attr_name in attrs:
# if attr_name in noncopyable:
# continue
#
# setattr(self, attr_name, attrs[attr_name])
#
# def removeItemsInList(theList, condition):
# if not hasattr(condition, "__call__") or not isinstance(theList, list):
# return
#
# targetList = []
#
# for o in theList:
# if condition(o):
# targetList.append(o)
#
# for i in targetList:
# theList.remove(i)
#
# return targetList
which might include code, classes, or functions. Output only the next line. | def _addEventListenerInList(self, e, listener, eventList): |
Predict the next line for this snippet: <|code_start|>
COPULA = "SAME AS" # the textual value of a copula node
PROP = "PROP" # the textual value of a property node
RCMOD_PROP = "PROP" # the textual value of a property for rcmod node
POSSESSIVE = "POSSESS" # the textual value of a possessive node
APPOSITION = "appos" # the textual value of an appositio n node
PREP = "PREP" # the textual value of a preposition node
PREP_TYPE = "TYPE" # the textual value of a preposition node's type
COND = "COND" # the textual value of a conditional node
TIME = "TIME" # the textual value of a time node
LOCATION = "LOCATION" # the textual value of a location node
CONJUNCTION = "CONJ -" # the textual value of a conjunction node
ADVERB = "ADV" # the textual value of a conjunction node
EXISTENSIAL = "EXISTS" # the textual value of a conjunction node
COND_TYPE= PREP_TYPE # the textual value of a conditional node's type
## Node shapes
RECT_NODE_SHAPE = "rect"
DEFAULT_NODE_SHAPE = "ellipse"
PRINT_FEATURES = [("Tense",lambda t:t),
("Determiner",lambda t:"det: "+t["Value"]),
("Time Value",lambda t:"date: "+t),
("Negation", lambda t:"negated"),
("Passive Voice", lambda t:"passive"),
("Modal",lambda t:"modal: "+ " ".join(t["Value"])),
("Definite",lambda t:t),
<|code_end|>
with the help of current file imports:
from props.graph_representation.word import Word,NO_INDEX, strip_punctuations
from copy import deepcopy
from props.dependency_tree.definitions import time_prep, definite_label,\
adjectival_mod_dependencies
import cgi
and context from other files:
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
, which may contain function names, class names, or code. Output only the next line. | ("Modifier",lambda t:"modifer: "+t)]
|
Given the code snippet: <|code_start|>
COPULA = "SAME AS" # the textual value of a copula node
PROP = "PROP" # the textual value of a property node
RCMOD_PROP = "PROP" # the textual value of a property for rcmod node
POSSESSIVE = "POSSESS" # the textual value of a possessive node
APPOSITION = "appos" # the textual value of an appositio n node
PREP = "PREP" # the textual value of a preposition node
PREP_TYPE = "TYPE" # the textual value of a preposition node's type
COND = "COND" # the textual value of a conditional node
TIME = "TIME" # the textual value of a time node
<|code_end|>
, generate the next line using the imports in this file:
from props.graph_representation.word import Word,NO_INDEX, strip_punctuations
from copy import deepcopy
from props.dependency_tree.definitions import time_prep, definite_label,\
adjectival_mod_dependencies
import cgi
and context (functions, classes, or occasionally code) from other files:
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
. Output only the next line. | LOCATION = "LOCATION" # the textual value of a location node
|
Given the following code snippet before the placeholder: <|code_start|>POSSESSIVE = "POSSESS" # the textual value of a possessive node
APPOSITION = "appos" # the textual value of an appositio n node
PREP = "PREP" # the textual value of a preposition node
PREP_TYPE = "TYPE" # the textual value of a preposition node's type
COND = "COND" # the textual value of a conditional node
TIME = "TIME" # the textual value of a time node
LOCATION = "LOCATION" # the textual value of a location node
CONJUNCTION = "CONJ -" # the textual value of a conjunction node
ADVERB = "ADV" # the textual value of a conjunction node
EXISTENSIAL = "EXISTS" # the textual value of a conjunction node
COND_TYPE= PREP_TYPE # the textual value of a conditional node's type
## Node shapes
RECT_NODE_SHAPE = "rect"
DEFAULT_NODE_SHAPE = "ellipse"
PRINT_FEATURES = [("Tense",lambda t:t),
("Determiner",lambda t:"det: "+t["Value"]),
("Time Value",lambda t:"date: "+t),
("Negation", lambda t:"negated"),
("Passive Voice", lambda t:"passive"),
("Modal",lambda t:"modal: "+ " ".join(t["Value"])),
("Definite",lambda t:t),
("Modifier",lambda t:"modifer: "+t)]
global nodeCounter
nodeCounter =0
<|code_end|>
, predict the next line using imports from the current file:
from props.graph_representation.word import Word,NO_INDEX, strip_punctuations
from copy import deepcopy
from props.dependency_tree.definitions import time_prep, definite_label,\
adjectival_mod_dependencies
import cgi
and context including class names, function names, and sometimes code from other files:
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
. Output only the next line. | class Node:
|
Predict the next line for this snippet: <|code_start|>
COPULA = "SAME AS" # the textual value of a copula node
PROP = "PROP" # the textual value of a property node
RCMOD_PROP = "PROP" # the textual value of a property for rcmod node
POSSESSIVE = "POSSESS" # the textual value of a possessive node
APPOSITION = "appos" # the textual value of an appositio n node
PREP = "PREP" # the textual value of a preposition node
PREP_TYPE = "TYPE" # the textual value of a preposition node's type
COND = "COND" # the textual value of a conditional node
TIME = "TIME" # the textual value of a time node
LOCATION = "LOCATION" # the textual value of a location node
CONJUNCTION = "CONJ -" # the textual value of a conjunction node
ADVERB = "ADV" # the textual value of a conjunction node
EXISTENSIAL = "EXISTS" # the textual value of a conjunction node
COND_TYPE= PREP_TYPE # the textual value of a conditional node's type
## Node shapes
RECT_NODE_SHAPE = "rect"
DEFAULT_NODE_SHAPE = "ellipse"
PRINT_FEATURES = [("Tense",lambda t:t),
("Determiner",lambda t:"det: "+t["Value"]),
<|code_end|>
with the help of current file imports:
from props.graph_representation.word import Word,NO_INDEX, strip_punctuations
from copy import deepcopy
from props.dependency_tree.definitions import time_prep, definite_label,\
adjectival_mod_dependencies
import cgi
and context from other files:
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
, which may contain function names, class names, or code. Output only the next line. | ("Time Value",lambda t:"date: "+t),
|
Given snippet: <|code_start|>
COPULA = "SAME AS" # the textual value of a copula node
PROP = "PROP" # the textual value of a property node
RCMOD_PROP = "PROP" # the textual value of a property for rcmod node
POSSESSIVE = "POSSESS" # the textual value of a possessive node
APPOSITION = "appos" # the textual value of an appositio n node
PREP = "PREP" # the textual value of a preposition node
PREP_TYPE = "TYPE" # the textual value of a preposition node's type
COND = "COND" # the textual value of a conditional node
TIME = "TIME" # the textual value of a time node
LOCATION = "LOCATION" # the textual value of a location node
CONJUNCTION = "CONJ -" # the textual value of a conjunction node
ADVERB = "ADV" # the textual value of a conjunction node
EXISTENSIAL = "EXISTS" # the textual value of a conjunction node
COND_TYPE= PREP_TYPE # the textual value of a conditional node's type
## Node shapes
RECT_NODE_SHAPE = "rect"
DEFAULT_NODE_SHAPE = "ellipse"
PRINT_FEATURES = [("Tense",lambda t:t),
("Determiner",lambda t:"det: "+t["Value"]),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from props.graph_representation.word import Word,NO_INDEX, strip_punctuations
from copy import deepcopy
from props.dependency_tree.definitions import time_prep, definite_label,\
adjectival_mod_dependencies
import cgi
and context:
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
which might include code, classes, or functions. Output only the next line. | ("Time Value",lambda t:"date: "+t),
|
Given snippet: <|code_start|>
COPULA = "SameAs" # the textual value of a copula node
PROP = "PROP" # the textual value of a property node
RCMOD_PROP = "PROP" # the textual value of a property for rcmod node
POSSESSIVE = "have" # the textual value of a possessive node
APPOSITION = "appos" # the textual value of an appositio n node
PREP = "PREP" # the textual value of a preposition node
PREP_TYPE = "TYPE" # the textual value of a preposition node's type
COND = "COND" # the textual value of a conditional node
TIME = "TIME" # the textual value of a time node
LOCATION = "LOCATION" # the textual value of a location node
CONJUNCTION = "CONJ -" # the textual value of a conjunction node
ADVERB = "ADV" # the textual value of a conjunction node
EXISTENSIAL = "Exists" # the textual value of a conjunction node
COND_TYPE= PREP_TYPE # the textual value of a conditional node's type
## Node shapes
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from props.graph_representation.word import Word,NO_INDEX, strip_punctuations
from copy import deepcopy, copy
from props.dependency_tree.definitions import time_prep, definite_label,\
adjectival_mod_dependencies
import cgi
and context:
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
which might include code, classes, or functions. Output only the next line. | RECT_NODE_SHAPE = "rect"
|
Continue the code snippet: <|code_start|>POSSESSIVE = "have" # the textual value of a possessive node
APPOSITION = "appos" # the textual value of an appositio n node
PREP = "PREP" # the textual value of a preposition node
PREP_TYPE = "TYPE" # the textual value of a preposition node's type
COND = "COND" # the textual value of a conditional node
TIME = "TIME" # the textual value of a time node
LOCATION = "LOCATION" # the textual value of a location node
CONJUNCTION = "CONJ -" # the textual value of a conjunction node
ADVERB = "ADV" # the textual value of a conjunction node
EXISTENSIAL = "Exists" # the textual value of a conjunction node
COND_TYPE= PREP_TYPE # the textual value of a conditional node's type
## Node shapes
RECT_NODE_SHAPE = "rect"
DEFAULT_NODE_SHAPE = "ellipse"
PRINT_FEATURES = [("Tense",lambda t:t),
("Determiner",lambda t:"det: "+t["Value"]),
("Time Value",lambda t:"date: "+t),
("Negation", lambda t:"negated"),
("Passive Voice", lambda t:"passive"),
("Modal",lambda t:"modal: "+ " ".join(t["Value"])),
("Definite",lambda t:t),
("Modifier",lambda t:"modifier: "+t)]
global nodeCounter
nodeCounter = 0
def resetCounter():
global nodeCounter
<|code_end|>
. Use current file imports:
from props.graph_representation.word import Word,NO_INDEX, strip_punctuations
from copy import deepcopy, copy
from props.dependency_tree.definitions import time_prep, definite_label,\
adjectival_mod_dependencies
import cgi
and context (classes, functions, or code) from other files:
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
. Output only the next line. | nodeCounter = 0
|
Given the code snippet: <|code_start|>COND = "COND" # the textual value of a conditional node
TIME = "TIME" # the textual value of a time node
LOCATION = "LOCATION" # the textual value of a location node
CONJUNCTION = "CONJ -" # the textual value of a conjunction node
ADVERB = "ADV" # the textual value of a conjunction node
EXISTENSIAL = "Exists" # the textual value of a conjunction node
COND_TYPE= PREP_TYPE # the textual value of a conditional node's type
## Node shapes
RECT_NODE_SHAPE = "rect"
DEFAULT_NODE_SHAPE = "ellipse"
PRINT_FEATURES = [("Tense",lambda t:t),
("Determiner",lambda t:"det: "+t["Value"]),
("Time Value",lambda t:"date: "+t),
("Negation", lambda t:"negated"),
("Passive Voice", lambda t:"passive"),
("Modal",lambda t:"modal: "+ " ".join(t["Value"])),
("Definite",lambda t:t),
("Modifier",lambda t:"modifier: "+t)]
global nodeCounter
nodeCounter = 0
def resetCounter():
global nodeCounter
nodeCounter = 0
<|code_end|>
, generate the next line using the imports in this file:
from props.graph_representation.word import Word,NO_INDEX, strip_punctuations
from copy import deepcopy, copy
from props.dependency_tree.definitions import time_prep, definite_label,\
adjectival_mod_dependencies
import cgi
and context (functions, classes, or occasionally code) from other files:
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
. Output only the next line. | class Node:
|
Given the following code snippet before the placeholder: <|code_start|>
COPULA = "SameAs" # the textual value of a copula node
PROP = "PROP" # the textual value of a property node
RCMOD_PROP = "PROP" # the textual value of a property for rcmod node
POSSESSIVE = "have" # the textual value of a possessive node
APPOSITION = "appos" # the textual value of an appositio n node
<|code_end|>
, predict the next line using imports from the current file:
from props.graph_representation.word import Word,NO_INDEX, strip_punctuations
from copy import deepcopy, copy
from props.dependency_tree.definitions import time_prep, definite_label,\
adjectival_mod_dependencies
import cgi
and context including class names, function names, and sometimes code from other files:
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
. Output only the next line. | PREP = "PREP" # the textual value of a preposition node
|
Here is a snippet: <|code_start|> self.outputType = outputType
for ent in self.args:
(rel,arg) = ent
if rel == POSSESSOR_LABEL:
ent[1] = fixPossessor(arg)
def find_ent(self,ent):
ret = []
for i,(rel,arg) in enumerate(self.args):
if ent in arg:
ret.append(i)
return ret
def rel_order(self,rel):
if rel in subject_dependencies+[domain_label,POSSESSED_LABEL,POSSESSOR_LABEL]:
return 0
if rel == ARG_LABEL:
return 1
if rel in object_dependencies:
return 2
if rel.startswith("prep"):
return 3
if rel == SOURCE_LABEL:
return 5
else:
return 4
def __str__(self):
PDF = (self.outputType == "pdf")
<|code_end|>
. Write the next line using the current file imports:
from props.dependency_tree.definitions import subject_dependencies, ARG_LABEL,\
object_dependencies, SOURCE_LABEL, domain_label, POSSESSED_LABEL,\
POSSESSOR_LABEL
and context from other files:
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
, which may include functions, classes, or code. Output only the next line. | HTML = (self.outputType == "html")
|
Continue the code snippet: <|code_start|> self.outputType = outputType
for ent in self.args:
(rel,arg) = ent
if rel == POSSESSOR_LABEL:
ent[1] = fixPossessor(arg)
def find_ent(self,ent):
ret = []
for i,(rel,arg) in enumerate(self.args):
if ent in arg:
ret.append(i)
return ret
def rel_order(self,rel):
if rel in subject_dependencies+[domain_label,POSSESSED_LABEL,POSSESSOR_LABEL]:
return 0
if rel == ARG_LABEL:
return 1
if rel in object_dependencies:
return 2
if rel.startswith("prep"):
return 3
if rel == SOURCE_LABEL:
return 5
else:
return 4
def __str__(self):
PDF = (self.outputType == "pdf")
<|code_end|>
. Use current file imports:
from props.dependency_tree.definitions import subject_dependencies, ARG_LABEL,\
object_dependencies, SOURCE_LABEL, domain_label, POSSESSED_LABEL,\
POSSESSOR_LABEL
and context (classes, functions, or code) from other files:
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
. Output only the next line. | HTML = (self.outputType == "html")
|
Here is a snippet: <|code_start|> return 1
if rel in object_dependencies:
return 2
if rel.startswith("prep"):
return 3
if rel == SOURCE_LABEL:
return 5
else:
return 4
def __str__(self):
PDF = (self.outputType == "pdf")
HTML = (self.outputType == "html")
if PDF:
bold = lambda t:t
color = lambda t,color:t
if HTML:
bold = lambda t:"<b>{0}</b>".format(t)
color = lambda t,color:'<font color="{0}">{1}</font>'.format(color,t)
curProp = r'{0}:({1})'.format(bold(self.pred),
", ".join([rel + ":" + bold(color(arg,"blue")) for rel,arg in sorted(self.args,key=lambda(rel,_):self.rel_order(rel))]))
return curProp
mapPossessive = {"my":"I",
"your":"you",
"its":"it",
"her":"she",
"his":"he",
"our":"we",
<|code_end|>
. Write the next line using the current file imports:
from props.dependency_tree.definitions import subject_dependencies, ARG_LABEL,\
object_dependencies, SOURCE_LABEL, domain_label, POSSESSED_LABEL,\
POSSESSOR_LABEL
and context from other files:
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
, which may include functions, classes, or code. Output only the next line. | "their":"they"}
|
Given snippet: <|code_start|> return 0
if rel == ARG_LABEL:
return 1
if rel in object_dependencies:
return 2
if rel.startswith("prep"):
return 3
if rel == SOURCE_LABEL:
return 5
else:
return 4
def __str__(self):
PDF = (self.outputType == "pdf")
HTML = (self.outputType == "html")
if PDF:
bold = lambda t:t
color = lambda t,color:t
if HTML:
bold = lambda t:"<b>{0}</b>".format(t)
color = lambda t,color:'<font color="{0}">{1}</font>'.format(color,t)
curProp = r'{0}:({1})'.format(bold(self.pred),
", ".join([rel + ":" + bold(color(arg,"blue")) for rel,arg in sorted(self.args,key=lambda(rel,_):self.rel_order(rel))]))
return curProp
mapPossessive = {"my":"I",
"your":"you",
"its":"it",
"her":"she",
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from props.dependency_tree.definitions import subject_dependencies, ARG_LABEL,\
object_dependencies, SOURCE_LABEL, domain_label, POSSESSED_LABEL,\
POSSESSOR_LABEL
and context:
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
which might include code, classes, or functions. Output only the next line. | "his":"he",
|
Predict the next line after this snippet: <|code_start|>class Proposition:
def __init__(self,pred,args,outputType):
self.pred = pred
self.args = args
self.outputType = outputType
for ent in self.args:
(rel,arg) = ent
if rel == POSSESSOR_LABEL:
ent[1] = fixPossessor(arg)
def find_ent(self,ent):
ret = []
for i,(rel,arg) in enumerate(self.args):
if ent in arg:
ret.append(i)
return ret
def rel_order(self,rel):
if rel in subject_dependencies+[domain_label,POSSESSED_LABEL,POSSESSOR_LABEL]:
return 0
if rel == ARG_LABEL:
return 1
if rel in object_dependencies:
return 2
<|code_end|>
using the current file's imports:
from props.dependency_tree.definitions import subject_dependencies, ARG_LABEL,\
object_dependencies, SOURCE_LABEL, domain_label, POSSESSED_LABEL,\
POSSESSOR_LABEL
and any relevant context from other files:
# Path: props/dependency_tree/definitions.py
# REASON_LABEL = "reason"
# OUTCOME_LABEL = "outcome"
# EVENT_LABEL = "event"
# CONDITION_LABEL = "condition"
# FIRST_ENTITY_LABEL = "sameAs_arg"
# SECOND_ENTITY_LABEL = "sameAs_arg"
# POSSESSOR_LABEL = "possessor"
# POSSESSED_LABEL = "possessed"
# ARG_LABEL = "arg"
# POSS_LABEL = "poss"
# MARK_LABEL = "mark"
# EXPL_LABEL = 'expl'
# SOURCE_LABEL = 'source'
# MODIFIER_LABEL = "modifier"
# POSSESSIVE_LABEL = "possessive"
# SUBJ_LABEL = "subj"
# OBJECT_LABEL = "obj"
# DIRECT_OBJECT_LABEL = "dobj"
# INDIRECT_OBJECT_LABEL = "iobj"
# VB = "VB" #Verb, base form
# VBD = "VBD" #Verb, past tense
# VBG = "VBG" #Verb, gerund or present participle
# VBN = "VBN" #Verb, past participle
# VBP = "VBP" #Verb, non-3rd person singular present
# VBZ = "VBZ" #Verb, 3rd person singular present
# VERB_POS = [VB,VBD,VBP,VBZ,VBN] # all types of verb pos
# TO = "TO"
# IN = "IN"
# MD = "MD"
# DOT = "."
# COMMA = ","
# TENSE_PAST = "past"
# TENSE_PRESENT = "present"
# TENSE_FUTURE = "future"
# TENSE_UNKNOWN = "unknown"
# WILL = "will"
# WONT = "wo"
# WOULD = "would"
# D = "'d"
# HAVE = "have"
# BE = "be"
# BEEN = "been"
# FUTURE_MODALS = [WILL, WONT, WOULD, ll, D, "may", "might"] # "wo" is the Modal part of "won't"
# AS = "as"
# COND_IF = "if"
# COND_AFTER = "after"
# def aux_children_with_pos(pos_tag):
. Output only the next line. | if rel.startswith("prep"):
|
Given the code snippet: <|code_start|># for line in fin:
# w = line.strip()
# if w:
# intransitive_verbs.append(w)
# fin.close()
# DepTree is a class representing a dependency tree
class DepTree(object):
def __init__(self,pos,word,id,parent=None,parent_id = None,parent_relation=None,children=[],wsj_id = 0, sent_id = 0):
self.children = children # List of node's children
self.parent = parent # Node's parent
self.parent_relation = parent_relation # Node's parent relation
self.parent_id = parent_id # Node's parent id
self.pos = pos # pos tag
self.word = word # word from sentence
self.id = int(id) # location in sentence
self.function_tag = [] # function tag as it in constituency tree
self.is_head_of_time_expression = 0 # indicates if the node is a head of time expression
self.constituent = 0
self.wsj_id = wsj_id
self.sent_id = sent_id
self.is_nominal = False
self.nominal_argument = None
self.childDic = []
def set_parent(self, new_parent): self.parent = new_parent
def set_parent_id(self,parent_id): self.parent_id = parent_id
<|code_end|>
, generate the next line using the imports in this file:
from __builtin__ import dir
from props.graph_representation.graphParsingException import GraphParsingException
from nltk.tree import Tree
from props.graph_representation.word import Word
from props.dependency_tree.definitions import *
from props.constituency_tree.definitions import *
from props.constituency_tree.my_definitions import any_in
from Tense import tense_rules
from nltk import Tree
from nltk.stem.wordnet import WordNetLemmatizer
import copy,os
and context (functions, classes, or occasionally code) from other files:
# Path: props/graph_representation/graphParsingException.py
# class GraphParsingException(Exception):
# """
# Exception to identify errors coming from the graph parsing process
# """
# pass
#
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# Path: props/constituency_tree/my_definitions.py
# ADVERBIALS = ["ADV", "VOC", "BNF", "DIR", "EXT", "LOC", "MNR", "TMP", "CLR","PRP"]
. Output only the next line. | def get_parent(self): return self.parent
|
Based on the snippet: <|code_start|> #mark head as matching pattern's head
ret = [self]
# find a child matching each of the pattern's children
availableChildren = [(i,c) for i,c in enumerate(self.children)]
lastMatch = -1
for c_pat in pat:
for i,c_t in availableChildren:
successor = False
if c_pat.node.startswith("$+"):
successor = True
c_pat.node = c_pat.node[2:]
curMatch = c_t.match(c_pat)
if curMatch:
if successor:
if i-lastMatch != 1:
return False
lastMatch = i
ret.append(curMatch)
availableChildren.remove((i,c_t))
break
if not curMatch:
return False
return ret
def get_text(self):
ret = [Word(index=self.id,word=self.word)]
for c in self.children:
ret += c.get_text()
<|code_end|>
, predict the immediate next line with the help of imports:
from __builtin__ import dir
from props.graph_representation.graphParsingException import GraphParsingException
from nltk.tree import Tree
from props.graph_representation.word import Word
from props.dependency_tree.definitions import *
from props.constituency_tree.definitions import *
from props.constituency_tree.my_definitions import any_in
from Tense import tense_rules
from nltk import Tree
from nltk.stem.wordnet import WordNetLemmatizer
import copy,os
and context (classes, functions, sometimes code) from other files:
# Path: props/graph_representation/graphParsingException.py
# class GraphParsingException(Exception):
# """
# Exception to identify errors coming from the graph parsing process
# """
# pass
#
# Path: props/graph_representation/word.py
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# Path: props/constituency_tree/my_definitions.py
# ADVERBIALS = ["ADV", "VOC", "BNF", "DIR", "EXT", "LOC", "MNR", "TMP", "CLR","PRP"]
. Output only the next line. | return ret
|
Given the following code snippet before the placeholder: <|code_start|>18 9,came VBD 1 1
24 8,just RB 0 0 mod,18
26 10,back RB 0 0 mod,18
27 12,Russia NNP 0 0 prep_from,18''',
),
('She said that the boy is tall',
'''
She said that the boy is tall
146 2,said VBD 1 1
147 1,She PRP 0 0 subj,146
151 5,boy NN 0 0 prop_of,153
153 7,tall VBZ 1 0 comp,146
'''
),
('If you build it, they will come',
'''If you build it , they will come
215 3,build VBP 1 0 condition,216
216 1,If IN 1 1
217 2,you PRP 0 0 subj,215
218 8,come VB 1 0 outcome,216
219 4,it PRP 0 0 dobj,215
221 6,they PRP 0 0 subj,218'''
)]
for sent, expected in testCases:
self.compare(self.getProps(sent), expected)
if __name__ == '__main__':
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from props.unit_tests.props_test import PropsTest
and context including class names, function names, and sometimes code from other files:
# Path: props/unit_tests/props_test.py
# class PropsTest(unittest.TestCase):
# ''' Super class for all PropS tests '''
#
# def setUp(self):
# ''' takes care of the needed initializations '''
# logging.info("running tests")
# load_berkeley(tokenize = True)
#
#
# def getProps(self, sent):
# ''' returns the textual props representation of an input sentence '''
# g, tree = parseSentences(sent)[0]
# return str(g)
#
# def compare(self, sent, expected):
# result = self.getProps(sent)
# self.assertEqual(removeWhitespaces(result),
# removeWhitespaces(expected),
# result)
. Output only the next line. | unittest.main() |
Predict the next line for this snippet: <|code_start|># import graph_representation.node
# from graph_representation.node import isRcmodProp
# from graph_representation.node import Node
def accessibility_wo_self(graph):
ret = accessibility(graph)
for k in ret:
ret[k].remove(k)
<|code_end|>
with the help of current file imports:
from pygraph.algorithms.sorting import topological_sorting
from pygraph.classes.digraph import digraph
from pygraph.algorithms.accessibility import accessibility
from props.graph_representation.word import NO_INDEX, Word, strip_punctuations
from pygraph.algorithms.traversal import traversal
from pygraph.algorithms.minmax import minimal_spanning_tree, shortest_path
from props.graph_representation import newNode
from operator import itemgetter
import subprocess, math, re, nltk
import cgi
import time
import logging
and context from other files:
# Path: props/graph_representation/word.py
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/graph_representation/newNode.py
# COPULA = "SameAs" # the textual value of a copula node
# PROP = "PROP" # the textual value of a property node
# RCMOD_PROP = "PROP" # the textual value of a property for rcmod node
# POSSESSIVE = "have" # the textual value of a possessive node
# APPOSITION = "appos" # the textual value of an appositio n node
# PREP = "PREP" # the textual value of a preposition node
# PREP_TYPE = "TYPE" # the textual value of a preposition node's type
# COND = "COND" # the textual value of a conditional node
# TIME = "TIME" # the textual value of a time node
# LOCATION = "LOCATION" # the textual value of a location node
# CONJUNCTION = "CONJ -" # the textual value of a conjunction node
# ADVERB = "ADV" # the textual value of a conjunction node
# EXISTENSIAL = "Exists" # the textual value of a conjunction node
# COND_TYPE= PREP_TYPE # the textual value of a conditional node's type
# RECT_NODE_SHAPE = "rect"
# DEFAULT_NODE_SHAPE = "ellipse"
# PRINT_FEATURES = [("Tense",lambda t:t),
# ("Determiner",lambda t:"det: "+t["Value"]),
# ("Time Value",lambda t:"date: "+t),
# ("Negation", lambda t:"negated"),
# ("Passive Voice", lambda t:"passive"),
# ("Modal",lambda t:"modal: "+ " ".join(t["Value"])),
# ("Definite",lambda t:t),
# ("Modifier",lambda t:"modifier: "+t)]
# def resetCounter():
# def __init__(self,text,isPredicate,features,gr,orderText = True,uid=-1):
# def removeLemma(self):
# def get_text(self,gr):
# def addPropogation(self,node):
# def minIndex(self):
# def maxIndex(self):
# def get_original_text(self):
# def get_sorted_text(self):
# def __str__(self):
# def to_conll_like(self):
# def neighbors(self):
# def incidents(self):
# def is_implicit(self):
# def pos(self):
# def isConj(self):
# def is_wh_question(self):
# def __hash__(self):
# def makeTopNode(self):
# def getCopular(gr,index,features):
# def getPossesive(gr,index):
# def join(node1,node2,gr):
# def isDefinite(node):
# class Node:
, which may contain function names, class names, or code. Output only the next line. | return ret
|
Given snippet: <|code_start|># import graph_representation.node
# from graph_representation.node import isRcmodProp
# from graph_representation.node import Node
def accessibility_wo_self(graph):
ret = accessibility(graph)
for k in ret:
ret[k].remove(k)
return ret
# def isRCmod(graph, node):
# ns = graph.neighbors(node)
# for neigbour in ns:
# if isRcmodProp(neigbour):
# propNs = graph.neighbors(neigbour)
# if len(propNs) == 1:
# return neigbour, propNs[0]
# return False
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from pygraph.algorithms.sorting import topological_sorting
from pygraph.classes.digraph import digraph
from pygraph.algorithms.accessibility import accessibility
from props.graph_representation.word import NO_INDEX, Word, strip_punctuations
from pygraph.algorithms.traversal import traversal
from pygraph.algorithms.minmax import minimal_spanning_tree, shortest_path
from props.graph_representation import newNode
from operator import itemgetter
import subprocess, math, re, nltk
import cgi
import time
import logging
and context:
# Path: props/graph_representation/word.py
# NO_INDEX = -1 # index used in cases where there's no such word in the sentence
#
# class Word:
# """
# word container class, to add the index of the word in addition to the word
#
# @type index: int
# @var index: the index of the word within the sentence
#
# @type text: string
# @var text: the text contained within this word
# """
#
# def __init__(self,index,word):
# """
# initialize a word container object
# """
# self.index = index
# self.word = word
#
# def to_conll_like(self):
# return ",".join([str(self.index),self.word])
#
# def __str__(self):
# ret = cgi.escape(self.word)
# if self.index != NO_INDEX:
# ret += '<FONT POINT-SIZE="7">[{0}]</FONT>'.format(self.index)
# return ret
#
# def __eq__(self,other_word):
# return (self.index == other_word.index) and (self.word == other_word.word)
#
# def __hash__(self):
# return self.__str__().__hash__()
#
# def strip_punctuations(ls):
# """
# removes punctuations from beginning and end of the list
# """
# puncts = ':.,;\t '
# sep = "\t"
# totalElms = len(ls)
# s = sep.join([x.word for x in ls])
# ret = ls[totalElms-len(s.lstrip(puncts).split(sep)):len(s.rstrip(puncts).split(sep))]
# return ret
#
# Path: props/graph_representation/newNode.py
# COPULA = "SameAs" # the textual value of a copula node
# PROP = "PROP" # the textual value of a property node
# RCMOD_PROP = "PROP" # the textual value of a property for rcmod node
# POSSESSIVE = "have" # the textual value of a possessive node
# APPOSITION = "appos" # the textual value of an appositio n node
# PREP = "PREP" # the textual value of a preposition node
# PREP_TYPE = "TYPE" # the textual value of a preposition node's type
# COND = "COND" # the textual value of a conditional node
# TIME = "TIME" # the textual value of a time node
# LOCATION = "LOCATION" # the textual value of a location node
# CONJUNCTION = "CONJ -" # the textual value of a conjunction node
# ADVERB = "ADV" # the textual value of a conjunction node
# EXISTENSIAL = "Exists" # the textual value of a conjunction node
# COND_TYPE= PREP_TYPE # the textual value of a conditional node's type
# RECT_NODE_SHAPE = "rect"
# DEFAULT_NODE_SHAPE = "ellipse"
# PRINT_FEATURES = [("Tense",lambda t:t),
# ("Determiner",lambda t:"det: "+t["Value"]),
# ("Time Value",lambda t:"date: "+t),
# ("Negation", lambda t:"negated"),
# ("Passive Voice", lambda t:"passive"),
# ("Modal",lambda t:"modal: "+ " ".join(t["Value"])),
# ("Definite",lambda t:t),
# ("Modifier",lambda t:"modifier: "+t)]
# def resetCounter():
# def __init__(self,text,isPredicate,features,gr,orderText = True,uid=-1):
# def removeLemma(self):
# def get_text(self,gr):
# def addPropogation(self,node):
# def minIndex(self):
# def maxIndex(self):
# def get_original_text(self):
# def get_sorted_text(self):
# def __str__(self):
# def to_conll_like(self):
# def neighbors(self):
# def incidents(self):
# def is_implicit(self):
# def pos(self):
# def isConj(self):
# def is_wh_question(self):
# def __hash__(self):
# def makeTopNode(self):
# def getCopular(gr,index,features):
# def getPossesive(gr,index):
# def join(node1,node2,gr):
# def isDefinite(node):
# class Node:
which might include code, classes, or functions. Output only the next line. | def duplicate_node(graph, node, connectToNeighbours):
|
Based on the snippet: <|code_start|>#! /usr/bin/env python3
def main(args):
ar, rs = count_reads_stats(args.reads)
print(rs, ar)
if args.config:
with open(str(args.config)) as f:
config = json.load(f)
config['reads_size'] = rs
config['r'] = ar
with open(str(args.config), 'w') as f:
json.dump(config, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Simulate reads form random genome with errors')
parser.add_argument('reads', help='Input histogram')
<|code_end|>
, predict the immediate next line with the help of imports:
import argparse
import json
from pathlib import Path
from covest.data import count_reads_stats
and context (classes, functions, sometimes code) from other files:
# Path: covest/data.py
# def count_reads_stats(fname):
# s = n = 0
# for _, read in load_reads(fname):
# s += len(read)
# n += 1
#
# ar = round(s / n) if n else 0
# return ar, s
. Output only the next line. | parser.add_argument('-c', '--config', type=Path, help='Add to config') |
Using the snippet: <|code_start|> 'estimated_genome_size', 'estimated_genome_size_std',
]
print(format_table(
header,
titles,
sorted(
list(table_lines.values()),
key=lambda x: (
x['original_coverage'],
x['original_error_rate'],
x['original_k'],
x.get('repeats', False),
)
),
template_file=format_templates[args.format],
escape=format_escape.get(args.format, None),
))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse experiment output and generate table')
parser.add_argument('path', help='Experiment')
parser.add_argument('-f', '--format', default='html', help='Table format')
parser.add_argument('-i', '--filter', default='*.out', help='Filter files')
parser.add_argument('-a', '--average', action='store_true',
help='Compute average from all sequences')
parser.add_argument('-ne', '--no-error', action='store_true', help='Error is unknown')
parser.add_argument('--legacy', action='store_true', help='Run in legacy mode')
args = parser.parse_args()
<|code_end|>
, determine the next line of code. You have imports:
import argparse
from collections import defaultdict
from covest.data import count_reads_stats
from tools.experiment_parser import parse_all
from tools.table_generator import format_table
and context (class names, function names, or code) available:
# Path: covest/data.py
# def count_reads_stats(fname):
# s = n = 0
# for _, read in load_reads(fname):
# s += len(read)
# n += 1
#
# ar = round(s / n) if n else 0
# return ar, s
#
# Path: tools/experiment_parser.py
# def parse_all(path, file_filter, err=False, legacy=False):
# # path = args.path
# # files = sorted(glob.glob(os.path.join(path, args.filter)))
# files = sorted(glob.glob(os.path.join(path, file_filter)))
# # err = not args.no_error
#
# table_lines = defaultdict(dict)
# sequences = set()
#
# for fname in files:
# try:
# seq_name, cov, error, k, ext, ef = parse_fname(fname, err)
# repeats = ext[-1] == 'r'
# sequences.add(seq_name)
# key = (seq_name, cov, error, k, repeats)
#
# table_lines[key]['provided_coverage'] = cov
# table_lines[key]['provided_error_rate'] = error
# table_lines[key]['provided_k'] = k
# table_lines[key]['repeats'] = repeats
# table_lines[key]['fname'] = fname
# table_lines[key]['seq_name'] = seq_name
#
# if ext == '.est' or ext == '.est_r':
# d = parse_estimate(fname)
# table_lines[key]['coverage'] = d.get(
# 'coverage', None)
# table_lines[key]['error_rate'] = d.get(
# 'error_rate', None)
# table_lines[key]['loglikelihood'] = d.get(
# 'loglikelihood', None)
# table_lines[key]['q1'] = d.get('q1', None)
# table_lines[key]['q2'] = d.get('q2', None)
# table_lines[key]['q'] = d.get('q', None)
# table_lines[key]['guessed_coverage'] = d.get('guessed_coverage', None)
# table_lines[key]['guessed_error_rate'] = d.get('guessed_error_rate', None)
# table_lines[key]['guessed_loglikelihood'] = d.get(
# 'guessed_loglikelihood', None)
# table_lines[key]['original_loglikelihood'] = d.get(
# 'original_loglikelihood', None)
# table_lines[key]['genome_size'] = d.get(
# 'genome_size', None)
# elif ext == '.fit':
# table_lines[key]['williams_genome_size'] = parse_williams(fname)
# else:
# table_lines[key]['khmer_coverage'] = kmer_to_read_coverage(
# parse_khmer(fname), k)
# except Exception as e:
# print('Unable to process {}\n{}'.format(fname, e), file=sys.stderr)
# return table_lines
#
# Path: tools/table_generator.py
# def format_table(header, titles, lines, template_file, escape=None, round_floats=4, is_list=False):
# def format_val(val):
# if round_floats and type(val) is float:
# val = round(val, round_floats)
# if escape:
# try:
# return escape(val)
# except:
# pass
# return val
#
# if not is_list:
# lines = lines_to_list(header, lines)
# data = {
# 'header': [
# {'value': format_val(titles.get(h, h)), 'first': i == 0, 'last': i == len(header) - 1}
# for i, h in enumerate(header)
# ],
# 'body': [
# {'line': [
# {'value': format_val(v), 'first': i == 0, 'last': i == len(l) - 1}
# for i, v in enumerate(l)
# ]} for l in lines
# ],
# }
#
# with open(template_file) as f:
# template = f.read()
# return pystache.render(template, data)
. Output only the next line. | main(args) |
Predict the next line for this snippet: <|code_start|> 'csv': 'templates/csv.tpl',
'tex': 'templates/tex.tpl',
}
format_escape = {
'tex': lambda x: x.replace('_', '\\_'),
}
titles = {
'original_coverage': 'Coverage',
'original_error_rate': 'Error Rate',
'estimated_coverage': 'Est. Coverage',
'estimated_coverage_std': 'Est. Coverage Std',
'estimated_error_rate': 'Est. Error Rate',
'estimated_error_rate_std': 'Est. Error Rate Std',
'estimated_genome_size': 'Est. Genome Size',
'estimated_genome_size_std': 'Est. Genome Size Std',
}
if args.average:
table_lines = compute_average(table_lines)
header = [
'original_coverage', 'original_error_rate',
'estimated_coverage', 'estimated_coverage_std',
'estimated_error_rate', 'estimated_error_rate_std',
'estimated_genome_size', 'estimated_genome_size_std',
]
print(format_table(
header,
<|code_end|>
with the help of current file imports:
import argparse
from collections import defaultdict
from covest.data import count_reads_stats
from tools.experiment_parser import parse_all
from tools.table_generator import format_table
and context from other files:
# Path: covest/data.py
# def count_reads_stats(fname):
# s = n = 0
# for _, read in load_reads(fname):
# s += len(read)
# n += 1
#
# ar = round(s / n) if n else 0
# return ar, s
#
# Path: tools/experiment_parser.py
# def parse_all(path, file_filter, err=False, legacy=False):
# # path = args.path
# # files = sorted(glob.glob(os.path.join(path, args.filter)))
# files = sorted(glob.glob(os.path.join(path, file_filter)))
# # err = not args.no_error
#
# table_lines = defaultdict(dict)
# sequences = set()
#
# for fname in files:
# try:
# seq_name, cov, error, k, ext, ef = parse_fname(fname, err)
# repeats = ext[-1] == 'r'
# sequences.add(seq_name)
# key = (seq_name, cov, error, k, repeats)
#
# table_lines[key]['provided_coverage'] = cov
# table_lines[key]['provided_error_rate'] = error
# table_lines[key]['provided_k'] = k
# table_lines[key]['repeats'] = repeats
# table_lines[key]['fname'] = fname
# table_lines[key]['seq_name'] = seq_name
#
# if ext == '.est' or ext == '.est_r':
# d = parse_estimate(fname)
# table_lines[key]['coverage'] = d.get(
# 'coverage', None)
# table_lines[key]['error_rate'] = d.get(
# 'error_rate', None)
# table_lines[key]['loglikelihood'] = d.get(
# 'loglikelihood', None)
# table_lines[key]['q1'] = d.get('q1', None)
# table_lines[key]['q2'] = d.get('q2', None)
# table_lines[key]['q'] = d.get('q', None)
# table_lines[key]['guessed_coverage'] = d.get('guessed_coverage', None)
# table_lines[key]['guessed_error_rate'] = d.get('guessed_error_rate', None)
# table_lines[key]['guessed_loglikelihood'] = d.get(
# 'guessed_loglikelihood', None)
# table_lines[key]['original_loglikelihood'] = d.get(
# 'original_loglikelihood', None)
# table_lines[key]['genome_size'] = d.get(
# 'genome_size', None)
# elif ext == '.fit':
# table_lines[key]['williams_genome_size'] = parse_williams(fname)
# else:
# table_lines[key]['khmer_coverage'] = kmer_to_read_coverage(
# parse_khmer(fname), k)
# except Exception as e:
# print('Unable to process {}\n{}'.format(fname, e), file=sys.stderr)
# return table_lines
#
# Path: tools/table_generator.py
# def format_table(header, titles, lines, template_file, escape=None, round_floats=4, is_list=False):
# def format_val(val):
# if round_floats and type(val) is float:
# val = round(val, round_floats)
# if escape:
# try:
# return escape(val)
# except:
# pass
# return val
#
# if not is_list:
# lines = lines_to_list(header, lines)
# data = {
# 'header': [
# {'value': format_val(titles.get(h, h)), 'first': i == 0, 'last': i == len(header) - 1}
# for i, h in enumerate(header)
# ],
# 'body': [
# {'line': [
# {'value': format_val(v), 'first': i == 0, 'last': i == len(l) - 1}
# for i, v in enumerate(l)
# ]} for l in lines
# ],
# }
#
# with open(template_file) as f:
# template = f.read()
# return pystache.render(template, data)
, which may contain function names, class names, or code. Output only the next line. | titles, |
Predict the next line for this snippet: <|code_start|>
def other(base):
b = random.randrange(len(BASES) - 1)
if BASES[b] == base:
return BASES[-1]
return BASES[b]
def reverse_complement(seq):
complement = {
'G': 'C',
'C': 'G',
'A': 'T',
'T': 'A',
'N': 'N',
}
return ''.join(complement[x] for x in reversed(seq))
def substitute(sequence):
subst_table = {
'W': ('A', 'T'),
'S': ('C', 'G'),
'M': ('A', 'C'),
'K': ('G', 'T'),
'R': ('A', 'G'),
'Y': ('C', 'T'),
'B': ('C', 'G', 'T'),
'D': ('A', 'G', 'T'),
'H': ('A', 'C', 'T'),
<|code_end|>
with the help of current file imports:
import random
import argparse
from os import path
from Bio import SeqIO
from covest.data import load_reads
and context from other files:
# Path: covest/data.py
# def load_reads(fname):
# _, ext = path.splitext(fname)
# fmt = 'fasta'
# if ext == '.fq' or ext == '.fastq':
# fmt = 'fastq'
# try:
# with open(fname, "rU") as f:
# for read in SeqIO.parse(f, fmt):
# yield read.id, read.seq
# except FileNotFoundError as e:
# verbose_print(e)
, which may contain function names, class names, or code. Output only the next line. | 'V': ('A', 'C', 'G'), |
Continue the code snippet: <|code_start|> if dest.exists():
dest.unlink()
src = str(run_script_filename.resolve())
dst = str(dest)
if link:
os.symlink(src, dst)
else:
shutil.copy2(src, dst)
else:
print('File does not exist: {}'.format(run_script_filename), file=sys.stderr)
exit(2)
def write_config(config, dest_dir):
with open(str(dest_dir / CONFIG_FILENAME), 'w') as f:
json.dump(config, f)
def pipeline(src_file, dest_dir, link=True, force=False, run_script_filename=None, sample=None,
read_info=None, src_config_file=None, clean=False, generate_coverage=None,
use_art=False):
mkdir(dest_dir, force=force)
if generate_coverage is None:
reads_file = get_reads_data(src_file, dest_dir, link=link)
else:
reads_file = get_reads_from_sequence(src_file, dest_dir, generate_coverage, use_art=use_art)
try:
config = generate_config(reads_file, src_config_file)
if sample is not None:
reads_file, config = sample_reads(reads_file, config, sample_info=sample)
<|code_end|>
. Use current file imports:
import argparse
import json
import os
import shutil
import sys
from pathlib import Path
from tempfile import mkstemp
from covest.constants import DEFAULT_K
from covest.data import sample_reads as _sample_reads, count_reads_stats
from covest.utils import run
and context (classes, functions, or code) from other files:
# Path: covest/constants.py
# DEFAULT_K = 21
#
# Path: covest/data.py
# def sample_reads(src_reads_file, dest_reads_file, factor):
# prob = 1.0 / factor
# with open(dest_reads_file, 'w') as f:
# for read_id, read in load_reads(src_reads_file):
# if random.random() < prob:
# f.write('>{}\n'.format(read_id))
# f.write('{}\n'.format(read))
#
# def count_reads_stats(fname):
# s = n = 0
# for _, read in load_reads(fname):
# s += len(read)
# n += 1
#
# ar = round(s / n) if n else 0
# return ar, s
#
# Path: covest/utils.py
# def run(command, shell=False, output=None, verbose=False):
# if verbose:
# print(command, file=sys.stderr)
# f = open(output, 'w') if output else None
# if not shell:
# command = command.split()
# return subprocess.call(command, shell=shell, stdout=f)
. Output only the next line. | if 'r' not in config or 'reads_size' not in config: |
Next line prediction: <|code_start|> print('read length: %d, size: %d' % (read_length, reads_size), file=sys.stderr)
config.update({
'reads_size': reads_size,
'r': read_length,
})
return config
def generate_histogram(reads_file, dest_dir, config, clean=False):
print('Generating histogram...', file=sys.stderr)
hist_file = dest_dir / HISTOGRAM_FILENAME
jellyfish_count = 'jellyfish count -m {k} -s 500M -t 16 -C {infile} -o {infile}.jf'
jellyfish_hist = 'jellyfish histo {infile}.jf -o {outfile}'
params = {
'k': DEFAULT_K,
'infile': reads_file,
'outfile': hist_file,
}
run(jellyfish_count.format(**params), shell=True, verbose=True)
run(jellyfish_hist.format(**params), shell=True, verbose=True)
config['k'] = DEFAULT_K
config['hist'] = hist_file.name
if clean:
Path('{}.jf'.format(reads_file)).unlink()
return hist_file, config
<|code_end|>
. Use current file imports:
(import argparse
import json
import os
import shutil
import sys
from pathlib import Path
from tempfile import mkstemp
from covest.constants import DEFAULT_K
from covest.data import sample_reads as _sample_reads, count_reads_stats
from covest.utils import run)
and context including class names, function names, or small code snippets from other files:
# Path: covest/constants.py
# DEFAULT_K = 21
#
# Path: covest/data.py
# def sample_reads(src_reads_file, dest_reads_file, factor):
# prob = 1.0 / factor
# with open(dest_reads_file, 'w') as f:
# for read_id, read in load_reads(src_reads_file):
# if random.random() < prob:
# f.write('>{}\n'.format(read_id))
# f.write('{}\n'.format(read))
#
# def count_reads_stats(fname):
# s = n = 0
# for _, read in load_reads(fname):
# s += len(read)
# n += 1
#
# ar = round(s / n) if n else 0
# return ar, s
#
# Path: covest/utils.py
# def run(command, shell=False, output=None, verbose=False):
# if verbose:
# print(command, file=sys.stderr)
# f = open(output, 'w') if output else None
# if not shell:
# command = command.split()
# return subprocess.call(command, shell=shell, stdout=f)
. Output only the next line. | def create_run_script(run_script_filename, dest_dir, link=True): |
Next line prediction: <|code_start|> rs = config['reads_size']
del config['reads_size']
else:
_, rs = count_reads_stats(str(reads_file))
c = rs / gs
factor = c / tc
print(
'Current coverage: {c}, target coverage: {tc}, genome size: {gs}, '
'factor: {factor}'.format(
c=c, tc=tc, gs=gs, factor=factor
),
file=sys.stderr
)
elif len(sample_info) == 1:
factor = sample_info[0]
print(
'Factor: {factor}'.format(factor=factor),
file=sys.stderr
)
else:
print('Please specify a valid sample_info.', file=sys.stderr)
exit(1)
fd, rf_sampled = mkstemp(prefix='reads', suffix='.fa')
_sample_reads(str(reads_file), rf_sampled, factor)
os.close(fd)
if 'r' in config:
del config['r']
reads_file.unlink()
<|code_end|>
. Use current file imports:
(import argparse
import json
import os
import shutil
import sys
from pathlib import Path
from tempfile import mkstemp
from covest.constants import DEFAULT_K
from covest.data import sample_reads as _sample_reads, count_reads_stats
from covest.utils import run)
and context including class names, function names, or small code snippets from other files:
# Path: covest/constants.py
# DEFAULT_K = 21
#
# Path: covest/data.py
# def sample_reads(src_reads_file, dest_reads_file, factor):
# prob = 1.0 / factor
# with open(dest_reads_file, 'w') as f:
# for read_id, read in load_reads(src_reads_file):
# if random.random() < prob:
# f.write('>{}\n'.format(read_id))
# f.write('{}\n'.format(read))
#
# def count_reads_stats(fname):
# s = n = 0
# for _, read in load_reads(fname):
# s += len(read)
# n += 1
#
# ar = round(s / n) if n else 0
# return ar, s
#
# Path: covest/utils.py
# def run(command, shell=False, output=None, verbose=False):
# if verbose:
# print(command, file=sys.stderr)
# f = open(output, 'w') if output else None
# if not shell:
# command = command.split()
# return subprocess.call(command, shell=shell, stdout=f)
. Output only the next line. | reads_file.suffix = '.fa' |
Predict the next line for this snippet: <|code_start|>#! /usr/bin/env python
SEPARATE_EF = True
def kmer_to_read_coverage(c, k, read_length=100):
if c is not None:
return c * read_length / (read_length - k + 1)
def compute_average(table_lines, std_key_suffix='_std'):
table_cnt = defaultdict(lambda: defaultdict(int))
table_sum = defaultdict(lambda: defaultdict(float))
table_avg = defaultdict(lambda: defaultdict(float))
table_std_sum = defaultdict(lambda: defaultdict(float))
for key, val in table_lines.items():
for k, v in val.items():
try:
table_sum[key[1:]][k] += v
table_cnt[key[1:]][k] += 1.0
except TypeError:
pass
for key, val in table_sum.items():
for k, v in val.items():
if table_cnt[key][k] == 0:
<|code_end|>
with the help of current file imports:
import argparse
from collections import defaultdict
from tools import templates
from tools.experiment_parser import parse_all
from tools.table_generator import format_table
and context from other files:
# Path: tools/templates.py
#
# Path: tools/experiment_parser.py
# def parse_all(path, file_filter, err=False, legacy=False):
# # path = args.path
# # files = sorted(glob.glob(os.path.join(path, args.filter)))
# files = sorted(glob.glob(os.path.join(path, file_filter)))
# # err = not args.no_error
#
# table_lines = defaultdict(dict)
# sequences = set()
#
# for fname in files:
# try:
# seq_name, cov, error, k, ext, ef = parse_fname(fname, err)
# repeats = ext[-1] == 'r'
# sequences.add(seq_name)
# key = (seq_name, cov, error, k, repeats)
#
# table_lines[key]['provided_coverage'] = cov
# table_lines[key]['provided_error_rate'] = error
# table_lines[key]['provided_k'] = k
# table_lines[key]['repeats'] = repeats
# table_lines[key]['fname'] = fname
# table_lines[key]['seq_name'] = seq_name
#
# if ext == '.est' or ext == '.est_r':
# d = parse_estimate(fname)
# table_lines[key]['coverage'] = d.get(
# 'coverage', None)
# table_lines[key]['error_rate'] = d.get(
# 'error_rate', None)
# table_lines[key]['loglikelihood'] = d.get(
# 'loglikelihood', None)
# table_lines[key]['q1'] = d.get('q1', None)
# table_lines[key]['q2'] = d.get('q2', None)
# table_lines[key]['q'] = d.get('q', None)
# table_lines[key]['guessed_coverage'] = d.get('guessed_coverage', None)
# table_lines[key]['guessed_error_rate'] = d.get('guessed_error_rate', None)
# table_lines[key]['guessed_loglikelihood'] = d.get(
# 'guessed_loglikelihood', None)
# table_lines[key]['original_loglikelihood'] = d.get(
# 'original_loglikelihood', None)
# table_lines[key]['genome_size'] = d.get(
# 'genome_size', None)
# elif ext == '.fit':
# table_lines[key]['williams_genome_size'] = parse_williams(fname)
# else:
# table_lines[key]['khmer_coverage'] = kmer_to_read_coverage(
# parse_khmer(fname), k)
# except Exception as e:
# print('Unable to process {}\n{}'.format(fname, e), file=sys.stderr)
# return table_lines
#
# Path: tools/table_generator.py
# def format_table(header, titles, lines, template_file, escape=None, round_floats=4, is_list=False):
# def format_val(val):
# if round_floats and type(val) is float:
# val = round(val, round_floats)
# if escape:
# try:
# return escape(val)
# except:
# pass
# return val
#
# if not is_list:
# lines = lines_to_list(header, lines)
# data = {
# 'header': [
# {'value': format_val(titles.get(h, h)), 'first': i == 0, 'last': i == len(header) - 1}
# for i, h in enumerate(header)
# ],
# 'body': [
# {'line': [
# {'value': format_val(v), 'first': i == 0, 'last': i == len(l) - 1}
# for i, v in enumerate(l)
# ]} for l in lines
# ],
# }
#
# with open(template_file) as f:
# template = f.read()
# return pystache.render(template, data)
, which may contain function names, class names, or code. Output only the next line. | table_avg[key][k] = None |
Next line prediction: <|code_start|>#! /usr/bin/env python
SEPARATE_EF = True
def kmer_to_read_coverage(c, k, read_length=100):
if c is not None:
<|code_end|>
. Use current file imports:
(import argparse
from collections import defaultdict
from tools import templates
from tools.experiment_parser import parse_all
from tools.table_generator import format_table)
and context including class names, function names, or small code snippets from other files:
# Path: tools/templates.py
#
# Path: tools/experiment_parser.py
# def parse_all(path, file_filter, err=False, legacy=False):
# # path = args.path
# # files = sorted(glob.glob(os.path.join(path, args.filter)))
# files = sorted(glob.glob(os.path.join(path, file_filter)))
# # err = not args.no_error
#
# table_lines = defaultdict(dict)
# sequences = set()
#
# for fname in files:
# try:
# seq_name, cov, error, k, ext, ef = parse_fname(fname, err)
# repeats = ext[-1] == 'r'
# sequences.add(seq_name)
# key = (seq_name, cov, error, k, repeats)
#
# table_lines[key]['provided_coverage'] = cov
# table_lines[key]['provided_error_rate'] = error
# table_lines[key]['provided_k'] = k
# table_lines[key]['repeats'] = repeats
# table_lines[key]['fname'] = fname
# table_lines[key]['seq_name'] = seq_name
#
# if ext == '.est' or ext == '.est_r':
# d = parse_estimate(fname)
# table_lines[key]['coverage'] = d.get(
# 'coverage', None)
# table_lines[key]['error_rate'] = d.get(
# 'error_rate', None)
# table_lines[key]['loglikelihood'] = d.get(
# 'loglikelihood', None)
# table_lines[key]['q1'] = d.get('q1', None)
# table_lines[key]['q2'] = d.get('q2', None)
# table_lines[key]['q'] = d.get('q', None)
# table_lines[key]['guessed_coverage'] = d.get('guessed_coverage', None)
# table_lines[key]['guessed_error_rate'] = d.get('guessed_error_rate', None)
# table_lines[key]['guessed_loglikelihood'] = d.get(
# 'guessed_loglikelihood', None)
# table_lines[key]['original_loglikelihood'] = d.get(
# 'original_loglikelihood', None)
# table_lines[key]['genome_size'] = d.get(
# 'genome_size', None)
# elif ext == '.fit':
# table_lines[key]['williams_genome_size'] = parse_williams(fname)
# else:
# table_lines[key]['khmer_coverage'] = kmer_to_read_coverage(
# parse_khmer(fname), k)
# except Exception as e:
# print('Unable to process {}\n{}'.format(fname, e), file=sys.stderr)
# return table_lines
#
# Path: tools/table_generator.py
# def format_table(header, titles, lines, template_file, escape=None, round_floats=4, is_list=False):
# def format_val(val):
# if round_floats and type(val) is float:
# val = round(val, round_floats)
# if escape:
# try:
# return escape(val)
# except:
# pass
# return val
#
# if not is_list:
# lines = lines_to_list(header, lines)
# data = {
# 'header': [
# {'value': format_val(titles.get(h, h)), 'first': i == 0, 'last': i == len(header) - 1}
# for i, h in enumerate(header)
# ],
# 'body': [
# {'line': [
# {'value': format_val(v), 'first': i == 0, 'last': i == len(l) - 1}
# for i, v in enumerate(l)
# ]} for l in lines
# ],
# }
#
# with open(template_file) as f:
# template = f.read()
# return pystache.render(template, data)
. Output only the next line. | return c * read_length / (read_length - k + 1) |
Given snippet: <|code_start|>
def compute_average(table_lines, std_key_suffix='_std'):
table_cnt = defaultdict(lambda: defaultdict(int))
table_sum = defaultdict(lambda: defaultdict(float))
table_avg = defaultdict(lambda: defaultdict(float))
table_std_sum = defaultdict(lambda: defaultdict(float))
for key, val in table_lines.items():
for k, v in val.items():
try:
table_sum[key[1:]][k] += v
table_cnt[key[1:]][k] += 1.0
except TypeError:
pass
for key, val in table_sum.items():
for k, v in val.items():
if table_cnt[key][k] == 0:
table_avg[key][k] = None
else:
table_avg[key][k] = v / table_cnt[key][k]
for key, val in table_lines.items():
for k, v in val.items():
try:
table_std_sum[key[1:]][k] += (v - table_avg[key[1:]][k]) ** 2
except TypeError:
pass
for key, val in table_std_sum.items():
for k, v in val.items():
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import argparse
from collections import defaultdict
from tools import templates
from tools.experiment_parser import parse_all
from tools.table_generator import format_table
and context:
# Path: tools/templates.py
#
# Path: tools/experiment_parser.py
# def parse_all(path, file_filter, err=False, legacy=False):
# # path = args.path
# # files = sorted(glob.glob(os.path.join(path, args.filter)))
# files = sorted(glob.glob(os.path.join(path, file_filter)))
# # err = not args.no_error
#
# table_lines = defaultdict(dict)
# sequences = set()
#
# for fname in files:
# try:
# seq_name, cov, error, k, ext, ef = parse_fname(fname, err)
# repeats = ext[-1] == 'r'
# sequences.add(seq_name)
# key = (seq_name, cov, error, k, repeats)
#
# table_lines[key]['provided_coverage'] = cov
# table_lines[key]['provided_error_rate'] = error
# table_lines[key]['provided_k'] = k
# table_lines[key]['repeats'] = repeats
# table_lines[key]['fname'] = fname
# table_lines[key]['seq_name'] = seq_name
#
# if ext == '.est' or ext == '.est_r':
# d = parse_estimate(fname)
# table_lines[key]['coverage'] = d.get(
# 'coverage', None)
# table_lines[key]['error_rate'] = d.get(
# 'error_rate', None)
# table_lines[key]['loglikelihood'] = d.get(
# 'loglikelihood', None)
# table_lines[key]['q1'] = d.get('q1', None)
# table_lines[key]['q2'] = d.get('q2', None)
# table_lines[key]['q'] = d.get('q', None)
# table_lines[key]['guessed_coverage'] = d.get('guessed_coverage', None)
# table_lines[key]['guessed_error_rate'] = d.get('guessed_error_rate', None)
# table_lines[key]['guessed_loglikelihood'] = d.get(
# 'guessed_loglikelihood', None)
# table_lines[key]['original_loglikelihood'] = d.get(
# 'original_loglikelihood', None)
# table_lines[key]['genome_size'] = d.get(
# 'genome_size', None)
# elif ext == '.fit':
# table_lines[key]['williams_genome_size'] = parse_williams(fname)
# else:
# table_lines[key]['khmer_coverage'] = kmer_to_read_coverage(
# parse_khmer(fname), k)
# except Exception as e:
# print('Unable to process {}\n{}'.format(fname, e), file=sys.stderr)
# return table_lines
#
# Path: tools/table_generator.py
# def format_table(header, titles, lines, template_file, escape=None, round_floats=4, is_list=False):
# def format_val(val):
# if round_floats and type(val) is float:
# val = round(val, round_floats)
# if escape:
# try:
# return escape(val)
# except:
# pass
# return val
#
# if not is_list:
# lines = lines_to_list(header, lines)
# data = {
# 'header': [
# {'value': format_val(titles.get(h, h)), 'first': i == 0, 'last': i == len(header) - 1}
# for i, h in enumerate(header)
# ],
# 'body': [
# {'line': [
# {'value': format_val(v), 'first': i == 0, 'last': i == len(l) - 1}
# for i, v in enumerate(l)
# ]} for l in lines
# ],
# }
#
# with open(template_file) as f:
# template = f.read()
# return pystache.render(template, data)
which might include code, classes, or functions. Output only the next line. | if table_cnt[key][k] <= 1: |
Next line prediction: <|code_start|>#! /usr/bin/env python3
BASES = ['A', 'C', 'G', 'T', ]
DEFAULT_FACTOR = 2
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Subsample reads randomly form other reads')
parser.add_argument('-f', '--factor', type=float,
default=DEFAULT_FACTOR, help='Factor')
<|code_end|>
. Use current file imports:
(import argparse
from covest.data import sample_reads)
and context including class names, function names, or small code snippets from other files:
# Path: covest/data.py
# def sample_reads(src_reads_file, dest_reads_file, factor):
# prob = 1.0 / factor
# with open(dest_reads_file, 'w') as f:
# for read_id, read in load_reads(src_reads_file):
# if random.random() < prob:
# f.write('>{}\n'.format(read_id))
# f.write('{}\n'.format(read))
. Output only the next line. | parser.add_argument('reads', help='Reads file') |
Next line prediction: <|code_start|>
def correct_c(self, c):
return c * (self.r - self.k + 1) / self.r
@lru_cache(maxsize=None)
def _get_lambda_s(self, c, err):
return [
c * (3 ** -s) * (1.0 - err) ** (self.k - s) * err ** s
for s in range(self.max_error)
]
def compute_probabilities(self, c, err, *_):
# read to kmer coverage
ck = self.correct_c(c)
# lambda for kmers with s errors
l_s = self._get_lambda_s(ck, err)
# expected probability of kmers with s errors and coverage >= 1
n_s = [self.comb[s] * (1.0 - exp(-l_s[s])) for s in range(self.max_error)]
sum_n_s = fix_zero(sum(n_s[t] for t in range(self.max_error)))
# portion of kmers with s errors
a_s = [n_s[s] / sum_n_s for s in range(self.max_error)]
# probability that unique kmer has coverage j (j > 0)
p_j = {
j: sum(
a_s[s] * tr_poisson(l_s[s], j) for s in range(self.max_error)
)
for j in self.hist
}
return p_j
<|code_end|>
. Use current file imports:
(import inspect
import itertools
import multiprocessing
import sys
import matplotlib.pyplot as plt
from covest_poisson import truncated_poisson as tr_poisson
from functools import lru_cache
from math import exp, fsum
from scipy.misc import comb
from covest import constants
from covest.utils import safe_log, fix_zero)
and context including class names, function names, or small code snippets from other files:
# Path: covest/constants.py
# MAX_EXP = 200
# GRID_DEPTH = 3
# INF = float('inf')
# VERBOSE = True
# PLOT_LOG_SCALE = True
# USE_BIGFLOAT = False
# STEP = 1.1
# OPTIMIZATION_METHOD = 'L-BFGS-B'
# INITIAL_GRID_COUNT = 20
# INITIAL_GRID_STEP = 3
# DEFAULT_ERR_SCALE = 1
# DEFAULT_K = 21
# DEFAULT_READ_LENGTH = 100
# DEFAULT_REPEAT_MODEL = 0
# DEFAULT_MIN_SINGLECOPY_RATIO = 0.3
# AUTO_SAMPLE_TARGET_COVERAGE = 12
# AUTO_TRIM_PRECISION = 6
# NOISE_THRESHOLD = 10**-6
# MAX_ERRORS = 8
# MAX_NOTRIM = 25
# DEFAULT_THREAD_COUNT = cpu_count()
# DEFAULT_THREAD_COUNT = 2
#
# Path: covest/utils.py
# def safe_log(x):
# if x is None or x <= 0:
# return -constants.INF
# return log(x)
#
# def fix_zero(x, val=1):
# if x == 0:
# return val
# else:
# return x
. Output only the next line. | def compute_loglikelihood(self, *args): |
Predict the next line for this snippet: <|code_start|>
MODEL_CLASS_SUFFIX = 'Model'
class BasicModel:
params = ('coverage', 'error_rate')
def __init__(self, k, r, hist, tail, max_error=None, max_cov=None, *args, **kwargs):
self.repeats = False
self.k = k
self.r = r
self.bounds = ((0.01, max_cov), (0, 0.5))
self.defaults = (1, self._default_param(1))
self.comb = [comb(k, s) * (3 ** s) for s in range(k + 1)]
self.hist = hist
self.tail = tail
if max_error is None:
self.max_error = self.k + 1
else:
<|code_end|>
with the help of current file imports:
import inspect
import itertools
import multiprocessing
import sys
import matplotlib.pyplot as plt
from covest_poisson import truncated_poisson as tr_poisson
from functools import lru_cache
from math import exp, fsum
from scipy.misc import comb
from covest import constants
from covest.utils import safe_log, fix_zero
and context from other files:
# Path: covest/constants.py
# MAX_EXP = 200
# GRID_DEPTH = 3
# INF = float('inf')
# VERBOSE = True
# PLOT_LOG_SCALE = True
# USE_BIGFLOAT = False
# STEP = 1.1
# OPTIMIZATION_METHOD = 'L-BFGS-B'
# INITIAL_GRID_COUNT = 20
# INITIAL_GRID_STEP = 3
# DEFAULT_ERR_SCALE = 1
# DEFAULT_K = 21
# DEFAULT_READ_LENGTH = 100
# DEFAULT_REPEAT_MODEL = 0
# DEFAULT_MIN_SINGLECOPY_RATIO = 0.3
# AUTO_SAMPLE_TARGET_COVERAGE = 12
# AUTO_TRIM_PRECISION = 6
# NOISE_THRESHOLD = 10**-6
# MAX_ERRORS = 8
# MAX_NOTRIM = 25
# DEFAULT_THREAD_COUNT = cpu_count()
# DEFAULT_THREAD_COUNT = 2
#
# Path: covest/utils.py
# def safe_log(x):
# if x is None or x <= 0:
# return -constants.INF
# return log(x)
#
# def fix_zero(x, val=1):
# if x == 0:
# return val
# else:
# return x
, which may contain function names, class names, or code. Output only the next line. | self.max_error = min(self.k + 1, max_error) |
Predict the next line for this snippet: <|code_start|> return [probs.get(i, 0) for i in range(max_j)]
hs = float(sum(self.hist.values()))
hp = adjust_probs({k: f / hs for k, f in self.hist.items()}, hist=True)
ep = adjust_probs(self.compute_probabilities(*est))
gp = adjust_probs(self.compute_probabilities(*guess))
if orig is not None and None not in orig:
op = adjust_probs(self.compute_probabilities(*orig))
else:
op = adjust_probs({1:0})
if log_scale:
plt.yscale('log')
plt.plot(
range(len(hp)), hp, 'ko',
label='hist',
ms=8,
)
plt.plot(
range(len(ep)), ep, 'ro',
label='est: {}'.format(fmt(est)),
ms=6,
)
plt.plot(
range(len(gp)), gp, 'go',
label='guess: {}'.format(fmt(guess)),
ms=5,
)
plt.plot(
range(len(op)), op, 'co',
<|code_end|>
with the help of current file imports:
import inspect
import itertools
import multiprocessing
import sys
import matplotlib.pyplot as plt
from covest_poisson import truncated_poisson as tr_poisson
from functools import lru_cache
from math import exp, fsum
from scipy.misc import comb
from covest import constants
from covest.utils import safe_log, fix_zero
and context from other files:
# Path: covest/constants.py
# MAX_EXP = 200
# GRID_DEPTH = 3
# INF = float('inf')
# VERBOSE = True
# PLOT_LOG_SCALE = True
# USE_BIGFLOAT = False
# STEP = 1.1
# OPTIMIZATION_METHOD = 'L-BFGS-B'
# INITIAL_GRID_COUNT = 20
# INITIAL_GRID_STEP = 3
# DEFAULT_ERR_SCALE = 1
# DEFAULT_K = 21
# DEFAULT_READ_LENGTH = 100
# DEFAULT_REPEAT_MODEL = 0
# DEFAULT_MIN_SINGLECOPY_RATIO = 0.3
# AUTO_SAMPLE_TARGET_COVERAGE = 12
# AUTO_TRIM_PRECISION = 6
# NOISE_THRESHOLD = 10**-6
# MAX_ERRORS = 8
# MAX_NOTRIM = 25
# DEFAULT_THREAD_COUNT = cpu_count()
# DEFAULT_THREAD_COUNT = 2
#
# Path: covest/utils.py
# def safe_log(x):
# if x is None or x <= 0:
# return -constants.INF
# return log(x)
#
# def fix_zero(x, val=1):
# if x == 0:
# return val
# else:
# return x
, which may contain function names, class names, or code. Output only the next line. | label='orig: {}'.format(fmt(orig)), |
Predict the next line for this snippet: <|code_start|> return x
def verbose_print(message):
if not constants.VERBOSE:
return
sys.stderr.write(message + "\n")
def safe_int(x):
return int(x) if x != float('inf') else None
def fix_zero(x, val=1):
if x == 0:
return val
else:
return x
def safe_log(x):
if x is None or x <= 0:
return -constants.INF
return log(x)
def estimate_p(cc, alpha):
return (cc * (alpha - 1)) / (alpha * cc - alpha - cc)
<|code_end|>
with the help of current file imports:
import subprocess
import sys
from math import log, exp
from covest import constants
from .inverse import inverse
and context from other files:
# Path: covest/constants.py
# MAX_EXP = 200
# GRID_DEPTH = 3
# INF = float('inf')
# VERBOSE = True
# PLOT_LOG_SCALE = True
# USE_BIGFLOAT = False
# STEP = 1.1
# OPTIMIZATION_METHOD = 'L-BFGS-B'
# INITIAL_GRID_COUNT = 20
# INITIAL_GRID_STEP = 3
# DEFAULT_ERR_SCALE = 1
# DEFAULT_K = 21
# DEFAULT_READ_LENGTH = 100
# DEFAULT_REPEAT_MODEL = 0
# DEFAULT_MIN_SINGLECOPY_RATIO = 0.3
# AUTO_SAMPLE_TARGET_COVERAGE = 12
# AUTO_TRIM_PRECISION = 6
# NOISE_THRESHOLD = 10**-6
# MAX_ERRORS = 8
# MAX_NOTRIM = 25
# DEFAULT_THREAD_COUNT = cpu_count()
# DEFAULT_THREAD_COUNT = 2
#
# Path: covest/inverse.py
# def inverse(f, delta=1e-8):
# """Given a function y = f(x) that is a monotonically increasing function on
# non-negative numbers, return the function x = f_1(y) that is an approximate
# inverse, picking the closest value to the inverse, within delta."""
# def derivative(func):
# return lambda y: (func(y + delta) - func(y)) / delta
#
# def root(y):
# return lambda x: f(x) - y
#
# def newton(y, iters=15):
# guess = float(y) / 2
# rootfunc = root(y)
# derifunc = derivative(rootfunc)
# d = (rootfunc(guess) / derifunc(guess))
# while abs(d) > delta:
# guess -= d
# d = (rootfunc(guess) / derifunc(guess))
#
# return guess
# return newton
, which may contain function names, class names, or code. Output only the next line. | def kmer_to_read_coverage(coverage, k, r): |
Continue the code snippet: <|code_start|> r = s / ss
r = round(r, constants.AUTO_TRIM_PRECISION)
if r >= 1:
trim = i
break
return trim
def trim_hist(hist, threshold):
if threshold >= max(hist):
return hist, 0
h = {k: v for k, v in hist.items() if k < threshold}
tail = sum(v for k, v in hist.items() if k >= threshold)
# remove 0 elements
return {k: v for k, v in h.items() if v > 0}, tail
def process_histogram(hist, k, r, trim=None, sample_factor=None, max_notrim=constants.MAX_NOTRIM):
hist = dict(hist)
tail = 0
if sample_factor is not None and sample_factor > 1:
verbose_print('Sampling histogram {}x...'.format(sample_factor))
hist = sample_histogram(hist, sample_factor, trim)
if sample_factor is None and max(hist) > max_notrim:
verbose_print('Sampling histogram...')
hist, sample_factor, c, e = auto_sample_hist(hist, k, r, trim=trim)
if sample_factor > 1:
verbose_print('Histogram sampled with factor {}.'.format(sample_factor))
else:
verbose_print('No sampling necessary')
<|code_end|>
. Use current file imports:
import random
from collections import defaultdict
from covest_poisson import poisson_dist
from math import exp, floor, ceil
from scipy.stats import binom
from covest import constants
from .utils import estimate_p, kmer_to_read_coverage, fix_coverage, verbose_print
and context (classes, functions, or code) from other files:
# Path: covest/constants.py
# MAX_EXP = 200
# GRID_DEPTH = 3
# INF = float('inf')
# VERBOSE = True
# PLOT_LOG_SCALE = True
# USE_BIGFLOAT = False
# STEP = 1.1
# OPTIMIZATION_METHOD = 'L-BFGS-B'
# INITIAL_GRID_COUNT = 20
# INITIAL_GRID_STEP = 3
# DEFAULT_ERR_SCALE = 1
# DEFAULT_K = 21
# DEFAULT_READ_LENGTH = 100
# DEFAULT_REPEAT_MODEL = 0
# DEFAULT_MIN_SINGLECOPY_RATIO = 0.3
# AUTO_SAMPLE_TARGET_COVERAGE = 12
# AUTO_TRIM_PRECISION = 6
# NOISE_THRESHOLD = 10**-6
# MAX_ERRORS = 8
# MAX_NOTRIM = 25
# DEFAULT_THREAD_COUNT = cpu_count()
# DEFAULT_THREAD_COUNT = 2
#
# Path: covest/utils.py
# def estimate_p(cc, alpha):
# return (cc * (alpha - 1)) / (alpha * cc - alpha - cc)
#
# def kmer_to_read_coverage(coverage, k, r):
# return coverage * r / (r - k + 1)
#
# def fix_coverage(coverage):
# return inverse(lambda c: (c - c * exp(-c)) / (1 - exp(-c) - c * exp(-c)))(coverage)
#
# def verbose_print(message):
# if not constants.VERBOSE:
# return
# sys.stderr.write(message + "\n")
. Output only the next line. | else: |
Continue the code snippet: <|code_start|>def trim_hist(hist, threshold):
if threshold >= max(hist):
return hist, 0
h = {k: v for k, v in hist.items() if k < threshold}
tail = sum(v for k, v in hist.items() if k >= threshold)
# remove 0 elements
return {k: v for k, v in h.items() if v > 0}, tail
def process_histogram(hist, k, r, trim=None, sample_factor=None, max_notrim=constants.MAX_NOTRIM):
hist = dict(hist)
tail = 0
if sample_factor is not None and sample_factor > 1:
verbose_print('Sampling histogram {}x...'.format(sample_factor))
hist = sample_histogram(hist, sample_factor, trim)
if sample_factor is None and max(hist) > max_notrim:
verbose_print('Sampling histogram...')
hist, sample_factor, c, e = auto_sample_hist(hist, k, r, trim=trim)
if sample_factor > 1:
verbose_print('Histogram sampled with factor {}.'.format(sample_factor))
else:
verbose_print('No sampling necessary')
else:
c, e = compute_coverage_apx(hist, k, r)
if sample_factor is None:
sample_factor = 1
if trim is None:
if max(hist) > max_notrim:
trim = get_trim(hist, ignore_last=True)
verbose_print('Trimming at: {}'.format(trim))
<|code_end|>
. Use current file imports:
import random
from collections import defaultdict
from covest_poisson import poisson_dist
from math import exp, floor, ceil
from scipy.stats import binom
from covest import constants
from .utils import estimate_p, kmer_to_read_coverage, fix_coverage, verbose_print
and context (classes, functions, or code) from other files:
# Path: covest/constants.py
# MAX_EXP = 200
# GRID_DEPTH = 3
# INF = float('inf')
# VERBOSE = True
# PLOT_LOG_SCALE = True
# USE_BIGFLOAT = False
# STEP = 1.1
# OPTIMIZATION_METHOD = 'L-BFGS-B'
# INITIAL_GRID_COUNT = 20
# INITIAL_GRID_STEP = 3
# DEFAULT_ERR_SCALE = 1
# DEFAULT_K = 21
# DEFAULT_READ_LENGTH = 100
# DEFAULT_REPEAT_MODEL = 0
# DEFAULT_MIN_SINGLECOPY_RATIO = 0.3
# AUTO_SAMPLE_TARGET_COVERAGE = 12
# AUTO_TRIM_PRECISION = 6
# NOISE_THRESHOLD = 10**-6
# MAX_ERRORS = 8
# MAX_NOTRIM = 25
# DEFAULT_THREAD_COUNT = cpu_count()
# DEFAULT_THREAD_COUNT = 2
#
# Path: covest/utils.py
# def estimate_p(cc, alpha):
# return (cc * (alpha - 1)) / (alpha * cc - alpha - cc)
#
# def kmer_to_read_coverage(coverage, k, r):
# return coverage * r / (r - k + 1)
#
# def fix_coverage(coverage):
# return inverse(lambda c: (c - c * exp(-c)) / (1 - exp(-c) - c * exp(-c)))(coverage)
#
# def verbose_print(message):
# if not constants.VERBOSE:
# return
# sys.stderr.write(message + "\n")
. Output only the next line. | hist, tail = trim_hist(hist, trim) |
Predict the next line after this snippet: <|code_start|>
def compute_coverage_apx(hist, k, r):
observed_ones = hist.get(1, 0)
all_kmers = sum(i * h for i, h in hist.items())
total_unique_kmers = sum(h for h in hist.values())
if total_unique_kmers == 0:
return 0.0, 1.0
# discard first column
all_kmers -= observed_ones
unique_kmers = total_unique_kmers - observed_ones
# compute coverage from hist >=2
try:
cov = all_kmers / unique_kmers
cov = fix_coverage(cov)
# fix unique kmers
<|code_end|>
using the current file's imports:
import random
from collections import defaultdict
from covest_poisson import poisson_dist
from math import exp, floor, ceil
from scipy.stats import binom
from covest import constants
from .utils import estimate_p, kmer_to_read_coverage, fix_coverage, verbose_print
and any relevant context from other files:
# Path: covest/constants.py
# MAX_EXP = 200
# GRID_DEPTH = 3
# INF = float('inf')
# VERBOSE = True
# PLOT_LOG_SCALE = True
# USE_BIGFLOAT = False
# STEP = 1.1
# OPTIMIZATION_METHOD = 'L-BFGS-B'
# INITIAL_GRID_COUNT = 20
# INITIAL_GRID_STEP = 3
# DEFAULT_ERR_SCALE = 1
# DEFAULT_K = 21
# DEFAULT_READ_LENGTH = 100
# DEFAULT_REPEAT_MODEL = 0
# DEFAULT_MIN_SINGLECOPY_RATIO = 0.3
# AUTO_SAMPLE_TARGET_COVERAGE = 12
# AUTO_TRIM_PRECISION = 6
# NOISE_THRESHOLD = 10**-6
# MAX_ERRORS = 8
# MAX_NOTRIM = 25
# DEFAULT_THREAD_COUNT = cpu_count()
# DEFAULT_THREAD_COUNT = 2
#
# Path: covest/utils.py
# def estimate_p(cc, alpha):
# return (cc * (alpha - 1)) / (alpha * cc - alpha - cc)
#
# def kmer_to_read_coverage(coverage, k, r):
# return coverage * r / (r - k + 1)
#
# def fix_coverage(coverage):
# return inverse(lambda c: (c - c * exp(-c)) / (1 - exp(-c) - c * exp(-c)))(coverage)
#
# def verbose_print(message):
# if not constants.VERBOSE:
# return
# sys.stderr.write(message + "\n")
. Output only the next line. | unique_kmers /= (1.0 - exp(-cov) - cov * exp(-cov)) |
Given the code snippet: <|code_start|> all_kmers = sum(i * h for i, h in hist.items())
total_unique_kmers = sum(h for h in hist.values())
if total_unique_kmers == 0:
return 0.0, 1.0
# discard first column
all_kmers -= observed_ones
unique_kmers = total_unique_kmers - observed_ones
# compute coverage from hist >=2
try:
cov = all_kmers / unique_kmers
cov = fix_coverage(cov)
# fix unique kmers
unique_kmers /= (1.0 - exp(-cov) - cov * exp(-cov))
# compute alpha (error read ratio)
estimated_ones = unique_kmers * cov * exp(-cov)
estimated_zeros = unique_kmers * exp(-cov)
error_ones = max(0.0, observed_ones - estimated_ones)
alpha = error_ones / (total_unique_kmers + estimated_zeros)
# estimate probability of correct kmer and error rate
estimated_p = max(0.0, estimate_p(cov, alpha))
e = 1 - estimated_p ** (1.0 / k)
# return corrected coverage and error estimate
if estimated_p > 0:
# function for conversion between kmer and base coverage
return float(kmer_to_read_coverage(cov / estimated_p, k, r)), float(e)
else:
return 0.0, float(e)
except ZeroDivisionError:
<|code_end|>
, generate the next line using the imports in this file:
import random
from collections import defaultdict
from covest_poisson import poisson_dist
from math import exp, floor, ceil
from scipy.stats import binom
from covest import constants
from .utils import estimate_p, kmer_to_read_coverage, fix_coverage, verbose_print
and context (functions, classes, or occasionally code) from other files:
# Path: covest/constants.py
# MAX_EXP = 200
# GRID_DEPTH = 3
# INF = float('inf')
# VERBOSE = True
# PLOT_LOG_SCALE = True
# USE_BIGFLOAT = False
# STEP = 1.1
# OPTIMIZATION_METHOD = 'L-BFGS-B'
# INITIAL_GRID_COUNT = 20
# INITIAL_GRID_STEP = 3
# DEFAULT_ERR_SCALE = 1
# DEFAULT_K = 21
# DEFAULT_READ_LENGTH = 100
# DEFAULT_REPEAT_MODEL = 0
# DEFAULT_MIN_SINGLECOPY_RATIO = 0.3
# AUTO_SAMPLE_TARGET_COVERAGE = 12
# AUTO_TRIM_PRECISION = 6
# NOISE_THRESHOLD = 10**-6
# MAX_ERRORS = 8
# MAX_NOTRIM = 25
# DEFAULT_THREAD_COUNT = cpu_count()
# DEFAULT_THREAD_COUNT = 2
#
# Path: covest/utils.py
# def estimate_p(cc, alpha):
# return (cc * (alpha - 1)) / (alpha * cc - alpha - cc)
#
# def kmer_to_read_coverage(coverage, k, r):
# return coverage * r / (r - k + 1)
#
# def fix_coverage(coverage):
# return inverse(lambda c: (c - c * exp(-c)) / (1 - exp(-c) - c * exp(-c)))(coverage)
#
# def verbose_print(message):
# if not constants.VERBOSE:
# return
# sys.stderr.write(message + "\n")
. Output only the next line. | return 0.0, 1.0 |
Next line prediction: <|code_start|>
def auto_sample_hist(hist, k, r, trim=None):
h = dict(hist)
f = 1
s = 1
c, e = compute_coverage_apx(hist, k, r)
while c > constants.AUTO_SAMPLE_TARGET_COVERAGE:
f += s
s *= 2
h = sample_histogram(hist, factor=f, trim=trim)
c, e = compute_coverage_apx(h, k, r)
s //= 4
f2 = f - s
while s >= 1:
h2 = sample_histogram(hist, factor=f2, trim=trim)
c, e = compute_coverage_apx(h2, k, r)
if c > constants.AUTO_SAMPLE_TARGET_COVERAGE:
f2 += s
else:
h = h2
f = f2
f2 -= s
s //= 2
return h, f, c, e
def remove_noise(hist):
<|code_end|>
. Use current file imports:
(import random
from collections import defaultdict
from covest_poisson import poisson_dist
from math import exp, floor, ceil
from scipy.stats import binom
from covest import constants
from .utils import estimate_p, kmer_to_read_coverage, fix_coverage, verbose_print)
and context including class names, function names, or small code snippets from other files:
# Path: covest/constants.py
# MAX_EXP = 200
# GRID_DEPTH = 3
# INF = float('inf')
# VERBOSE = True
# PLOT_LOG_SCALE = True
# USE_BIGFLOAT = False
# STEP = 1.1
# OPTIMIZATION_METHOD = 'L-BFGS-B'
# INITIAL_GRID_COUNT = 20
# INITIAL_GRID_STEP = 3
# DEFAULT_ERR_SCALE = 1
# DEFAULT_K = 21
# DEFAULT_READ_LENGTH = 100
# DEFAULT_REPEAT_MODEL = 0
# DEFAULT_MIN_SINGLECOPY_RATIO = 0.3
# AUTO_SAMPLE_TARGET_COVERAGE = 12
# AUTO_TRIM_PRECISION = 6
# NOISE_THRESHOLD = 10**-6
# MAX_ERRORS = 8
# MAX_NOTRIM = 25
# DEFAULT_THREAD_COUNT = cpu_count()
# DEFAULT_THREAD_COUNT = 2
#
# Path: covest/utils.py
# def estimate_p(cc, alpha):
# return (cc * (alpha - 1)) / (alpha * cc - alpha - cc)
#
# def kmer_to_read_coverage(coverage, k, r):
# return coverage * r / (r - k + 1)
#
# def fix_coverage(coverage):
# return inverse(lambda c: (c - c * exp(-c)) / (1 - exp(-c) - c * exp(-c)))(coverage)
#
# def verbose_print(message):
# if not constants.VERBOSE:
# return
# sys.stderr.write(message + "\n")
. Output only the next line. | total = sum(hist.values()) |
Predict the next line after this snippet: <|code_start|> ['alt E', 'Finder', ['UIElementRole::custom_ui']],
['__FlipScrollWheel__', 'flipscrollwheel_vertical', ['Finder', 'cmd', 'built_in_keyboard_and_trackpad']],
['ctrl cmd F', 'cmd F', ['VIRTUALMACHINE']],
]
result = '''
<appdef>
<appname>BILIBILI</appname>
<equal>com.typcn.Bilibili</equal>
</appdef>
<appdef>
<appname>Finder</appname>
<equal>com.apple.finder</equal>
</appdef>
<deviceproductdef>
<productname>CHERRY_3494_PRODUCT</productname>
<productid>0x0011</productid>
</deviceproductdef>
<deviceproductdef>
<productname>built_in_keyboard_and_trackpad_PRODUCT</productname>
<productid>0x0259</productid>
</deviceproductdef>
<devicevendordef>
<vendorname>CHERRY_3494_VENDOR</vendorname>
<vendorid>0x046a</vendorid>
</devicevendordef>
<devicevendordef>
<vendorname>built_in_keyboard_and_trackpad_VENDOR</vendorname>
<vendorid>0x05ac</vendorid>
</devicevendordef>
<|code_end|>
using the current file's imports:
from easy_karabiner.query import DefinitionBucket
from easy_karabiner.parse import *
and any relevant context from other files:
# Path: easy_karabiner/query.py
# class DefinitionBucket(object):
# """This class is used to store global `Definition` objects,
# so we can create a `Definition` object from anywhere,
# and found it by the original value used to define.
# """
# def __init__(self):
# self.buckets = {
# 'filter': {},
# 'key': {},
# }
#
# @classmethod
# def get_instance(cls, reset=False):
# if not hasattr(cls, '_instance') or reset:
# cls._instance = cls()
# return cls._instance
#
# @classmethod
# def get_all_definitions(cls):
# list_of_defos = [d.values() for d in cls.get_instance().buckets.values()]
# defos = set(chain.from_iterable(chain.from_iterable(list_of_defos)))
# return sorted(defos, key=lambda f: (f.get_def_tag_name(), f.get_name()))
#
# @classmethod
# def put(cls, category, name, definitions):
# cls.get_instance().buckets[category][name] = definitions
#
# @classmethod
# def get(cls, category, name):
# return cls.get_instance().buckets[category].get(name)
#
# @classmethod
# def has(cls, category, name):
# return name in cls.get_instance().buckets[category]
#
# @classmethod
# def clear(cls):
# cls.get_instance(reset=True)
. Output only the next line. | <replacementdef> |
Continue the code snippet: <|code_start|># coding: utf-8
from __future__ import print_function
__all__ = ['get_app_info', 'get_all_app_info',
'get_peripheral_info', 'get_all_peripheral_info']
<|code_end|>
. Use current file imports:
import os
import subprocess
from . import util
from .fucking_string import ensure_utf8
and context (classes, functions, or code) from other files:
# Path: easy_karabiner/fucking_string.py
# def ensure_utf8(s):
# # convert from any object to `unicode`
# if not isinstance(s, basestring):
# s = unicode(s)
#
# if isinstance(s, unicode):
# s = s.encode('utf-8')
# return unicode(s, encoding='utf-8')
. Output only the next line. | def call(cmd, **kwargs): |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import print_function
class Generator(BaseXML):
"""Construct Karabiner favorite XML tree
>>> g = Generator()
>>> s = '''
... <root>
... <Easy-Karabiner>{version}</Easy-Karabiner>
... <item>
<|code_end|>
, generate the next line using the imports in this file:
from . import __version__
from . import parse
from . import config
from .basexml import BaseXML
from .util import print_info
from . import util
import doctest
and context (functions, classes, or occasionally code) from other files:
# Path: easy_karabiner/basexml.py
# class BaseXML(object):
# xml_parser = etree.XMLParser(strip_cdata=False)
#
# @classmethod
# def unescape(cls, s):
# return saxutils.unescape(s, {
# """: '"',
# "'": "'",
# })
#
# @classmethod
# def parse(cls, filepath):
# return etree.parse(filepath).getroot()
#
# @classmethod
# def parse_string(cls, xml_str):
# return etree.fromstring(xml_str, cls.xml_parser)
#
# @classmethod
# def get_class_name(cls):
# return cls.__name__
#
# @classmethod
# def is_cdata_text(cls, text):
# return text.startswith('<![CDATA[') and text.endswith(']]>')
#
# @classmethod
# def remove_cdata_mark(cls, text):
# return text[len('<![CDATA['):-len(']]>')]
#
# @classmethod
# def create_cdata_text(cls, text):
# # do NOT use `etree.CDATA`
# return '<![CDATA[%s]]>' % text
#
# @classmethod
# def assign_text_attribute(cls, etree_element, text):
# if text is not None:
# etree_element.text = ensure_utf8(text)
# else:
# etree_element.text = text
#
# @classmethod
# def create_tag(cls, name, text=None, **kwargs):
# et = etree.Element(name, **kwargs)
# cls.assign_text_attribute(et, text)
# return et
#
# @classmethod
# def pretty_text(cls, elem, indent=" ", level=0):
# """WARNING: This method would change the construct of XML tree"""
# i = "\n" + level * indent
#
# if len(elem) == 0:
# if elem.text is not None:
# lines = elem.text.split('\n')
# if len(lines) > 1:
# if not lines[0].startswith(' '):
# lines[0] = (i + indent) + lines[0]
# if lines[-1].strip() == '':
# lines.pop()
# elem.text = (i + indent).join(lines) + i
# else:
# for subelem in elem:
# BaseXML.pretty_text(subelem, indent, level + 1)
#
# return elem
#
# @classmethod
# def to_format_str(cls, xml_tree, pretty_text=True):
# indent = " "
# if pretty_text:
# BaseXML.pretty_text(xml_tree, indent=indent)
# xml_string = etree.tostring(xml_tree)
# xml_string = minidom.parseString(xml_string).toprettyxml(indent=indent)
# xml_string = cls.unescape(xml_string)
# return xml_string
#
# def to_xml(self):
# """NOTICE: This method must be a REENTRANT function, which means
# it should NOT change status or modify any member of `self` object.
# Because other methods may change the construct of the XML tree.
# """
# raise exception.NeedOverrideError()
#
# def to_str(self, pretty_text=True, remove_first_line=False):
# xml_str = self.to_format_str(self.to_xml(), pretty_text=pretty_text)
#
# if remove_first_line:
# lines = xml_str.split('\n')
# if len(lines[-1].strip()) == 0:
# # remove last blank line
# lines = lines[1:-1]
# else:
# lines = lines[1:]
# xml_str = '\n'.join(lines)
#
# return xml_str
#
# def __str__(self):
# # `remove_first_line=True` is used to remove version tag in the first line
# return self.to_str(remove_first_line=True)
#
# Path: easy_karabiner/util.py
# def print_info(msg):
# print_message(msg, color='green')
. Output only the next line. | ... <name>Easy-Karabiner</name> |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import print_function
class Generator(BaseXML):
"""Construct Karabiner favorite XML tree
>>> g = Generator()
>>> s = '''
... <root>
... <Easy-Karabiner>{version}</Easy-Karabiner>
... <item>
... <name>Easy-Karabiner</name>
... <item>
... <name>Enable</name>
<|code_end|>
. Use current file imports:
from . import __version__
from . import parse
from . import config
from .basexml import BaseXML
from .util import print_info
from . import util
import doctest
and context (classes, functions, or code) from other files:
# Path: easy_karabiner/basexml.py
# class BaseXML(object):
# xml_parser = etree.XMLParser(strip_cdata=False)
#
# @classmethod
# def unescape(cls, s):
#         return saxutils.unescape(s, {
#             "&quot;": '"',
#             "&apos;": "'",
#         })
#
# @classmethod
# def parse(cls, filepath):
# return etree.parse(filepath).getroot()
#
# @classmethod
# def parse_string(cls, xml_str):
# return etree.fromstring(xml_str, cls.xml_parser)
#
# @classmethod
# def get_class_name(cls):
# return cls.__name__
#
# @classmethod
# def is_cdata_text(cls, text):
# return text.startswith('<![CDATA[') and text.endswith(']]>')
#
# @classmethod
# def remove_cdata_mark(cls, text):
# return text[len('<![CDATA['):-len(']]>')]
#
# @classmethod
# def create_cdata_text(cls, text):
# # do NOT use `etree.CDATA`
# return '<![CDATA[%s]]>' % text
#
# @classmethod
# def assign_text_attribute(cls, etree_element, text):
# if text is not None:
# etree_element.text = ensure_utf8(text)
# else:
# etree_element.text = text
#
# @classmethod
# def create_tag(cls, name, text=None, **kwargs):
# et = etree.Element(name, **kwargs)
# cls.assign_text_attribute(et, text)
# return et
#
# @classmethod
# def pretty_text(cls, elem, indent=" ", level=0):
# """WARNING: This method would change the construct of XML tree"""
# i = "\n" + level * indent
#
# if len(elem) == 0:
# if elem.text is not None:
# lines = elem.text.split('\n')
# if len(lines) > 1:
# if not lines[0].startswith(' '):
# lines[0] = (i + indent) + lines[0]
# if lines[-1].strip() == '':
# lines.pop()
# elem.text = (i + indent).join(lines) + i
# else:
# for subelem in elem:
# BaseXML.pretty_text(subelem, indent, level + 1)
#
# return elem
#
# @classmethod
# def to_format_str(cls, xml_tree, pretty_text=True):
# indent = " "
# if pretty_text:
# BaseXML.pretty_text(xml_tree, indent=indent)
# xml_string = etree.tostring(xml_tree)
# xml_string = minidom.parseString(xml_string).toprettyxml(indent=indent)
# xml_string = cls.unescape(xml_string)
# return xml_string
#
# def to_xml(self):
# """NOTICE: This method must be a REENTRANT function, which means
# it should NOT change status or modify any member of `self` object.
# Because other methods may change the construct of the XML tree.
# """
# raise exception.NeedOverrideError()
#
# def to_str(self, pretty_text=True, remove_first_line=False):
# xml_str = self.to_format_str(self.to_xml(), pretty_text=pretty_text)
#
# if remove_first_line:
# lines = xml_str.split('\n')
# if len(lines[-1].strip()) == 0:
# # remove last blank line
# lines = lines[1:-1]
# else:
# lines = lines[1:]
# xml_str = '\n'.join(lines)
#
# return xml_str
#
# def __str__(self):
# # `remove_first_line=True` is used to remove version tag in the first line
# return self.to_str(remove_first_line=True)
#
# Path: easy_karabiner/util.py
# def print_info(msg):
# print_message(msg, color='green')
. Output only the next line. | ... <identifier>private.easy_karabiner</identifier> |
Given the code snippet: <|code_start|> </vkchangeinputsourcedef>'''
util.assert_xml_equal(d, s)
def test_vkopenurldef():
d = VKOpenURL('KeyCode::VK_OPEN_URL_karabiner', 'https://pqrs.org/osx/karabiner/')
s = '''
<vkopenurldef>
<name>KeyCode::VK_OPEN_URL_karabiner</name>
<url>https://pqrs.org/osx/karabiner/</url>
</vkopenurldef>'''
util.assert_xml_equal(d, s)
d = VKOpenURL('KeyCode::VK_OPEN_URL_FINDER', '/Applications/Finder.app', background=True)
s = '''
<vkopenurldef>
<name>KeyCode::VK_OPEN_URL_FINDER</name>
<url type="file">/Applications/Finder.app</url>
<background/>
</vkopenurldef>'''
util.assert_xml_equal(d, s)
d = VKOpenURL('KeyCode::VK_OPEN_URL_Calculator', '/Applications/Calculator.app')
s = '''
<vkopenurldef>
<name>KeyCode::VK_OPEN_URL_Calculator</name>
<url type="file">/Applications/Calculator.app</url>
</vkopenurldef>'''
util.assert_xml_equal(d, s)
<|code_end|>
, generate the next line using the imports in this file:
from easy_karabiner import util
from easy_karabiner.definition import *
and context (functions, classes, or occasionally code) from other files:
# Path: easy_karabiner/util.py
# def read_python_file(pypath):
# def get_checksum(s):
# def escape_string(s):
# def encode_with_utf8(o):
# def is_hex(s):
# def is_list_or_tuple(obj):
# def split_ignore_quote(s):
# def remove_all_space(s):
# def is_xml_element_equal(node1, node2):
# def is_xml_tree_equal(tree1, tree2, ignore_tags=tuple()):
# def assert_xml_equal(xml_tree1, xml_tree2, ignore_tags=tuple()):
# def print_message(msg, color=None, err=False):
# def print_error(msg, print_stack=False):
# def print_warning(msg):
# def print_info(msg):
. Output only the next line. | d = VKOpenURL('KeyCode::VK_OPEN_URL_date_pbcopy', '#! /bin/date | /usr/bin/pbcopy') |
Predict the next line for this snippet: <|code_start|> for tag_name, tag_val in tag_val_pairs:
if len(tag_name) > 0:
tag_name, tag_attrs = self.split_name_and_attrs(tag_name)
tag = self.create_tag(tag_name, tag_val, attrib=tag_attrs)
xml_tree.append(tag)
return xml_tree
@property
def id(self):
return self.get_def_tag_name(), self.get_name()
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
return self.id == other.id
class NoNameTagDefinitionBase(DefinitionBase):
def to_xml(self):
xml_tree = self.create_tag(self.get_def_tag_name(), self.name)
return xml_tree
class App(DefinitionBase):
"""
>>> d = App('BILIBILI', 'com.typcn.Bilibili')
>>> s = '''
... <appdef>
<|code_end|>
with the help of current file imports:
from . import exception
from . import def_tag_map
from .basexml import BaseXML
from . import util
import doctest
and context from other files:
# Path: easy_karabiner/basexml.py
# class BaseXML(object):
# xml_parser = etree.XMLParser(strip_cdata=False)
#
# @classmethod
# def unescape(cls, s):
#         return saxutils.unescape(s, {
#             "&quot;": '"',
#             "&apos;": "'",
#         })
#
# @classmethod
# def parse(cls, filepath):
# return etree.parse(filepath).getroot()
#
# @classmethod
# def parse_string(cls, xml_str):
# return etree.fromstring(xml_str, cls.xml_parser)
#
# @classmethod
# def get_class_name(cls):
# return cls.__name__
#
# @classmethod
# def is_cdata_text(cls, text):
# return text.startswith('<![CDATA[') and text.endswith(']]>')
#
# @classmethod
# def remove_cdata_mark(cls, text):
# return text[len('<![CDATA['):-len(']]>')]
#
# @classmethod
# def create_cdata_text(cls, text):
# # do NOT use `etree.CDATA`
# return '<![CDATA[%s]]>' % text
#
# @classmethod
# def assign_text_attribute(cls, etree_element, text):
# if text is not None:
# etree_element.text = ensure_utf8(text)
# else:
# etree_element.text = text
#
# @classmethod
# def create_tag(cls, name, text=None, **kwargs):
# et = etree.Element(name, **kwargs)
# cls.assign_text_attribute(et, text)
# return et
#
# @classmethod
# def pretty_text(cls, elem, indent=" ", level=0):
# """WARNING: This method would change the construct of XML tree"""
# i = "\n" + level * indent
#
# if len(elem) == 0:
# if elem.text is not None:
# lines = elem.text.split('\n')
# if len(lines) > 1:
# if not lines[0].startswith(' '):
# lines[0] = (i + indent) + lines[0]
# if lines[-1].strip() == '':
# lines.pop()
# elem.text = (i + indent).join(lines) + i
# else:
# for subelem in elem:
# BaseXML.pretty_text(subelem, indent, level + 1)
#
# return elem
#
# @classmethod
# def to_format_str(cls, xml_tree, pretty_text=True):
# indent = " "
# if pretty_text:
# BaseXML.pretty_text(xml_tree, indent=indent)
# xml_string = etree.tostring(xml_tree)
# xml_string = minidom.parseString(xml_string).toprettyxml(indent=indent)
# xml_string = cls.unescape(xml_string)
# return xml_string
#
# def to_xml(self):
# """NOTICE: This method must be a REENTRANT function, which means
# it should NOT change status or modify any member of `self` object.
# Because other methods may change the construct of the XML tree.
# """
# raise exception.NeedOverrideError()
#
# def to_str(self, pretty_text=True, remove_first_line=False):
# xml_str = self.to_format_str(self.to_xml(), pretty_text=pretty_text)
#
# if remove_first_line:
# lines = xml_str.split('\n')
# if len(lines[-1].strip()) == 0:
# # remove last blank line
# lines = lines[1:-1]
# else:
# lines = lines[1:]
# xml_str = '\n'.join(lines)
#
# return xml_str
#
# def __str__(self):
# # `remove_first_line=True` is used to remove version tag in the first line
# return self.to_str(remove_first_line=True)
, which may contain function names, class names, or code. Output only the next line. | ... <appname>BILIBILI</appname> |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import print_function
def is_defined_filter(val):
return query_filter_class_names(val, scope='all')
def is_defined_key(val):
if get_key_alias(val.lower()):
return True
else:
for k in [val, val.upper(), val.lower()]:
if KeyHeaderQuery.query(k):
return True
return DefinitionBucket.has('key', val)
def is_predefined_modifier(key):
key = get_key_alias(key.lower()) or key
for k in [key, key.upper(), key.lower()]:
if KeyHeaderQuery.query(k) == 'ModifierFlag':
return True
parts = key.split('::', 1)
<|code_end|>
, generate the next line using the imports in this file:
import os
import glob
from itertools import chain
from . import alias
from . import config
from . import exception
from . import definition
from . import def_tag_map
from .basexml import BaseXML
and context (functions, classes, or occasionally code) from other files:
# Path: easy_karabiner/basexml.py
# class BaseXML(object):
# xml_parser = etree.XMLParser(strip_cdata=False)
#
# @classmethod
# def unescape(cls, s):
#         return saxutils.unescape(s, {
#             "&quot;": '"',
#             "&apos;": "'",
#         })
#
# @classmethod
# def parse(cls, filepath):
# return etree.parse(filepath).getroot()
#
# @classmethod
# def parse_string(cls, xml_str):
# return etree.fromstring(xml_str, cls.xml_parser)
#
# @classmethod
# def get_class_name(cls):
# return cls.__name__
#
# @classmethod
# def is_cdata_text(cls, text):
# return text.startswith('<![CDATA[') and text.endswith(']]>')
#
# @classmethod
# def remove_cdata_mark(cls, text):
# return text[len('<![CDATA['):-len(']]>')]
#
# @classmethod
# def create_cdata_text(cls, text):
# # do NOT use `etree.CDATA`
# return '<![CDATA[%s]]>' % text
#
# @classmethod
# def assign_text_attribute(cls, etree_element, text):
# if text is not None:
# etree_element.text = ensure_utf8(text)
# else:
# etree_element.text = text
#
# @classmethod
# def create_tag(cls, name, text=None, **kwargs):
# et = etree.Element(name, **kwargs)
# cls.assign_text_attribute(et, text)
# return et
#
# @classmethod
# def pretty_text(cls, elem, indent=" ", level=0):
# """WARNING: This method would change the construct of XML tree"""
# i = "\n" + level * indent
#
# if len(elem) == 0:
# if elem.text is not None:
# lines = elem.text.split('\n')
# if len(lines) > 1:
# if not lines[0].startswith(' '):
# lines[0] = (i + indent) + lines[0]
# if lines[-1].strip() == '':
# lines.pop()
# elem.text = (i + indent).join(lines) + i
# else:
# for subelem in elem:
# BaseXML.pretty_text(subelem, indent, level + 1)
#
# return elem
#
# @classmethod
# def to_format_str(cls, xml_tree, pretty_text=True):
# indent = " "
# if pretty_text:
# BaseXML.pretty_text(xml_tree, indent=indent)
# xml_string = etree.tostring(xml_tree)
# xml_string = minidom.parseString(xml_string).toprettyxml(indent=indent)
# xml_string = cls.unescape(xml_string)
# return xml_string
#
# def to_xml(self):
# """NOTICE: This method must be a REENTRANT function, which means
# it should NOT change status or modify any member of `self` object.
# Because other methods may change the construct of the XML tree.
# """
# raise exception.NeedOverrideError()
#
# def to_str(self, pretty_text=True, remove_first_line=False):
# xml_str = self.to_format_str(self.to_xml(), pretty_text=pretty_text)
#
# if remove_first_line:
# lines = xml_str.split('\n')
# if len(lines[-1].strip()) == 0:
# # remove last blank line
# lines = lines[1:-1]
# else:
# lines = lines[1:]
# xml_str = '\n'.join(lines)
#
# return xml_str
#
# def __str__(self):
# # `remove_first_line=True` is used to remove version tag in the first line
# return self.to_str(remove_first_line=True)
. Output only the next line. | return len(parts) == 2 and parts[0] == 'ModifierFlag' |
Given the code snippet: <|code_start|> g = Generator(maps=MAPS, definitions=DEFINITIONS)
s = '''
<root>
<Easy-Karabiner>{version}</Easy-Karabiner>
<item>
<name>Easy-Karabiner</name>
<appdef>
<appname>BILIBILI</appname>
<equal>com.typcn.Bilibili</equal>
</appdef>
<deviceproductdef>
<productname>3494</productname>
<productid>0x0011</productid>
</deviceproductdef>
<devicevendordef>
<vendorname>CHERRY</vendorname>
<vendorid>0x046a</vendorid>
</devicevendordef>
<item>
<name>Enable</name>
<identifier>private.easy_karabiner</identifier>
<block>
<autogen> __KeyToKey__ KeyCode::COMMAND_L, KeyCode::OPTION_L</autogen>
</block>
<block>
<device_only> DeviceVendor::CHERRY, DeviceProduct::3494 </device_only>
<only>BILIBILI</only>
<autogen> __KeyToKey__ KeyCode::OPTION_L, KeyCode::COMMAND_L </autogen>
</block>
<block>
<|code_end|>
, generate the next line using the imports in this file:
from easy_karabiner import __version__
from easy_karabiner import util
from easy_karabiner import query
from easy_karabiner.generator import *
and context (functions, classes, or occasionally code) from other files:
# Path: easy_karabiner/util.py
# def read_python_file(pypath):
# def get_checksum(s):
# def escape_string(s):
# def encode_with_utf8(o):
# def is_hex(s):
# def is_list_or_tuple(obj):
# def split_ignore_quote(s):
# def remove_all_space(s):
# def is_xml_element_equal(node1, node2):
# def is_xml_tree_equal(tree1, tree2, ignore_tags=tuple()):
# def assert_xml_equal(xml_tree1, xml_tree2, ignore_tags=tuple()):
# def print_message(msg, color=None, err=False):
# def print_error(msg, print_stack=False):
# def print_warning(msg):
# def print_info(msg):
#
# Path: easy_karabiner/query.py
# @classmethod
# def query(cls, value):
# self = cls.get_instance()
# for type in self.orders:
# if self.is_in(type, value):
# return type
# return None
. Output only the next line. | <device_not> DeviceVendor::APPLE_COMPUTER, DeviceProduct::ANY </device_not> |
Based on the snippet: <|code_start|> <autogen> __KeyToKey__ KeyCode::OPTION_L, KeyCode::COMMAND_L </autogen>
</block>
<block>
<device_not> DeviceVendor::APPLE_COMPUTER, DeviceProduct::ANY </device_not>
<autogen> __FlipScrollWheel__ Option::FLIPSCROLLWHEEL_VERTICAL </autogen>
</block>
</item>
</item>
</root>
'''.format(version=__version__)
util.assert_xml_equal(g, s)
# test for reentrant of `BaseXML` methods
assert(str(g) == str(g))
query.DefinitionBucket.clear()
DEFINITIONS = {
'APP_FINDER': '/Applications/Finder.app',
'Open::Calculator': '/Applications/Calculator.app',
}
MAPS = [
['alt', 'cmd', ['fn']],
['ctrl alt F', 'APP_FINDER', ['!ModifierFlag::NONE']],
['cmd', 'alt', ['fn']],
['ctrl shift C', 'Open::Calculator', ['!none']],
]
g = Generator(maps=MAPS, definitions=DEFINITIONS)
s = '''
<root>
<Easy-Karabiner>{version}</Easy-Karabiner>
<|code_end|>
, predict the immediate next line with the help of imports:
from easy_karabiner import __version__
from easy_karabiner import util
from easy_karabiner import query
from easy_karabiner.generator import *
and context (classes, functions, sometimes code) from other files:
# Path: easy_karabiner/util.py
# def read_python_file(pypath):
# def get_checksum(s):
# def escape_string(s):
# def encode_with_utf8(o):
# def is_hex(s):
# def is_list_or_tuple(obj):
# def split_ignore_quote(s):
# def remove_all_space(s):
# def is_xml_element_equal(node1, node2):
# def is_xml_tree_equal(tree1, tree2, ignore_tags=tuple()):
# def assert_xml_equal(xml_tree1, xml_tree2, ignore_tags=tuple()):
# def print_message(msg, color=None, err=False):
# def print_error(msg, print_stack=False):
# def print_warning(msg):
# def print_info(msg):
#
# Path: easy_karabiner/query.py
# @classmethod
# def query(cls, value):
# self = cls.get_instance()
# for type in self.orders:
# if self.is_in(type, value):
# return type
# return None
. Output only the next line. | <item> |
Continue the code snippet: <|code_start|>        return saxutils.unescape(s, {
            "&quot;": '"',
            "&apos;": "'",
        })
@classmethod
def parse(cls, filepath):
return etree.parse(filepath).getroot()
@classmethod
def parse_string(cls, xml_str):
return etree.fromstring(xml_str, cls.xml_parser)
@classmethod
def get_class_name(cls):
return cls.__name__
@classmethod
def is_cdata_text(cls, text):
return text.startswith('<![CDATA[') and text.endswith(']]>')
@classmethod
def remove_cdata_mark(cls, text):
return text[len('<![CDATA['):-len(']]>')]
@classmethod
def create_cdata_text(cls, text):
# do NOT use `etree.CDATA`
return '<![CDATA[%s]]>' % text
<|code_end|>
. Use current file imports:
import lxml.etree as etree
import xml.dom.minidom as minidom
import xml.sax.saxutils as saxutils
from . import exception
from .fucking_string import ensure_utf8
and context (classes, functions, or code) from other files:
# Path: easy_karabiner/fucking_string.py
# def ensure_utf8(s):
# # convert from any object to `unicode`
# if not isinstance(s, basestring):
# s = unicode(s)
#
# if isinstance(s, unicode):
# s = s.encode('utf-8')
# return unicode(s, encoding='utf-8')
. Output only the next line. | @classmethod |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import print_function
def read_python_file(pypath):
vars = {}
with open(pypath, 'rb') as fp:
<|code_end|>
, generate the next line using the imports in this file:
import shlex
import click
import traceback
from hashlib import sha1
from . import config
from .basexml import BaseXML
from .fucking_string import ensure_utf8, is_string_type
and context (functions, classes, or occasionally code) from other files:
# Path: easy_karabiner/basexml.py
# class BaseXML(object):
# xml_parser = etree.XMLParser(strip_cdata=False)
#
# @classmethod
# def unescape(cls, s):
#         return saxutils.unescape(s, {
#             "&quot;": '"',
#             "&apos;": "'",
#         })
#
# @classmethod
# def parse(cls, filepath):
# return etree.parse(filepath).getroot()
#
# @classmethod
# def parse_string(cls, xml_str):
# return etree.fromstring(xml_str, cls.xml_parser)
#
# @classmethod
# def get_class_name(cls):
# return cls.__name__
#
# @classmethod
# def is_cdata_text(cls, text):
# return text.startswith('<![CDATA[') and text.endswith(']]>')
#
# @classmethod
# def remove_cdata_mark(cls, text):
# return text[len('<![CDATA['):-len(']]>')]
#
# @classmethod
# def create_cdata_text(cls, text):
# # do NOT use `etree.CDATA`
# return '<![CDATA[%s]]>' % text
#
# @classmethod
# def assign_text_attribute(cls, etree_element, text):
# if text is not None:
# etree_element.text = ensure_utf8(text)
# else:
# etree_element.text = text
#
# @classmethod
# def create_tag(cls, name, text=None, **kwargs):
# et = etree.Element(name, **kwargs)
# cls.assign_text_attribute(et, text)
# return et
#
# @classmethod
# def pretty_text(cls, elem, indent=" ", level=0):
# """WARNING: This method would change the construct of XML tree"""
# i = "\n" + level * indent
#
# if len(elem) == 0:
# if elem.text is not None:
# lines = elem.text.split('\n')
# if len(lines) > 1:
# if not lines[0].startswith(' '):
# lines[0] = (i + indent) + lines[0]
# if lines[-1].strip() == '':
# lines.pop()
# elem.text = (i + indent).join(lines) + i
# else:
# for subelem in elem:
# BaseXML.pretty_text(subelem, indent, level + 1)
#
# return elem
#
# @classmethod
# def to_format_str(cls, xml_tree, pretty_text=True):
# indent = " "
# if pretty_text:
# BaseXML.pretty_text(xml_tree, indent=indent)
# xml_string = etree.tostring(xml_tree)
# xml_string = minidom.parseString(xml_string).toprettyxml(indent=indent)
# xml_string = cls.unescape(xml_string)
# return xml_string
#
# def to_xml(self):
# """NOTICE: This method must be a REENTRANT function, which means
# it should NOT change status or modify any member of `self` object.
# Because other methods may change the construct of the XML tree.
# """
# raise exception.NeedOverrideError()
#
# def to_str(self, pretty_text=True, remove_first_line=False):
# xml_str = self.to_format_str(self.to_xml(), pretty_text=pretty_text)
#
# if remove_first_line:
# lines = xml_str.split('\n')
# if len(lines[-1].strip()) == 0:
# # remove last blank line
# lines = lines[1:-1]
# else:
# lines = lines[1:]
# xml_str = '\n'.join(lines)
#
# return xml_str
#
# def __str__(self):
# # `remove_first_line=True` is used to remove version tag in the first line
# return self.to_str(remove_first_line=True)
#
# Path: easy_karabiner/fucking_string.py
# def ensure_utf8(s):
# # convert from any object to `unicode`
# if not isinstance(s, basestring):
# s = unicode(s)
#
# if isinstance(s, unicode):
# s = s.encode('utf-8')
# return unicode(s, encoding='utf-8')
#
# def is_string_type(s):
# return isinstance(s, (basestring, unicode, str))
. Output only the next line. | exec(compile(fp.read(), pypath, 'exec'), {}, vars) |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import print_function
def read_python_file(pypath):
vars = {}
with open(pypath, 'rb') as fp:
exec(compile(fp.read(), pypath, 'exec'), {}, vars)
return vars
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import shlex
import click
import traceback
from hashlib import sha1
from . import config
from .basexml import BaseXML
from .fucking_string import ensure_utf8, is_string_type
and context:
# Path: easy_karabiner/basexml.py
# class BaseXML(object):
# xml_parser = etree.XMLParser(strip_cdata=False)
#
# @classmethod
# def unescape(cls, s):
#         return saxutils.unescape(s, {
#             "&quot;": '"',
#             "&apos;": "'",
#         })
#
# @classmethod
# def parse(cls, filepath):
# return etree.parse(filepath).getroot()
#
# @classmethod
# def parse_string(cls, xml_str):
# return etree.fromstring(xml_str, cls.xml_parser)
#
# @classmethod
# def get_class_name(cls):
# return cls.__name__
#
# @classmethod
# def is_cdata_text(cls, text):
# return text.startswith('<![CDATA[') and text.endswith(']]>')
#
# @classmethod
# def remove_cdata_mark(cls, text):
# return text[len('<![CDATA['):-len(']]>')]
#
# @classmethod
# def create_cdata_text(cls, text):
# # do NOT use `etree.CDATA`
# return '<![CDATA[%s]]>' % text
#
# @classmethod
# def assign_text_attribute(cls, etree_element, text):
# if text is not None:
# etree_element.text = ensure_utf8(text)
# else:
# etree_element.text = text
#
# @classmethod
# def create_tag(cls, name, text=None, **kwargs):
# et = etree.Element(name, **kwargs)
# cls.assign_text_attribute(et, text)
# return et
#
# @classmethod
# def pretty_text(cls, elem, indent=" ", level=0):
# """WARNING: This method would change the construct of XML tree"""
# i = "\n" + level * indent
#
# if len(elem) == 0:
# if elem.text is not None:
# lines = elem.text.split('\n')
# if len(lines) > 1:
# if not lines[0].startswith(' '):
# lines[0] = (i + indent) + lines[0]
# if lines[-1].strip() == '':
# lines.pop()
# elem.text = (i + indent).join(lines) + i
# else:
# for subelem in elem:
# BaseXML.pretty_text(subelem, indent, level + 1)
#
# return elem
#
# @classmethod
# def to_format_str(cls, xml_tree, pretty_text=True):
# indent = " "
# if pretty_text:
# BaseXML.pretty_text(xml_tree, indent=indent)
# xml_string = etree.tostring(xml_tree)
# xml_string = minidom.parseString(xml_string).toprettyxml(indent=indent)
# xml_string = cls.unescape(xml_string)
# return xml_string
#
# def to_xml(self):
# """NOTICE: This method must be a REENTRANT function, which means
# it should NOT change status or modify any member of `self` object.
# Because other methods may change the construct of the XML tree.
# """
# raise exception.NeedOverrideError()
#
# def to_str(self, pretty_text=True, remove_first_line=False):
# xml_str = self.to_format_str(self.to_xml(), pretty_text=pretty_text)
#
# if remove_first_line:
# lines = xml_str.split('\n')
# if len(lines[-1].strip()) == 0:
# # remove last blank line
# lines = lines[1:-1]
# else:
# lines = lines[1:]
# xml_str = '\n'.join(lines)
#
# return xml_str
#
# def __str__(self):
# # `remove_first_line=True` is used to remove version tag in the first line
# return self.to_str(remove_first_line=True)
#
# Path: easy_karabiner/fucking_string.py
# def ensure_utf8(s):
# # convert from any object to `unicode`
# if not isinstance(s, basestring):
# s = unicode(s)
#
# if isinstance(s, unicode):
# s = s.encode('utf-8')
# return unicode(s, encoding='utf-8')
#
# def is_string_type(s):
# return isinstance(s, (basestring, unicode, str))
which might include code, classes, or functions. Output only the next line. | def get_checksum(s): |
Continue the code snippet: <|code_start|>
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_categories(cls):
cls.category1 = Category.objects.create(category_name='category1')
cls.category2 = Category.objects.create(category_name='category2')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_authors(cls):
cls.author1 = Author.objects.create(author_name='Best Author 1')
cls.author2 = Author.objects.create(author_name='trueAuthorNew')
cls.author3 = Author.objects.create(author_name='zlast author')
cls.author4 = Author.objects.create(author_name='<AuthorSpecialSymbols>&"')
cls.author5 = Author.objects.create(author_name="O'Connor")
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_languages(cls):
cls.language_en = Language.objects.create(language='English')
cls.language_ru = Language.objects.create(language='Russian')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_books(cls):
test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
test_book_image_path = os.path.join(TEST_DATA_DIR, 'test_book_image.png')
books_setup = [
{
<|code_end|>
. Use current file imports:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and context (classes, functions, or code) from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
. Output only the next line. | 'name': 'First Book', |
Given the code snippet: <|code_start|> AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Third Book'))
AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Second Book'))
AddedBook.objects.create(id_user=cls.the_user5, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user6, id_book=Book.objects.get(book_name='Sixth Book'))
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_book_rating(cls):
BookRating.objects.create(id_book=Book.objects.get(book_name='Third Book'), id_user=cls.the_user1, rating=10)
BookRating.objects.create(id_book=Book.objects.get(book_name='Third Book'), id_user=cls.the_user2, rating=5)
BookRating.objects.create(id_book=Book.objects.get(book_name='Third Book'), id_user=cls.the_user5, rating=3)
BookRating.objects.create(id_book=Book.objects.get(book_name='Fourth Book'), id_user=cls.the_user1, rating=7)
BookRating.objects.create(id_book=Book.objects.get(book_name='Sixth Book'), id_user=cls.the_user1, rating=4)
BookRating.objects.create(id_book=Book.objects.get(book_name='Second Book'), id_user=cls.the_user2, rating=7)
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_book_comment(cls):
second_book = Book.objects.get(book_name='Second Book')
third_book = Book.objects.get(book_name='Third Book')
fourth_book = Book.objects.get(book_name='Fourth Book')
BookComment.objects.create(id_book=second_book, id_user=cls.the_user1, text='Test book 2 user 1')
BookComment.objects.create(id_book=second_book, id_user=cls.the_user2, text='Test book 2 user 2')
BookComment.objects.create(id_book=third_book, id_user=cls.the_user1, text='Test book 3 user 1')
BookComment.objects.create(id_book=fourth_book, id_user=cls.the_user1, text='Test book 4 user 1')
BookComment.objects.create(id_book=fourth_book, id_user=cls.the_user5, text='Test book 4 user 5')
# ------------------------------------------------------------------------------------------------------------------
<|code_end|>
, generate the next line using the imports in this file:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and context (functions, classes, or occasionally code) from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
. Output only the next line. | @classmethod |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_DIR = os.path.join(TEST_DIR, 'fixtures')
# ----------------------------------------------------------------------------------------------------------------------
class ModelTest(TestCase):
# ------------------------------------------------------------------------------------------------------------------
<|code_end|>
, generate the next line using the imports in this file:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and context (functions, classes, or occasionally code) from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
. Output only the next line. | @classmethod |
Predict the next line after this snippet: <|code_start|> 'category': cls.category1,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user2,
'blocked_book': True
},
{
'name': 'Fifth Book',
'author': cls.author1,
'category': cls.category2,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'who_added': cls.the_user1,
'private': True
},
{
'name': 'Sixth Book',
'author': cls.author2,
'category': cls.category2,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user2
},
{
'name': 'Seventh Book<>&"',
'author': cls.author4,
'category': cls.category2,
'language': cls.language_en,
<|code_end|>
using the current file's imports:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and any relevant context from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
. Output only the next line. | 'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()), |
Based on the snippet: <|code_start|> test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
test_book_image_path = os.path.join(TEST_DATA_DIR, 'test_book_image.png')
books_setup = [
{
'name': 'First Book',
'author': cls.author1,
'category': cls.category1,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user1,
'private': True
},
{
'name': 'Second Book',
'author': cls.author2,
'category': cls.category1,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'who_added': cls.the_user2,
'blocked_book': True
},
{
'name': 'Third Book',
'author': cls.author2,
'category': cls.category1,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
<|code_end|>
, predict the immediate next line with the help of imports:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and context (classes, functions, sometimes code) from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
. Output only the next line. | 'who_added': cls.the_user1, |
Predict the next line for this snippet: <|code_start|> ]
for book in books_setup:
Book.objects.create(
book_name=book['name'],
id_author=book['author'],
id_category=book['category'],
description='TEST description',
language=book['language'],
book_file=book['file'],
photo=book.get('photo', False),
who_added=book['who_added'],
private_book=book.get('private', False),
blocked_book=book.get('blocked_book', False)
)
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_added_books(cls):
AddedBook.objects.create(id_user=cls.the_user1, id_book=Book.objects.get(book_name='Third Book'))
AddedBook.objects.create(id_user=cls.the_user1, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user1, id_book=Book.objects.get(book_name='Fourth Book'))
AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Third Book'))
AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Second Book'))
AddedBook.objects.create(id_user=cls.the_user5, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user6, id_book=Book.objects.get(book_name='Sixth Book'))
# ------------------------------------------------------------------------------------------------------------------
@classmethod
<|code_end|>
with the help of current file imports:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and context from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
, which may contain function names, class names, or code. Output only the next line. | def setup_book_rating(cls): |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_DIR = os.path.join(TEST_DIR, 'fixtures')
# ----------------------------------------------------------------------------------------------------------------------
class ModelTest(TestCase):
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setUpTestData(cls):
cls.setup_users()
cls.setup_categories()
<|code_end|>
, predict the immediate next line with the help of imports:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and context (classes, functions, sometimes code) from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
. Output only the next line. | cls.setup_authors() |
Predict the next line after this snippet: <|code_start|> cls.anonymous_user = auth.get_user(client)
cls.user1 = User.objects.create_user('user1', 'user1@user1.com', 'testpassword1')
cls.user2 = User.objects.create_user('user2', 'user2@user2.com', 'testpassword2')
cls.user3 = User.objects.create_user('user3', 'user3@user3.com', 'testpassword3')
cls.user4 = User.objects.create_user('user4', 'user4@user4.com', 'testpassword4')
cls.user5 = User.objects.create_user('user5', 'user5@user5.com', 'testpassword5')
cls.user6 = User.objects.create_user('user6', 'user6@user6.com', 'testpassword6')
cls.the_user1 = TheUser.objects.get(id_user=cls.user1)
cls.the_user2 = TheUser.objects.get(id_user=cls.user2)
cls.the_user5 = TheUser.objects.get(id_user=cls.user5)
cls.the_user6 = TheUser.objects.get(id_user=cls.user6)
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_categories(cls):
cls.category1 = Category.objects.create(category_name='category1')
cls.category2 = Category.objects.create(category_name='category2')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_authors(cls):
cls.author1 = Author.objects.create(author_name='Best Author 1')
cls.author2 = Author.objects.create(author_name='trueAuthorNew')
cls.author3 = Author.objects.create(author_name='zlast author')
cls.author4 = Author.objects.create(author_name='<AuthorSpecialSymbols>&"')
cls.author5 = Author.objects.create(author_name="O'Connor")
# ------------------------------------------------------------------------------------------------------------------
<|code_end|>
using the current file's imports:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and any relevant context from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
. Output only the next line. | @classmethod |
Predict the next line for this snippet: <|code_start|> cls.language_en = Language.objects.create(language='English')
cls.language_ru = Language.objects.create(language='Russian')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_books(cls):
test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
test_book_image_path = os.path.join(TEST_DATA_DIR, 'test_book_image.png')
books_setup = [
{
'name': 'First Book',
'author': cls.author1,
'category': cls.category1,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user1,
'private': True
},
{
'name': 'Second Book',
'author': cls.author2,
'category': cls.category1,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'who_added': cls.the_user2,
'blocked_book': True
},
{
<|code_end|>
with the help of current file imports:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and context from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
, which may contain function names, class names, or code. Output only the next line. | 'name': 'Third Book', |
Predict the next line for this snippet: <|code_start|> AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Second Book'))
AddedBook.objects.create(id_user=cls.the_user5, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user6, id_book=Book.objects.get(book_name='Sixth Book'))
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_book_rating(cls):
BookRating.objects.create(id_book=Book.objects.get(book_name='Third Book'), id_user=cls.the_user1, rating=10)
BookRating.objects.create(id_book=Book.objects.get(book_name='Third Book'), id_user=cls.the_user2, rating=5)
BookRating.objects.create(id_book=Book.objects.get(book_name='Third Book'), id_user=cls.the_user5, rating=3)
BookRating.objects.create(id_book=Book.objects.get(book_name='Fourth Book'), id_user=cls.the_user1, rating=7)
BookRating.objects.create(id_book=Book.objects.get(book_name='Sixth Book'), id_user=cls.the_user1, rating=4)
BookRating.objects.create(id_book=Book.objects.get(book_name='Second Book'), id_user=cls.the_user2, rating=7)
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_book_comment(cls):
second_book = Book.objects.get(book_name='Second Book')
third_book = Book.objects.get(book_name='Third Book')
fourth_book = Book.objects.get(book_name='Fourth Book')
BookComment.objects.create(id_book=second_book, id_user=cls.the_user1, text='Test book 2 user 1')
BookComment.objects.create(id_book=second_book, id_user=cls.the_user2, text='Test book 2 user 2')
BookComment.objects.create(id_book=third_book, id_user=cls.the_user1, text='Test book 3 user 1')
BookComment.objects.create(id_book=fourth_book, id_user=cls.the_user1, text='Test book 4 user 1')
BookComment.objects.create(id_book=fourth_book, id_user=cls.the_user5, text='Test book 4 user 5')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
@mock.patch('app.signals.email_dispatch.apply_async', new=mock.Mock())
<|code_end|>
with the help of current file imports:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and context from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
, which may contain function names, class names, or code. Output only the next line. | def setup_post_messages(cls): |
Continue the code snippet: <|code_start|>
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_categories(cls):
cls.category1 = Category.objects.create(category_name='category1')
cls.category2 = Category.objects.create(category_name='category2')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_authors(cls):
cls.author1 = Author.objects.create(author_name='Best Author 1')
cls.author2 = Author.objects.create(author_name='trueAuthorNew')
cls.author3 = Author.objects.create(author_name='zlast author')
cls.author4 = Author.objects.create(author_name='<AuthorSpecialSymbols>&"')
cls.author5 = Author.objects.create(author_name="O'Connor")
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_languages(cls):
cls.language_en = Language.objects.create(language='English')
cls.language_ru = Language.objects.create(language='Russian')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_books(cls):
test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
test_book_image_path = os.path.join(TEST_DATA_DIR, 'test_book_image.png')
books_setup = [
{
<|code_end|>
. Use current file imports:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and context (classes, functions, or code) from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
. Output only the next line. | 'name': 'First Book', |
Given the code snippet: <|code_start|> 'name': 'Fourth Book',
'author': cls.author1,
'category': cls.category1,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user2,
'blocked_book': True
},
{
'name': 'Fifth Book',
'author': cls.author1,
'category': cls.category2,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'who_added': cls.the_user1,
'private': True
},
{
'name': 'Sixth Book',
'author': cls.author2,
'category': cls.category2,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user2
},
{
'name': 'Seventh Book<>&"',
'author': cls.author4,
<|code_end|>
, generate the next line using the imports in this file:
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from django.test import TestCase, Client, mock
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
and context (functions, classes, or occasionally code) from other files:
# Path: app/forms.py
# class AddBookForm(forms.Form):
# bookname = forms.CharField(max_length=150)
# author = forms.CharField(max_length=100)
# category = forms.CharField(max_length=30)
# language = forms.CharField(max_length=30)
# about = forms.CharField(widget=forms.Textarea)
# bookfile = forms.FileField(validators=[validate_pdf])
# private = forms.BooleanField(required=False)
#
# Path: app/models.py
# class TheUser(models.Model):
# class Category(models.Model):
# class Author(models.Model):
# class Language(models.Model):
# class Book(models.Model):
# class BookRating(models.Model):
# class BookComment(models.Model):
# class AddedBook(models.Model):
# class Post(models.Model):
# class SupportMessage(models.Model):
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
# def __str__(self):
# def get_api_reminders(self):
# def get_web_reminders(self):
# def update_reminder(self, field, value):
# def __str__(self):
# def __str__(self):
# def get_authors_list(author_part, do_escape=False):
# def __str__(self):
# def __str__(self):
# def get_related_objects_for_create(user_id, book_form):
# def get_related_objects_create_api(user, data):
# def get_related_objects_selected_book(user, book_id, user_key=''):
# def sort_by_book_name(user, category):
# def sort_by_author(user, category):
# def sort_by_estimation(user, category):
# def sort_by_readable(user, category=None, count=9):
# def generate_books(filtered_books):
# def fetch_books(search_data):
# def generate_existing_books(book_part):
# def exclude_private_books(user, books):
# def get_user_added_books(user):
# def get_count_added(book_id):
#
# Path: app/tests/utils.py
# class Utils:
# """
# Class with util functions which helps to generate some data.
# """
#
# # ------------------------------------------------------------------------------------------------------------------
# @staticmethod
# def generate_sort_dict(book):
# return {
# 'id': book.id,
# 'name': escape(book.book_name),
# 'author': escape(book.id_author.author_name),
# 'url': book.photo.url if book.photo else '',
# 'upload_date': book.upload_date.strftime('%d-%m-%Y')
# }
. Output only the next line. | 'category': cls.category2, |
Next line prediction: <|code_start|> cls.the_user = TheUser.objects.get(id_user=cls.user)
cls.client = APIClient()
cls.api_key = settings.API_SECRET_KEY
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_missing_params(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key, 'username': 'username'})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['detail'], {'password': ['This field is required.']})
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_too_long_username(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': 'a' * 40,
'password': 'somepassword'})
self.assertEqual(response.resolver_match.func, user_login)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['detail'], {'username': ['Ensure this field has no more than 30 characters.']})
# ------------------------------------------------------------------------------------------------------------------
def test_user_login_too_short_username(self):
response = self.client.post(reverse('user_login_api'), {'app_key': self.api_key,
'username': 'a',
'password': 'somepassword'})
self.assertEqual(response.resolver_match.func, user_login)
<|code_end|>
. Use current file imports:
(from django.conf import settings
from django.contrib.auth.models import User
from django.shortcuts import reverse
from django.test import TestCase
from rest_framework.test import APIClient
from ...views.index_views import user_login
from app.models import TheUser
)
and context including class names, function names, or small code snippets from other files:
# Path: api/views/index_views.py
# @api_view(['POST'])
# def user_login(request):
# """
# Validates request data and logs user.
# """
# validate_api_secret_key(request.data.get('app_key'))
#
# username_request_serializer = UserLoginUsernameRequest(data=request.data)
# email_request_serializer = UserLoginEmailRequest(data=request.data)
#
# if email_request_serializer.is_valid():
# user_obj = User.objects.filter(email=request.data.get('username'))
# username = user_obj[0] if len(user_obj) else None
#
# return login_response(request, username)
#
# elif username_request_serializer.is_valid():
# return login_response(request, request.data.get('username'))
#
# return invalid_data_response(username_request_serializer)
#
# Path: app/models.py
# class TheUser(models.Model):
# """
# Class for user objects in database.
# """
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
#
# id_user = models.OneToOneField(User)
# user_photo = models.ImageField(blank=True, upload_to='user', storage=OverwriteStorage())
# auth_token = models.CharField(max_length=50, null=True, blank=True)
# subscription = models.BooleanField(default=True)
# reminder = models.CharField(max_length=256, null=False, default=REMINDER_TEMPLATE)
#
# # ------------------------------------------------------------------------------------------------------------------
# def __str__(self):
# return str(self.id_user)
#
# # ------------------------------------------------------------------------------------------------------------------
# def get_api_reminders(self):
# """
# Returns the reminders only necessary for API endpoints.
# """
# data = json.loads(self.reminder)
#
# mobile_data = dict(data['common'])
# mobile_data.update(data['api'])
#
# return mobile_data
#
# # ------------------------------------------------------------------------------------------------------------------
# def get_web_reminders(self):
# """
# Returns the reminders only necessary for web part.
# """
# data = json.loads(self.reminder)
#
# web_data = dict(data['common'])
# web_data.update(data['web'])
#
# return web_data
#
# # ------------------------------------------------------------------------------------------------------------------
# def update_reminder(self, field, value):
# """
# Updates the reminder status.
# """
# data = json.loads(self.reminder)
#
# for key in data:
# if field in data[key]:
# data[key][field] = value
#
# self.reminder = json.dumps(data)
# self.save()
. Output only the next line. | self.assertEqual(response.status_code, 400)
|
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------------------------------------------------
class IndexViewsTestCase(TestCase):
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(username='api_login', email='api_login@email.com', password='123456')
cls.the_user = TheUser.objects.get(id_user=cls.user)
<|code_end|>
. Use current file imports:
from django.conf import settings
from django.contrib.auth.models import User
from django.shortcuts import reverse
from django.test import TestCase
from rest_framework.test import APIClient
from ...views.index_views import user_login
from app.models import TheUser
and context (classes, functions, or code) from other files:
# Path: api/views/index_views.py
# @api_view(['POST'])
# def user_login(request):
# """
# Validates request data and logs user.
# """
# validate_api_secret_key(request.data.get('app_key'))
#
# username_request_serializer = UserLoginUsernameRequest(data=request.data)
# email_request_serializer = UserLoginEmailRequest(data=request.data)
#
# if email_request_serializer.is_valid():
# user_obj = User.objects.filter(email=request.data.get('username'))
# username = user_obj[0] if len(user_obj) else None
#
# return login_response(request, username)
#
# elif username_request_serializer.is_valid():
# return login_response(request, request.data.get('username'))
#
# return invalid_data_response(username_request_serializer)
#
# Path: app/models.py
# class TheUser(models.Model):
# """
# Class for user objects in database.
# """
# REMINDER_TEMPLATE = json.dumps({
# "common": {
# "fb_page": True,
# "fb_group": True,
# "twitter": True,
# "vk": True,
# "disabled_all": False
# },
# "api": {
# "app_rate": True
# },
# "web": {
# "app_download": True,
# },
# })
#
# id_user = models.OneToOneField(User)
# user_photo = models.ImageField(blank=True, upload_to='user', storage=OverwriteStorage())
# auth_token = models.CharField(max_length=50, null=True, blank=True)
# subscription = models.BooleanField(default=True)
# reminder = models.CharField(max_length=256, null=False, default=REMINDER_TEMPLATE)
#
# # ------------------------------------------------------------------------------------------------------------------
# def __str__(self):
# return str(self.id_user)
#
# # ------------------------------------------------------------------------------------------------------------------
# def get_api_reminders(self):
# """
# Returns the reminders only necessary for API endpoints.
# """
# data = json.loads(self.reminder)
#
# mobile_data = dict(data['common'])
# mobile_data.update(data['api'])
#
# return mobile_data
#
# # ------------------------------------------------------------------------------------------------------------------
# def get_web_reminders(self):
# """
# Returns the reminders only necessary for web part.
# """
# data = json.loads(self.reminder)
#
# web_data = dict(data['common'])
# web_data.update(data['web'])
#
# return web_data
#
# # ------------------------------------------------------------------------------------------------------------------
# def update_reminder(self, field, value):
# """
# Updates the reminder status.
# """
# data = json.loads(self.reminder)
#
# for key in data:
# if field in data[key]:
# data[key][field] = value
#
# self.reminder = json.dumps(data)
# self.save()
. Output only the next line. | cls.client = APIClient()
|
Predict the next line after this snippet: <|code_start|> url(r'comment-add', selected_book_views.add_comment, name='add_comment_app'),
url(r'load-comments', selected_book_views.load_comments, name='load_comments_app'),
url(r'report-book', selected_book_views.report_book, name='report-book'),
# Library urls.
url(r'library', library_views.all_categories, name='categories'),
url(r'^category/(?P<category_id>\d+)/$', library_views.selected_category, name='category'),
url(r'^category/(?P<category_id>\d+)/load-books/$', library_views.load_books, name='load_books'),
url(r'sort', library_views.sort, name='book_sort'),
url(r'search-book', library_views.find_books, name='search_book_app'),
url(r'^author/(?P<author_id>\d+)/$', library_views.selected_author, name='author'),
# Profile urls.
url(r'profile/(?P<profile_id>\d+)/$', profile_views.profile, name='profile'),
url(r'profile/(?P<profile_id>\d+)/load-books/$', profile_views.load_uploaded_books, name='load_uploaded_books_app'),
url(r'upload-avatar', profile_views.upload_avatar, name='upload_avatar'),
url(r'change-password', profile_views.change_password, name='change_password'),
# About project urls.
url(r'about', about_views.about, name='about'),
url(r'send-message', about_views.send_message, name='send_message'),
# Additional urls.
url(r'logout', additional_views.user_logout, name='logout'),
url(r'unsubscribe/(?P<token>[0-9a-zA-Z_-]+)/', additional_views.unsubscribe, name='unsubscribe'),
url(r'(?P<file>[%&+ \w]+.txt)', additional_views.share_txt, name='share_txt'),
url(r'(?P<file>[%&+ \w]+.xml)', additional_views.share_xml, name='share_xml'),
url(r'^update-reminder', reminder_views.update_reminder, name='update_reminder'),
url(r'^payment-success', additional_views.payment_success, name='payment_success_app')
<|code_end|>
using the current file's imports:
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from app.views import (index_views, additional_views, add_book_views, selected_book_views, library_views,
read_book_views, profile_views, about_views, reminder_views)
and any relevant context from other files:
# Path: app/views/index_views.py
# RANDOM_BOOKS_COUNT = 6
# def index(request):
# def home(request):
# def login_response(request, username, password):
# def user_login(request):
# def is_user_exists(request, form):
# def is_mail_exists(request, form):
# def sign_in(request):
# def restore_data(request, form):
#
# Path: app/views/additional_views.py
# def user_logout(request):
# def share_txt(request, file):
# def share_xml(request, file):
# def unsubscribe(request, token):
# def payment_success(request):
#
# Path: app/views/add_book_views.py
# READ_PRIVILEGES = 0o644
# def add_book(request):
# def generate_authors(request, form):
# def generate_books(request, form):
# def add_book_successful(request, form):
#
# Path: app/views/selected_book_views.py
# BOOK_COVER_HEIGHT = 350
# COMMENTS_PER_PAGE = 20
# COMMENTS_START_PAGE = 1
# RANDOM_BOOKS_COUNT = 6
# def selected_book(request, book_id):
# def store_image(request, form):
# def add_book_to_home(request, form):
# def remove_book_from_home(request, form):
# def change_rating(request, form):
# def set_rating(request, rating_form):
# def add_comment(request, form):
# def load_comments(request, form):
# def report_book(request, form):
#
# Path: app/views/library_views.py
# MOST_READ_BOOKS_COUNT = 9
# def all_categories(request):
# def selected_category(request, category_id):
# def selected_author(request, author_id):
# def sort(request, form):
# def find_books(request, form):
# def load_books(request, category_id, form):
#
# Path: app/views/read_book_views.py
# def open_book(request, book_id):
# def set_current_page(request, form):
#
# Path: app/views/profile_views.py
# AVATAR_WIDTH = 250
# def profile(request, profile_id):
# def load_uploaded_books(request, profile_id, form):
# def upload_avatar(request):
# def change_password(request, form):
#
# Path: app/views/about_views.py
# def about(request):
# def send_message(request, form):
#
# Path: app/views/reminder_views.py
# def update_reminder(request):
. Output only the next line. | ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
handler404 = 'app.views.error_views.not_found_404'
handler400 = 'app.views.error_views.bad_request_400'
handler403 = 'app.views.error_views.permission_denied_403'
<|code_end|>
, determine the next line of code. You have imports:
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from app.views import (index_views, additional_views, add_book_views, selected_book_views, library_views,
read_book_views, profile_views, about_views, reminder_views)
and context (class names, function names, or code) available:
# Path: app/views/index_views.py
# RANDOM_BOOKS_COUNT = 6
# def index(request):
# def home(request):
# def login_response(request, username, password):
# def user_login(request):
# def is_user_exists(request, form):
# def is_mail_exists(request, form):
# def sign_in(request):
# def restore_data(request, form):
#
# Path: app/views/additional_views.py
# def user_logout(request):
# def share_txt(request, file):
# def share_xml(request, file):
# def unsubscribe(request, token):
# def payment_success(request):
#
# Path: app/views/add_book_views.py
# READ_PRIVILEGES = 0o644
# def add_book(request):
# def generate_authors(request, form):
# def generate_books(request, form):
# def add_book_successful(request, form):
#
# Path: app/views/selected_book_views.py
# BOOK_COVER_HEIGHT = 350
# COMMENTS_PER_PAGE = 20
# COMMENTS_START_PAGE = 1
# RANDOM_BOOKS_COUNT = 6
# def selected_book(request, book_id):
# def store_image(request, form):
# def add_book_to_home(request, form):
# def remove_book_from_home(request, form):
# def change_rating(request, form):
# def set_rating(request, rating_form):
# def add_comment(request, form):
# def load_comments(request, form):
# def report_book(request, form):
#
# Path: app/views/library_views.py
# MOST_READ_BOOKS_COUNT = 9
# def all_categories(request):
# def selected_category(request, category_id):
# def selected_author(request, author_id):
# def sort(request, form):
# def find_books(request, form):
# def load_books(request, category_id, form):
#
# Path: app/views/read_book_views.py
# def open_book(request, book_id):
# def set_current_page(request, form):
#
# Path: app/views/profile_views.py
# AVATAR_WIDTH = 250
# def profile(request, profile_id):
# def load_uploaded_books(request, profile_id, form):
# def upload_avatar(request):
# def change_password(request, form):
#
# Path: app/views/about_views.py
# def about(request):
# def send_message(request, form):
#
# Path: app/views/reminder_views.py
# def update_reminder(request):
. Output only the next line. | handler500 = 'app.views.error_views.internal_error_500' |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
handler404 = 'app.views.error_views.not_found_404'
handler400 = 'app.views.error_views.bad_request_400'
handler403 = 'app.views.error_views.permission_denied_403'
handler500 = 'app.views.error_views.internal_error_500'
<|code_end|>
, generate the next line using the imports in this file:
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from app.views import (index_views, additional_views, add_book_views, selected_book_views, library_views,
read_book_views, profile_views, about_views, reminder_views)
and context (functions, classes, or occasionally code) from other files:
# Path: app/views/index_views.py
# RANDOM_BOOKS_COUNT = 6
# def index(request):
# def home(request):
# def login_response(request, username, password):
# def user_login(request):
# def is_user_exists(request, form):
# def is_mail_exists(request, form):
# def sign_in(request):
# def restore_data(request, form):
#
# Path: app/views/additional_views.py
# def user_logout(request):
# def share_txt(request, file):
# def share_xml(request, file):
# def unsubscribe(request, token):
# def payment_success(request):
#
# Path: app/views/add_book_views.py
# READ_PRIVILEGES = 0o644
# def add_book(request):
# def generate_authors(request, form):
# def generate_books(request, form):
# def add_book_successful(request, form):
#
# Path: app/views/selected_book_views.py
# BOOK_COVER_HEIGHT = 350
# COMMENTS_PER_PAGE = 20
# COMMENTS_START_PAGE = 1
# RANDOM_BOOKS_COUNT = 6
# def selected_book(request, book_id):
# def store_image(request, form):
# def add_book_to_home(request, form):
# def remove_book_from_home(request, form):
# def change_rating(request, form):
# def set_rating(request, rating_form):
# def add_comment(request, form):
# def load_comments(request, form):
# def report_book(request, form):
#
# Path: app/views/library_views.py
# MOST_READ_BOOKS_COUNT = 9
# def all_categories(request):
# def selected_category(request, category_id):
# def selected_author(request, author_id):
# def sort(request, form):
# def find_books(request, form):
# def load_books(request, category_id, form):
#
# Path: app/views/read_book_views.py
# def open_book(request, book_id):
# def set_current_page(request, form):
#
# Path: app/views/profile_views.py
# AVATAR_WIDTH = 250
# def profile(request, profile_id):
# def load_uploaded_books(request, profile_id, form):
# def upload_avatar(request):
# def change_password(request, form):
#
# Path: app/views/about_views.py
# def about(request):
# def send_message(request, form):
#
# Path: app/views/reminder_views.py
# def update_reminder(request):
. Output only the next line. | urlpatterns = [ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.