Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Given snippet: <|code_start|>"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
admin.site.site_header = '视频点播管理系统'
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^admin_resumable/', include('admin_resumable.urls')),
url(r'^api/auth/token/', obtain_jwt_token),
url(r'^tv/api/', include("epg.api.urls", namespace='tv-api')),
url(r'^vod/api/', include("vodmanagement.api.urls", namespace='vod-api')),
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from rest_framework_jwt.views import obtain_jwt_token
from mysite.upload import upload_image
and context:
# Path: mysite/upload.py
# @csrf_exempt
# def upload_image(request, dir_name):
# """
# kindeditor图片上传返回数据格式说明:
# "error": 1, "message": "出错信息"}
# "error": 0, "url": "图片地址"}
# """
# result = {"error": 1, "message": "上传出错"}
# files = request.FILES.get("imgFile", None)
# if files:
# result = image_upload(files, dir_name)
# return HttpResponse(json.dumps(result), content_type="application/json")
which might include code, classes, or functions. Output only the next line. | url(r'^uploads/(?P<dir_name>[^/]+)$', upload_image, name='upload_image') |
Here is a snippet: <|code_start|>
class VodPageNumberPagination(PageNumberPagination):
page_size = 12
def get_paginated_response(self, data):
year = self.request.query_params.get('year')
return Response(OrderedDict([
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('cur_page', self.page.number),
('num_pages', self.paginator.num_pages),
('page_range', self.paginator.pager_num_range()),
('year', year),
('results', data)
]))
def paginate_queryset(self, queryset, request, view=None):
"""
Paginate a queryset if required, either returning a
page object, or `None` if pagination is not configured for this view.
"""
page_size = self.get_page_size(request)
if not page_size:
return None
cur_page = request.query_params.get(self.page_query_param)
if not cur_page:
cur_page = 1
# paginator = self.django_paginator_class(queryset, page_size)
<|code_end|>
. Write the next line using the current file imports:
from rest_framework.pagination import *
from vodmanagement.pagination import CustomPaginator
and context from other files:
# Path: vodmanagement/pagination.py
# class CustomPaginator(Paginator):
# def __init__(self, current_page=0, per_pager_num=5, *args, **kwargs):
# # 当前页
# self.current_page = int(current_page)
# # 最多显示的页码数量
# self.per_pager_num = int(per_pager_num)
# super(CustomPaginator, self).__init__(*args, **kwargs)
#
# def pager_num_range(self):
# if self.num_pages < self.per_pager_num:
# return list(range(1, self.num_pages + 1))
# # 如果页数特别多
# part = int(self.per_pager_num / 2)
# if self.current_page <= part:
# return list(range(1, self.per_pager_num + 1))
# if (self.current_page + part) > self.num_pages:
# return list(range(self.num_pages - self.per_pager_num + 1, self.num_pages + 1))
# return list(range(self.current_page - part, self.current_page + part + 1))
, which may include functions, classes, or code. Output only the next line. | self.paginator = CustomPaginator(cur_page, 5, queryset, page_size) |
Predict the next line for this snippet: <|code_start|> """
return int(self.kwargs.get('resumableTotalSize')) == self.size
def process_chunk(self, file):
if self.storage.exists(self.current_chunk_name):
self.storage.delete(self.current_chunk_name)
self.storage.save(self.current_chunk_name, file)
@property
def size(self):
"""Gets chunks size.
"""
size = 0
for chunk in self.chunk_names:
size += self.storage.size(chunk)
return size
def resotre_file(self):
zip_file = Path(self.storage.location)/Path(self.base_filename)
logging.debug('开始解压文件',zip_file)
file_zip = zipfile.ZipFile(zip_file, 'r')
for file in file_zip.namelist():
file_zip.extract(file, self.storage.location)
file_zip.close()
logging.debug('解压文件完成',self.storage.location)
os.remove(file_zip.filename)
def save_model(self, model, save_path, request):
category_id = request.POST['category']
if category_id is '':
<|code_end|>
with the help of current file imports:
import logging
import os
import re
import zipfile
from pathlib import Path
from django.core.files.base import File
from django.conf import settings
from vodmanagement import models
from datetime import datetime
and context from other files:
# Path: vodmanagement/models.py
# class UserPermission(models.Model):
# class VodManager(models.Manager):
# class FileDirectory(models.Model):
# class Meta:
# class VideoRegion(models.Model):
# class Meta:
# class VideoCategory(models.Model):
# class Meta:
# class MultipleUpload(models.Model):
# class Meta:
# class Restore(models.Model):
# class Meta:
# class Vod(models.Model):
# class Meta:
# def __str__(self):
# def has_permision(self):
# def active(self, *args, **kwargs):
# def upload_location(instance, filename):
# def upload_image_location(instance, filename):
# def upload_record_image_location(instance, filename):
# def default_description(instance):
# def default_filedir():
# def __str__(self):
# def save(self, *args, **kwargs):
# def __str__(self):
# def __str__(self):
# def save(self, *args, **kwargs):
# def colored_level(self):
# def save(self, force_insert=False, force_update=False, using=None,
# update_fields=None):
# def save(self, without_valid=False, *args, **kwargs):
# def __unicode__(self):
# def __str__(self):
# def image_tag(self):
# def get_absolute_url(self):
# def add_view_count(self):
# def colored_active(self):
# def video_format(self):
# def pre_save_post_receiver(sender, instance, *args, **kwargs):
# def post_init_receiver(sender, instance, *args, **kwargs):
# TYPES = (
# ('common', 'Common'),
# ('special', 'Special purpose'),
# )
# VIDEO_QUALITY = [
# ('SD', '标清'),
# ('HD', '高清'),
# ('FHD', '超清'),
# ]
# SAVE_PATH = (
# ('', settings.LOCAL_MEDIA_ROOT),
# )
, which may contain function names, class names, or code. Output only the next line. | category_id = models.VideoCategory.objects.first().id |
Next line prediction: <|code_start|>
class TestWeb(object):
@classmethod
def setup_class(cls):
<|code_end|>
. Use current file imports:
(from nose.tools import assert_equal
from base import *
from bibserver import web, ingest
import urllib
import os)
and context including class names, function names, or small code snippets from other files:
# Path: bibserver/web.py
# def load_account_for_login_manager(userid):
# def set_current_user():
# def standard_authentication():
# def query(path='Record'):
# def content():
# def home():
# def users():
# def get(self):
# def post(self):
# def get(self):
# def post(self):
# def get(self):
# def post(self):
# def note(nid=''):
# def default(path):
# class UploadView(MethodView):
# class CreateView(MethodView):
# class NoUploadOrCreate(MethodView):
. Output only the next line. | web.app.config['TESTING'] = True |
Predict the next line for this snippet: <|code_start|>
TESTDB = 'bibserver-test'
here = os.path.dirname(__file__)
fixtures_path = os.path.join(here, 'fixtures.json')
fixtures = json.load(open(fixtures_path))
config["ELASTIC_SEARCH_DB"] = TESTDB
<|code_end|>
with the help of current file imports:
import os
import json
from bibserver import dao
from bibserver.config import config
and context from other files:
# Path: bibserver/dao.py
# def make_id(data):
# def init_db():
# def get_conn():
# def __init__(self, **kwargs):
# def id(self):
# def version(self):
# def save(self):
# def delete(self):
# def get(cls, id_):
# def get_mapping(cls):
# def get_facets_from_mapping(cls,mapping=False,prefix=''):
# def upsert(cls, data, state=None):
# def bulk_upsert(cls, dataset, state=None):
# def delete_by_query(cls, query):
# def query(cls, q='', terms=None, facet_fields=None, flt=False, default_operator='AND', **kwargs):
# def raw_query(self, query_string):
# def about(cls, id_):
# def records(self):
# def get_by_owner_coll(cls,owner,coll):
# def delete(self):
# def __len__(self):
# def set_password(self, password):
# def check_password(self, password):
# def is_super(self):
# def collections(self):
# def notes(self):
# def delete(self):
# class InvalidDAOIDException(Exception):
# class DomainObject(UserDict.IterableUserDict):
# class Record(DomainObject):
# class Note(DomainObject):
# class Collection(DomainObject):
# class Account(DomainObject, UserMixin):
, which may contain function names, class names, or code. Output only the next line. | dao.init_db() |
Given the code snippet: <|code_start|> if not hasattr(self, 'link_name'):
self.link_name = ""
def recomputeInit(self, freecad_obj):
self.freecad_object = freecad_obj
thickness = retrieve_thickness_from_biggest_face(freecad_obj)
if compare_value(thickness, self.thickness) is False:
FreeCAD.Console.PrintError("Recomputed thickness for %s is different (%f != %f)\n" % (self.name, thickness, self.thickness))
# Prendre la normal la plus présente en terme de surface (biggest_area_faces)
# appliquer une transformation pour orienter la normal vers Z
# l'éppaiseur et le Zlength du boundedbox (ce sera donc l'éppaisseur max)
def retrieve_thickness_from_bounded_box():
return None
# Prend les deux premiere faces ayant la même normal (géré exception !! si une seule face)
# Pour chaque face recupere les points et calcule la plus petite distance entre chaque point
# de la premiere face et ceux de la deuxième face. La distance la plus petite est l'éppaisseur estimé.
def retrieve_thickness_from_biggest_face(freecad_object):
area_faces = biggest_area_faces(freecad_object.Shape)
# order faces by normals
sub_areas_face = sort_area_shape_list(area_faces[2])
# TODO : check if normals at opposite
#list_edges_face1 = Part.__sortEdges__(area_faces[2][0].Edges)
#list_edges_face2 = Part.__sortEdges__(area_faces[2][1].Edges)
list_edges_face1 = Part.__sortEdges__(sub_areas_face[0][2][0].Edges)
list_edges_face2 = Part.__sortEdges__(sub_areas_face[1][2][0].Edges)
<|code_end|>
, generate the next line using the imports in this file:
import FreeCAD
import Part
import collections
from lasercut.helper import ObjectProperties, sort_quad_vertex, biggest_area_faces, sort_area_shape_list, compare_value
and context (functions, classes, or occasionally code) from other files:
# Path: lasercut/helper.py
# class ObjectProperties(object):
# def __init__(self, **kwargs):
# self.obj_class = str(type(self).__name__)
# for k, v in kwargs.items():
# if not hasattr(self, "_allowed") or str(k) in self._allowed:
# setattr(self, k, v)
# #else:
# # FreeCAD.Console.PrintWarning(str(k) + " is not allowed for " + str(type(self)))
#
# def sort_quad_vertex(list_edges, reverse):
# list_points = []
# if not reverse:
# list_points = [list_edges[0].Vertexes[0].Point, list_edges[0].Vertexes[1].Point]
# else:
# list_points = [list_edges[0].Vertexes[1].Point, list_edges[0].Vertexes[0].Point]
# for edge in list_edges[1:-1]:
# vertex1 = edge.Vertexes[0].Point
# vertex2 = edge.Vertexes[1].Point
#
# if compare_freecad_vector(vertex1, list_points[-1]):
# list_points.append(vertex2)
# elif compare_freecad_vector(vertex2, list_points[-1]):
# list_points.append(vertex1)
# else:
# return None
#
# return list_points
#
# def biggest_area_faces(freecad_shape):
# sorted_list = sort_area_shape_faces(freecad_shape)
# biggest_area_face = sorted_list[-1]
# # contains : 0:normal, 1:area mm2, 2; list of faces
# return biggest_area_face
#
# def sort_area_shape_list(faces_list):
# return sort_area_face_common(faces_list, compare_freecad_vector)
#
# def compare_value(value1, value2, epsilon=10e-6):
# value = value1 - value2
# if math.fabs(value) < epsilon:
# return True
# return False
. Output only the next line. | list_pts_face1 = sort_quad_vertex(list_edges_face1, False) |
Using the snippet: <|code_start|> if not hasattr(self, 'hole_width_tolerance'):
self.hole_width_tolerance = 0.0
# For cross part
if not hasattr(self, 'dog_bone'):
self.dog_bone = True
if not hasattr(self, 'node_type'):
self.node_type = self.NODE_NO
if not hasattr(self, 'node_thickness'):
self.node_thickness = 0.05 * self.thickness
if not hasattr(self, 'link_name'):
self.link_name = ""
def recomputeInit(self, freecad_obj):
self.freecad_object = freecad_obj
thickness = retrieve_thickness_from_biggest_face(freecad_obj)
if compare_value(thickness, self.thickness) is False:
FreeCAD.Console.PrintError("Recomputed thickness for %s is different (%f != %f)\n" % (self.name, thickness, self.thickness))
# Prendre la normal la plus présente en terme de surface (biggest_area_faces)
# appliquer une transformation pour orienter la normal vers Z
# l'éppaiseur et le Zlength du boundedbox (ce sera donc l'éppaisseur max)
def retrieve_thickness_from_bounded_box():
return None
# Prend les deux premiere faces ayant la même normal (géré exception !! si une seule face)
# Pour chaque face recupere les points et calcule la plus petite distance entre chaque point
# de la premiere face et ceux de la deuxième face. La distance la plus petite est l'éppaisseur estimé.
def retrieve_thickness_from_biggest_face(freecad_object):
<|code_end|>
, determine the next line of code. You have imports:
import FreeCAD
import Part
import collections
from lasercut.helper import ObjectProperties, sort_quad_vertex, biggest_area_faces, sort_area_shape_list, compare_value
and context (class names, function names, or code) available:
# Path: lasercut/helper.py
# class ObjectProperties(object):
# def __init__(self, **kwargs):
# self.obj_class = str(type(self).__name__)
# for k, v in kwargs.items():
# if not hasattr(self, "_allowed") or str(k) in self._allowed:
# setattr(self, k, v)
# #else:
# # FreeCAD.Console.PrintWarning(str(k) + " is not allowed for " + str(type(self)))
#
# def sort_quad_vertex(list_edges, reverse):
# list_points = []
# if not reverse:
# list_points = [list_edges[0].Vertexes[0].Point, list_edges[0].Vertexes[1].Point]
# else:
# list_points = [list_edges[0].Vertexes[1].Point, list_edges[0].Vertexes[0].Point]
# for edge in list_edges[1:-1]:
# vertex1 = edge.Vertexes[0].Point
# vertex2 = edge.Vertexes[1].Point
#
# if compare_freecad_vector(vertex1, list_points[-1]):
# list_points.append(vertex2)
# elif compare_freecad_vector(vertex2, list_points[-1]):
# list_points.append(vertex1)
# else:
# return None
#
# return list_points
#
# def biggest_area_faces(freecad_shape):
# sorted_list = sort_area_shape_faces(freecad_shape)
# biggest_area_face = sorted_list[-1]
# # contains : 0:normal, 1:area mm2, 2; list of faces
# return biggest_area_face
#
# def sort_area_shape_list(faces_list):
# return sort_area_face_common(faces_list, compare_freecad_vector)
#
# def compare_value(value1, value2, epsilon=10e-6):
# value = value1 - value2
# if math.fabs(value) < epsilon:
# return True
# return False
. Output only the next line. | area_faces = biggest_area_faces(freecad_object.Shape) |
Predict the next line after this snippet: <|code_start|> # For cross part
if not hasattr(self, 'dog_bone'):
self.dog_bone = True
if not hasattr(self, 'node_type'):
self.node_type = self.NODE_NO
if not hasattr(self, 'node_thickness'):
self.node_thickness = 0.05 * self.thickness
if not hasattr(self, 'link_name'):
self.link_name = ""
def recomputeInit(self, freecad_obj):
self.freecad_object = freecad_obj
thickness = retrieve_thickness_from_biggest_face(freecad_obj)
if compare_value(thickness, self.thickness) is False:
FreeCAD.Console.PrintError("Recomputed thickness for %s is different (%f != %f)\n" % (self.name, thickness, self.thickness))
# Prendre la normal la plus présente en terme de surface (biggest_area_faces)
# appliquer une transformation pour orienter la normal vers Z
# l'éppaiseur et le Zlength du boundedbox (ce sera donc l'éppaisseur max)
def retrieve_thickness_from_bounded_box():
return None
# Prend les deux premiere faces ayant la même normal (géré exception !! si une seule face)
# Pour chaque face recupere les points et calcule la plus petite distance entre chaque point
# de la premiere face et ceux de la deuxième face. La distance la plus petite est l'éppaisseur estimé.
def retrieve_thickness_from_biggest_face(freecad_object):
area_faces = biggest_area_faces(freecad_object.Shape)
# order faces by normals
<|code_end|>
using the current file's imports:
import FreeCAD
import Part
import collections
from lasercut.helper import ObjectProperties, sort_quad_vertex, biggest_area_faces, sort_area_shape_list, compare_value
and any relevant context from other files:
# Path: lasercut/helper.py
# class ObjectProperties(object):
# def __init__(self, **kwargs):
# self.obj_class = str(type(self).__name__)
# for k, v in kwargs.items():
# if not hasattr(self, "_allowed") or str(k) in self._allowed:
# setattr(self, k, v)
# #else:
# # FreeCAD.Console.PrintWarning(str(k) + " is not allowed for " + str(type(self)))
#
# def sort_quad_vertex(list_edges, reverse):
# list_points = []
# if not reverse:
# list_points = [list_edges[0].Vertexes[0].Point, list_edges[0].Vertexes[1].Point]
# else:
# list_points = [list_edges[0].Vertexes[1].Point, list_edges[0].Vertexes[0].Point]
# for edge in list_edges[1:-1]:
# vertex1 = edge.Vertexes[0].Point
# vertex2 = edge.Vertexes[1].Point
#
# if compare_freecad_vector(vertex1, list_points[-1]):
# list_points.append(vertex2)
# elif compare_freecad_vector(vertex2, list_points[-1]):
# list_points.append(vertex1)
# else:
# return None
#
# return list_points
#
# def biggest_area_faces(freecad_shape):
# sorted_list = sort_area_shape_faces(freecad_shape)
# biggest_area_face = sorted_list[-1]
# # contains : 0:normal, 1:area mm2, 2; list of faces
# return biggest_area_face
#
# def sort_area_shape_list(faces_list):
# return sort_area_face_common(faces_list, compare_freecad_vector)
#
# def compare_value(value1, value2, epsilon=10e-6):
# value = value1 - value2
# if math.fabs(value) < epsilon:
# return True
# return False
. Output only the next line. | sub_areas_face = sort_area_shape_list(area_faces[2]) |
Given snippet: <|code_start|> self.type = self.TYPE_LASER_CUT
if not hasattr(self, 'thickness'):
self.thickness = 5.0
try:
#self.thickness = retrieve_thickness_from_biggest_face(self.freecad_object)
self.thickness = retrieve_thickness_from_biggest_face(kwargs['freecad_object'])
# FreeCAD.Console.PrintError("found : %f\n" % self.thickness)
except ValueError as e:
FreeCAD.Console.PrintError(e)
if not hasattr(self, 'thickness_tolerance'):
self.thickness_tolerance = 0.1 * self.thickness
if not hasattr(self, 'laser_beam_diameter'):
self.laser_beam_diameter = self.thickness / 15.0
if not hasattr(self, 'new_name'):
self.new_name = "%s_tab" % kwargs['freecad_object'].Label
if not hasattr(self, 'hole_width_tolerance'):
self.hole_width_tolerance = 0.0
# For cross part
if not hasattr(self, 'dog_bone'):
self.dog_bone = True
if not hasattr(self, 'node_type'):
self.node_type = self.NODE_NO
if not hasattr(self, 'node_thickness'):
self.node_thickness = 0.05 * self.thickness
if not hasattr(self, 'link_name'):
self.link_name = ""
def recomputeInit(self, freecad_obj):
self.freecad_object = freecad_obj
thickness = retrieve_thickness_from_biggest_face(freecad_obj)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import FreeCAD
import Part
import collections
from lasercut.helper import ObjectProperties, sort_quad_vertex, biggest_area_faces, sort_area_shape_list, compare_value
and context:
# Path: lasercut/helper.py
# class ObjectProperties(object):
# def __init__(self, **kwargs):
# self.obj_class = str(type(self).__name__)
# for k, v in kwargs.items():
# if not hasattr(self, "_allowed") or str(k) in self._allowed:
# setattr(self, k, v)
# #else:
# # FreeCAD.Console.PrintWarning(str(k) + " is not allowed for " + str(type(self)))
#
# def sort_quad_vertex(list_edges, reverse):
# list_points = []
# if not reverse:
# list_points = [list_edges[0].Vertexes[0].Point, list_edges[0].Vertexes[1].Point]
# else:
# list_points = [list_edges[0].Vertexes[1].Point, list_edges[0].Vertexes[0].Point]
# for edge in list_edges[1:-1]:
# vertex1 = edge.Vertexes[0].Point
# vertex2 = edge.Vertexes[1].Point
#
# if compare_freecad_vector(vertex1, list_points[-1]):
# list_points.append(vertex2)
# elif compare_freecad_vector(vertex2, list_points[-1]):
# list_points.append(vertex1)
# else:
# return None
#
# return list_points
#
# def biggest_area_faces(freecad_shape):
# sorted_list = sort_area_shape_faces(freecad_shape)
# biggest_area_face = sorted_list[-1]
# # contains : 0:normal, 1:area mm2, 2; list of faces
# return biggest_area_face
#
# def sort_area_shape_list(faces_list):
# return sort_area_face_common(faces_list, compare_freecad_vector)
#
# def compare_value(value1, value2, epsilon=10e-6):
# value = value1 - value2
# if math.fabs(value) < epsilon:
# return True
# return False
which might include code, classes, or functions. Output only the next line. | if compare_value(thickness, self.thickness) is False: |
Predict the next line for this snippet: <|code_start|> wire = Part.Wire([l1, a2])
face = Part.Face(wire)
node = face.extrude(FreeCAD.Vector(0, thickness, 0))
return node
def make_node_yz(width, height, thickness, x_positive = True):
p1 = FreeCAD.Vector(-thickness/2.0, 0, height / 2.0)
p2 = FreeCAD.Vector(-thickness/2.0, 0, -height / 2.0)
if x_positive is True:
pa = FreeCAD.Vector(-thickness/2.0, width, 0.)
else:
pa = FreeCAD.Vector(-thickness/2.0, -width, 0.)
l1 = Part.makeLine(p1, p2)
a2 = Part.Arc(p2, pa, p1).toShape()
wire = Part.Wire([l1, a2])
face = Part.Face(wire)
node = face.extrude(FreeCAD.Vector(thickness, 0, 0))
return node
# noeud court = 1/4 hauteur
# noeud long = 1/2 hauteur
# 2 neouds court = 2 * 1/4 hauteur espace de 16 % de la hateur au centre
def make_nodes_xz(shape, x_length, thickness, z_length, node_type, node_thickness):
<|code_end|>
with the help of current file imports:
import Part
import FreeCAD
import itertools
import lasercut.helper as helper
from lasercut.material import MaterialProperties
and context from other files:
# Path: lasercut/material.py
# class MaterialProperties(ObjectProperties):
#
# _allowed = ('type', 'thickness', 'thickness_tolerance', 'hole_width_tolerance',
# 'laser_beam_diameter', 'name', 'label', 'link_name',
# # For cross Part
# 'dog_bone', 'node_type', 'node_thickness') #'freecad_object_index'
# TYPE_LASER_CUT = 1
# NODE_NO = "No node"
# NODE_SINGLE_SHORT = 'Single short'
# NODE_SINGLE_LONG = 'Single long'
# NODE_DUAL_SHORT = 'Dual short'
#
# def __init__(self, **kwargs):
# super(MaterialProperties, self).__init__(**kwargs)
# self.freecad_object = None
# #if not hasattr(self, 'freecad_object_index'):
# # raise ValueError("Must defined freecad object")
# if not hasattr(self, 'type'):
# self.type = self.TYPE_LASER_CUT
# if not hasattr(self, 'thickness'):
# self.thickness = 5.0
# try:
# #self.thickness = retrieve_thickness_from_biggest_face(self.freecad_object)
# self.thickness = retrieve_thickness_from_biggest_face(kwargs['freecad_object'])
# # FreeCAD.Console.PrintError("found : %f\n" % self.thickness)
# except ValueError as e:
# FreeCAD.Console.PrintError(e)
# if not hasattr(self, 'thickness_tolerance'):
# self.thickness_tolerance = 0.1 * self.thickness
# if not hasattr(self, 'laser_beam_diameter'):
# self.laser_beam_diameter = self.thickness / 15.0
# if not hasattr(self, 'new_name'):
# self.new_name = "%s_tab" % kwargs['freecad_object'].Label
# if not hasattr(self, 'hole_width_tolerance'):
# self.hole_width_tolerance = 0.0
# # For cross part
# if not hasattr(self, 'dog_bone'):
# self.dog_bone = True
# if not hasattr(self, 'node_type'):
# self.node_type = self.NODE_NO
# if not hasattr(self, 'node_thickness'):
# self.node_thickness = 0.05 * self.thickness
# if not hasattr(self, 'link_name'):
# self.link_name = ""
#
# def recomputeInit(self, freecad_obj):
# self.freecad_object = freecad_obj
# thickness = retrieve_thickness_from_biggest_face(freecad_obj)
# if compare_value(thickness, self.thickness) is False:
# FreeCAD.Console.PrintError("Recomputed thickness for %s is different (%f != %f)\n" % (self.name, thickness, self.thickness))
, which may contain function names, class names, or code. Output only the next line. | if node_type == MaterialProperties.NODE_NO: |
Based on the snippet: <|code_start|># * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
def make_box(dimension_properties, top_properties, bottom_properties):
part_list = []
if dimension_properties.outside_measure is True:
make_box_outside_measure(dimension_properties, top_properties, bottom_properties, part_list)
else:
make_box_inside_measure(dimension_properties, top_properties, bottom_properties, part_list)
return part_list
def make_box_outside_measure(dimension_properties, top_properties, bottom_properties, part_list):
thickness = dimension_properties.thickness
height = dimension_properties.height
shift_height = 0.
exceeding_length = 0.
exceeding_width = 0.
<|code_end|>
, predict the immediate next line with the help of imports:
from FreeCAD import Gui
from lasercut.boxproperties import BoxProperties, TopBottomProperties
import FreeCAD
import FreeCADGui
import Part
and context (classes, functions, sometimes code) from other files:
# Path: lasercut/boxproperties.py
# class BoxProperties(ObjectProperties):
#
# _allowed = ('length', 'width', 'height', 'thickness', 'outside_measure',
# 'length_width_priority', 'length_outside', 'width_outside',
# 'bottom_position', 'bottom_shift', 'inner_radius',
# 'length_shift', 'width_shift')
#
# LENGTH_PRIORTY = "Length"
# WIDTH_PRIORTY = "Width"
# CROSS_PRIORTY = "No - Cross"
# ROUND_PRIORTY = "No - For rounding"
#
# BOTTOM_OUTSIDE = "Outside"
# BOTTOM_INSIDE = "Inside"
#
# def __init__(self, **kwargs):
# super(BoxProperties, self).__init__(**kwargs)
# if not hasattr(self, 'length'):
# self.length = 90.
# if not hasattr(self, 'width'):
# self.width = 50.
# if not hasattr(self, 'height'):
# self.height = 30.
# if not hasattr(self, 'thickness'):
# self.thickness = 3.
# if not hasattr(self, 'outside_measure'):
# self.outside_measure = True
# # Length/width
# if not hasattr(self, 'length_width_priority'):
# self.length_width_priority = self.LENGTH_PRIORTY
# if not hasattr(self, 'length_outside'):
# self.length_outside = 0.
# if not hasattr(self, 'width_outside'):
# self.width_outside = 0.
# if not hasattr(self, 'inner_radius'):
# self.inner_radius = 0.
# if not hasattr(self, 'length_shift'):
# self.length_shift = 0.
# if not hasattr(self, 'width_shift'):
# self.width_shift = 0.
#
# class TopBottomProperties(ObjectProperties):
# _allowed = ('position', 'height_shift', 'length_outside', 'width_outside', 'top_type', 'cover_length_tolerance')
#
# POSITION_OUTSIDE = "Outside"
# POSITION_INSIDE = "Inside"
# TOP_TYPE_NORMAL = "Normal"
# TOP_TYPE_COVER = "Openable"
#
# def __init__(self, **kwargs):
# super(TopBottomProperties, self).__init__(**kwargs)
# if not hasattr(self, 'position'):
# self.position = self.POSITION_OUTSIDE
# if not hasattr(self, 'height_shift'):
# self.height_shift = 0.
# if not hasattr(self, 'length_outside'):
# self.length_outside = 0.
# if not hasattr(self, 'width_outside'):
# self.width_outside = 0.
# if not hasattr(self, 'top_type'):
# self.top_type = self.TOP_TYPE_NORMAL
# if not hasattr(self, 'cover_length_tolerance'):
# self.cover_length_tolerance = 3.
. Output only the next line. | if dimension_properties.length_width_priority == BoxProperties.LENGTH_PRIORTY: |
Given the code snippet: <|code_start|> thickness = dimension_properties.thickness
height = dimension_properties.height
shift_height = 0.
exceeding_length = 0.
exceeding_width = 0.
if dimension_properties.length_width_priority == BoxProperties.LENGTH_PRIORTY:
length = dimension_properties.length
length_spacing = dimension_properties.width - 2. * thickness
width = dimension_properties.width - 2. * thickness
width_spacing = dimension_properties.length - 2. * thickness - 2. * dimension_properties.width_shift
elif dimension_properties.length_width_priority == BoxProperties.WIDTH_PRIORTY:
length = dimension_properties.length - 2. * thickness
length_spacing = dimension_properties.width - 2. * thickness - 2. * dimension_properties.length_shift
width = dimension_properties.width
width_spacing = dimension_properties.length - 2. * thickness
elif dimension_properties.length_width_priority == BoxProperties.CROSS_PRIORTY:
length = dimension_properties.length
length_spacing = dimension_properties.width - 2. * thickness - dimension_properties.width_outside
width = dimension_properties.width
width_spacing = dimension_properties.length - 2. * thickness - dimension_properties.length_outside
elif dimension_properties.length_width_priority == BoxProperties.ROUND_PRIORTY:
length = dimension_properties.length - dimension_properties.inner_radius
length_spacing = dimension_properties.width - 2. * thickness
width = dimension_properties.width - dimension_properties.inner_radius
width_spacing = dimension_properties.length - 2. * thickness
else:
raise ValueError("Length/Width priority not defined")
<|code_end|>
, generate the next line using the imports in this file:
from FreeCAD import Gui
from lasercut.boxproperties import BoxProperties, TopBottomProperties
import FreeCAD
import FreeCADGui
import Part
and context (functions, classes, or occasionally code) from other files:
# Path: lasercut/boxproperties.py
# class BoxProperties(ObjectProperties):
#
# _allowed = ('length', 'width', 'height', 'thickness', 'outside_measure',
# 'length_width_priority', 'length_outside', 'width_outside',
# 'bottom_position', 'bottom_shift', 'inner_radius',
# 'length_shift', 'width_shift')
#
# LENGTH_PRIORTY = "Length"
# WIDTH_PRIORTY = "Width"
# CROSS_PRIORTY = "No - Cross"
# ROUND_PRIORTY = "No - For rounding"
#
# BOTTOM_OUTSIDE = "Outside"
# BOTTOM_INSIDE = "Inside"
#
# def __init__(self, **kwargs):
# super(BoxProperties, self).__init__(**kwargs)
# if not hasattr(self, 'length'):
# self.length = 90.
# if not hasattr(self, 'width'):
# self.width = 50.
# if not hasattr(self, 'height'):
# self.height = 30.
# if not hasattr(self, 'thickness'):
# self.thickness = 3.
# if not hasattr(self, 'outside_measure'):
# self.outside_measure = True
# # Length/width
# if not hasattr(self, 'length_width_priority'):
# self.length_width_priority = self.LENGTH_PRIORTY
# if not hasattr(self, 'length_outside'):
# self.length_outside = 0.
# if not hasattr(self, 'width_outside'):
# self.width_outside = 0.
# if not hasattr(self, 'inner_radius'):
# self.inner_radius = 0.
# if not hasattr(self, 'length_shift'):
# self.length_shift = 0.
# if not hasattr(self, 'width_shift'):
# self.width_shift = 0.
#
# class TopBottomProperties(ObjectProperties):
# _allowed = ('position', 'height_shis notift', 'length_outside', 'width_outside', 'top_type', 'cover_length_tolerance')
#
# POSITION_OUTSIDE = "Outside"
# POSITION_INSIDE = "Inside"
# TOP_TYPE_NORMAL = "Normal"
# TOP_TYPE_COVER = "Openable"
#
# def __init__(self, **kwargs):
# super(TopBottomProperties, self).__init__(**kwargs)
# if not hasattr(self, 'position'):
# self.position = self.POSITION_OUTSIDE
# if not hasattr(self, 'height_shift'):
# self.height_shift = 0.
# if not hasattr(self, 'length_outside'):
# self.length_outside = 0.
# if not hasattr(self, 'width_outside'):
# self.width_outside = 0.
# if not hasattr(self, 'top_type'):
# self.top_type = self.TOP_TYPE_NORMAL
# if not hasattr(self, 'cover_length_tolerance'):
# self.cover_length_tolerance = 3.
. Output only the next line. | if bottom_properties.position == TopBottomProperties.POSITION_OUTSIDE: |
Here is a snippet: <|code_start|># * This module is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
__dir__ = os.path.dirname(__file__)
iconPath = os.path.join(__dir__, 'icons')
def get_freecad_object():
if len(FreeCADGui.Selection.getSelectionEx()) < 1:
raise ValueError("No selection")
obj_list = []
for selection in FreeCADGui.Selection.getSelectionEx():
obj_list.append(selection.Object)
return obj_list
def transform_shape(part_feature, new_part_feature, freecad_document):
new_part_feature.Shape = part_feature.Shape.removeSplitter()
freecad_document.recompute()
<|code_end|>
. Write the next line using the current file imports:
from FreeCAD import Gui
from lasercut.helper import biggest_area_faces
import FreeCAD
import FreeCADGui
import os
import math
import Draft
and context from other files:
# Path: lasercut/helper.py
# def biggest_area_faces(freecad_shape):
# sorted_list = sort_area_shape_faces(freecad_shape)
# biggest_area_face = sorted_list[-1]
# # contains : 0:normal, 1:area mm2, 2; list of faces
# return biggest_area_face
, which may include functions, classes, or code. Output only the next line. | normal_face_prop = biggest_area_faces(part_feature.Shape) |
Given the following code snippet before the placeholder: <|code_start|> hinges_to_removes.append(make_hinges(hinge, material_properties, last_face))
last_face = find_same_normal_face(flat_connection, last_face)
sum_angle += hinge.deg_angle
if rotation_vector is None:
rotation_vector = hinge.rotation_vector
second_shape_transformed = assemble_shape(last_face, hinge.freecad_object_2.Shape, hinge.freecad_face_2,
rotation_vector, -sum_angle)
parts_to_fuse.append(second_shape_transformed)
if index < (len(hinges_list) - 1):
last_face = find_same_normal_face(second_shape_transformed, last_face)
flat_part = assemble_list_element(parts_to_fuse)
for hinge_to_remove in hinges_to_removes:
flat_part = flat_part.cut(hinge_to_remove)
solid = hinges_list[0].solid.copy()
for hinge in hinges_list[1:]:
solid = solid.fuse(hinge.solid)
return flat_part, solid
def create_flat_connection(hinge_properties, referentiel_face):
box_x_size = hinge_properties.arc_length
box_y_size = hinge_properties.extrustion_vector.Length
box_z_size = hinge_properties.thickness
box = Part.makeBox(box_x_size, box_y_size, box_z_size, FreeCAD.Vector(0., -box_y_size/2.0, -box_z_size/2.0))
<|code_end|>
, predict the next line using imports from the current file:
import Part
import FreeCAD
import math
from lasercut.helper import transform, compare_freecad_vector, compare_value, Segment, assemble_list_element
and context including class names, function names, and sometimes code from other files:
# Path: lasercut/helper.py
# def transform(part, referentiel_face, transform_matrix=None, y_invert = False):
# normal_face = referentiel_face.normalAt(0, 0)
# # original center is (0,0,0)
# transformed_center = referentiel_face.CenterOfMass #+ normal_face.normalize() * x_origin
# if transform_matrix is None:
# transform_matrix = get_matrix_transform(referentiel_face)
# part.Placement = FreeCAD.Placement(transform_matrix).multiply(part.Placement)
# part.translate(transformed_center)
# if y_invert:
# part.rotate(transformed_center, normal_face, 180.)
# return part
#
# def compare_freecad_vector(vector1, vector2, epsilon=10e-6):
# vector = vector1.sub(vector2)
# if math.fabs(vector.x) < epsilon and math.fabs(vector.y) < epsilon and math.fabs(vector.z) < epsilon:
# return True
# return False
#
# def compare_value(value1, value2, epsilon=10e-6):
# value = value1 - value2
# if math.fabs(value) < epsilon:
# return True
# return False
#
# class Segment:
# def __init__(self, first=FreeCAD.Vector(0, 0, 0), second=FreeCAD.Vector(0, 0, 0)):
# self.A = first
# self.B = second
#
# def a(self):
# return self.A
#
# def b(self):
# return self.B * 1
#
# def clone_a(self):
# return self.A * 1
#
# def clone_b(self):
# return self.B * 1
#
# def vector(self):
# return self.B.sub(self.A)
#
# def length(self):
# return self.vector().Length
#
# def mid_point(self):
# mid_point_b = self.A.add(self.B)
# mid_point_b.scale(0.5, 0.5, 0.5)
# return mid_point_b
#
# def get_angle(self, segment):
# return self.vector().getAngle(segment.vector())
#
# def add(self, vector):
# return Segment(self.A.add(vector), self.B.add(vector))
#
# def rotate_z(self, angle):
# return Segment(rotate_vector_z(self.A, angle), rotate_vector_z(self.B, angle))
#
# def __repr__(self):
# return "Segment A: " + str(self.A) + ", B: " + str(self.B)
#
# def assemble_list_element(el_list):
# if len(el_list) == 0:
# return None
#
# part = el_list[0]
# for el in el_list[1:]:
# part = part.fuse(el)
#
# return part
. Output only the next line. | flat_connection = transform(box, referentiel_face) |
Given snippet: <|code_start|> if index % 2 == 0:
pos_list = y_pos_list
else:
pos_list = y_pos_list_2
for pos_y in pos_list:
new_hinge = create_hole_hinge(global_hinges_properties.link_clearance, hole_length, thickness,
global_hinges_properties.laser_beam_diameter)
new_hinge.translate(FreeCAD.Vector(hinge_x, pos_y, 0))
hinges_list.append(new_hinge)
index += 1
hinges_to_remove = assemble_list_element(hinges_list)
hinges_to_remove_transformed = transform(hinges_to_remove, referentiel_face)
return hinges_to_remove_transformed
def assemble_shape(face1, shape, face2, rotation_vector, rotation_angle):
new_shape = shape.copy()
new_shape.rotate(face2.CenterOfMass, rotation_vector, rotation_angle)
new_shape.translate(face1.CenterOfMass.sub(face2.CenterOfMass))
return new_shape
def find_same_normal_face(obj, ref_face):
ref_normal = ref_face.normalAt(0, 0)
found_face = None
for face in obj.Faces:
normal = face.normalAt(0, 0)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import Part
import FreeCAD
import math
from lasercut.helper import transform, compare_freecad_vector, compare_value, Segment, assemble_list_element
and context:
# Path: lasercut/helper.py
# def transform(part, referentiel_face, transform_matrix=None, y_invert = False):
# normal_face = referentiel_face.normalAt(0, 0)
# # original center is (0,0,0)
# transformed_center = referentiel_face.CenterOfMass #+ normal_face.normalize() * x_origin
# if transform_matrix is None:
# transform_matrix = get_matrix_transform(referentiel_face)
# part.Placement = FreeCAD.Placement(transform_matrix).multiply(part.Placement)
# part.translate(transformed_center)
# if y_invert:
# part.rotate(transformed_center, normal_face, 180.)
# return part
#
# def compare_freecad_vector(vector1, vector2, epsilon=10e-6):
# vector = vector1.sub(vector2)
# if math.fabs(vector.x) < epsilon and math.fabs(vector.y) < epsilon and math.fabs(vector.z) < epsilon:
# return True
# return False
#
# def compare_value(value1, value2, epsilon=10e-6):
# value = value1 - value2
# if math.fabs(value) < epsilon:
# return True
# return False
#
# class Segment:
# def __init__(self, first=FreeCAD.Vector(0, 0, 0), second=FreeCAD.Vector(0, 0, 0)):
# self.A = first
# self.B = second
#
# def a(self):
# return self.A
#
# def b(self):
# return self.B * 1
#
# def clone_a(self):
# return self.A * 1
#
# def clone_b(self):
# return self.B * 1
#
# def vector(self):
# return self.B.sub(self.A)
#
# def length(self):
# return self.vector().Length
#
# def mid_point(self):
# mid_point_b = self.A.add(self.B)
# mid_point_b.scale(0.5, 0.5, 0.5)
# return mid_point_b
#
# def get_angle(self, segment):
# return self.vector().getAngle(segment.vector())
#
# def add(self, vector):
# return Segment(self.A.add(vector), self.B.add(vector))
#
# def rotate_z(self, angle):
# return Segment(rotate_vector_z(self.A, angle), rotate_vector_z(self.B, angle))
#
# def __repr__(self):
# return "Segment A: " + str(self.A) + ", B: " + str(self.B)
#
# def assemble_list_element(el_list):
# if len(el_list) == 0:
# return None
#
# part = el_list[0]
# for el in el_list[1:]:
# part = part.fuse(el)
#
# return part
which might include code, classes, or functions. Output only the next line. | if compare_freecad_vector(ref_normal, normal) is True and compare_value(ref_face.Area, face.Area) is True: |
Based on the snippet: <|code_start|># * *
# * This file is part of LCInterlocking module. *
# * LCInterlocking module is free software; you can redistribute it and/or*
# * modify it under the terms of the GNU Lesser General Public *
# * License as published by the Free Software Foundation; either *
# * version 2.1 of the License, or (at your option) any later version. *
# * *
# * This module is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
def complete_hinges_properties(hinge, face_1, face_2, storeAll = False):
edge1, edge2, extrusion_vector = get_coplanar_edge(face_1, face_2)
seg_face_1, seg_face_2 = get_segment_from_edge(edge1, edge2)
intersection_point = do_intersection(seg_face_1,seg_face_2)
#print "intersection point : => " + str(intersection_point)
#print "seg1 " +str(seg_face_1)
#print "seg2 " +str(seg_face_2)
<|code_end|>
, predict the immediate next line with the help of imports:
import Part
import FreeCAD
import math
from lasercut.helper import transform, compare_freecad_vector, compare_value, Segment, assemble_list_element
and context (classes, functions, sometimes code) from other files:
# Path: lasercut/helper.py
# def transform(part, referentiel_face, transform_matrix=None, y_invert = False):
# normal_face = referentiel_face.normalAt(0, 0)
# # original center is (0,0,0)
# transformed_center = referentiel_face.CenterOfMass #+ normal_face.normalize() * x_origin
# if transform_matrix is None:
# transform_matrix = get_matrix_transform(referentiel_face)
# part.Placement = FreeCAD.Placement(transform_matrix).multiply(part.Placement)
# part.translate(transformed_center)
# if y_invert:
# part.rotate(transformed_center, normal_face, 180.)
# return part
#
# def compare_freecad_vector(vector1, vector2, epsilon=10e-6):
# vector = vector1.sub(vector2)
# if math.fabs(vector.x) < epsilon and math.fabs(vector.y) < epsilon and math.fabs(vector.z) < epsilon:
# return True
# return False
#
# def compare_value(value1, value2, epsilon=10e-6):
# value = value1 - value2
# if math.fabs(value) < epsilon:
# return True
# return False
#
# class Segment:
# def __init__(self, first=FreeCAD.Vector(0, 0, 0), second=FreeCAD.Vector(0, 0, 0)):
# self.A = first
# self.B = second
#
# def a(self):
# return self.A
#
# def b(self):
# return self.B * 1
#
# def clone_a(self):
# return self.A * 1
#
# def clone_b(self):
# return self.B * 1
#
# def vector(self):
# return self.B.sub(self.A)
#
# def length(self):
# return self.vector().Length
#
# def mid_point(self):
# mid_point_b = self.A.add(self.B)
# mid_point_b.scale(0.5, 0.5, 0.5)
# return mid_point_b
#
# def get_angle(self, segment):
# return self.vector().getAngle(segment.vector())
#
# def add(self, vector):
# return Segment(self.A.add(vector), self.B.add(vector))
#
# def rotate_z(self, angle):
# return Segment(rotate_vector_z(self.A, angle), rotate_vector_z(self.B, angle))
#
# def __repr__(self):
# return "Segment A: " + str(self.A) + ", B: " + str(self.B)
#
# def assemble_list_element(el_list):
# if len(el_list) == 0:
# return None
#
# part = el_list[0]
# for el in el_list[1:]:
# part = part.fuse(el)
#
# return part
. Output only the next line. | diff_length_test = compare_value(intersection_point.sub(seg_face_1.B).Length, |
Continue the code snippet: <|code_start|> edge1, edge2, extrusion_vector = get_coplanar_edge(face_1, face_2)
seg_face_1, seg_face_2 = get_segment_from_edge(edge1, edge2)
intersection_point = do_intersection(seg_face_1,seg_face_2)
#print "intersection point : => " + str(intersection_point)
#print "seg1 " +str(seg_face_1)
#print "seg2 " +str(seg_face_2)
diff_length_test = compare_value(intersection_point.sub(seg_face_1.B).Length,
intersection_point.sub(seg_face_2.B).Length,
10e-3)
if diff_length_test is False :
raise ValueError("Not an arc %f %f" %(intersection_point.sub(seg_face_1.B).Length, intersection_point.sub(seg_face_2.B).Length))
inner_arc_radius = intersection_point.sub(seg_face_1.B).Length
outer_arc_radius = intersection_point.sub(seg_face_1.A).Length
mid_arc_radius = intersection_point.sub(seg_face_1.mid_point()).Length
mid_point_b = seg_face_1.B.add(seg_face_2.B)
mid_point_b.scale(0.5, 0.5, 0.5)
dir_mid_point = mid_point_b.sub(intersection_point)
dir_mid_point.normalize()
inner_arc_point = dir_mid_point * inner_arc_radius
outter_arc_point = dir_mid_point * outer_arc_radius
if hinge.reversed_angle:
inner_arc_point = intersection_point.sub(inner_arc_point)
outter_arc_point = intersection_point.sub(outter_arc_point)
else:
inner_arc_point = inner_arc_point.add(intersection_point)
outter_arc_point = outter_arc_point.add(intersection_point)
<|code_end|>
. Use current file imports:
import Part
import FreeCAD
import math
from lasercut.helper import transform, compare_freecad_vector, compare_value, Segment, assemble_list_element
and context (classes, functions, or code) from other files:
# Path: lasercut/helper.py
# def transform(part, referentiel_face, transform_matrix=None, y_invert = False):
# normal_face = referentiel_face.normalAt(0, 0)
# # original center is (0,0,0)
# transformed_center = referentiel_face.CenterOfMass #+ normal_face.normalize() * x_origin
# if transform_matrix is None:
# transform_matrix = get_matrix_transform(referentiel_face)
# part.Placement = FreeCAD.Placement(transform_matrix).multiply(part.Placement)
# part.translate(transformed_center)
# if y_invert:
# part.rotate(transformed_center, normal_face, 180.)
# return part
#
# def compare_freecad_vector(vector1, vector2, epsilon=10e-6):
# vector = vector1.sub(vector2)
# if math.fabs(vector.x) < epsilon and math.fabs(vector.y) < epsilon and math.fabs(vector.z) < epsilon:
# return True
# return False
#
# def compare_value(value1, value2, epsilon=10e-6):
# value = value1 - value2
# if math.fabs(value) < epsilon:
# return True
# return False
#
# class Segment:
# def __init__(self, first=FreeCAD.Vector(0, 0, 0), second=FreeCAD.Vector(0, 0, 0)):
# self.A = first
# self.B = second
#
# def a(self):
# return self.A
#
# def b(self):
# return self.B * 1
#
# def clone_a(self):
# return self.A * 1
#
# def clone_b(self):
# return self.B * 1
#
# def vector(self):
# return self.B.sub(self.A)
#
# def length(self):
# return self.vector().Length
#
# def mid_point(self):
# mid_point_b = self.A.add(self.B)
# mid_point_b.scale(0.5, 0.5, 0.5)
# return mid_point_b
#
# def get_angle(self, segment):
# return self.vector().getAngle(segment.vector())
#
# def add(self, vector):
# return Segment(self.A.add(vector), self.B.add(vector))
#
# def rotate_z(self, angle):
# return Segment(rotate_vector_z(self.A, angle), rotate_vector_z(self.B, angle))
#
# def __repr__(self):
# return "Segment A: " + str(self.A) + ", B: " + str(self.B)
#
# def assemble_list_element(el_list):
# if len(el_list) == 0:
# return None
#
# part = el_list[0]
# for el in el_list[1:]:
# part = part.fuse(el)
#
# return part
. Output only the next line. | arc_middle_segment = Segment(outter_arc_point, inner_arc_point) |
Continue the code snippet: <|code_start|> "will contains lines too close and the laser will almost go twice in the same position.\n" + \
"It is advisable to choose a clearance at least twice the kerf. You could also set clearance " + \
"equals to kerf if you want laser have one passage. But in the exported SVG, hinges apertures " + \
"are in fact square with 10e-3 width, so you have to remove manually the three others sides " + \
"for all square to avoid laser returning to same place.")
parts_to_fuse = [hinges_list[0].freecad_object_1.Shape.copy()]
hinges_to_removes = []
last_face = hinges_list[0].freecad_face_1
sum_angle = 0.
rotation_vector = None
for index, hinge in enumerate(hinges_list):
if hinge.nb_link < hinge.min_links_nb:
FreeCAD.Console.PrintError("Min. link is not respected for living hinges named " + str(hinge.name))
flat_connection = create_flat_connection(hinge, last_face)
parts_to_fuse.append(flat_connection)
hinges_to_removes.append(make_hinges(hinge, material_properties, last_face))
last_face = find_same_normal_face(flat_connection, last_face)
sum_angle += hinge.deg_angle
if rotation_vector is None:
rotation_vector = hinge.rotation_vector
second_shape_transformed = assemble_shape(last_face, hinge.freecad_object_2.Shape, hinge.freecad_face_2,
rotation_vector, -sum_angle)
parts_to_fuse.append(second_shape_transformed)
if index < (len(hinges_list) - 1):
last_face = find_same_normal_face(second_shape_transformed, last_face)
<|code_end|>
. Use current file imports:
import Part
import FreeCAD
import math
from lasercut.helper import transform, compare_freecad_vector, compare_value, Segment, assemble_list_element
and context (classes, functions, or code) from other files:
# Path: lasercut/helper.py
# def transform(part, referentiel_face, transform_matrix=None, y_invert = False):
# normal_face = referentiel_face.normalAt(0, 0)
# # original center is (0,0,0)
# transformed_center = referentiel_face.CenterOfMass #+ normal_face.normalize() * x_origin
# if transform_matrix is None:
# transform_matrix = get_matrix_transform(referentiel_face)
# part.Placement = FreeCAD.Placement(transform_matrix).multiply(part.Placement)
# part.translate(transformed_center)
# if y_invert:
# part.rotate(transformed_center, normal_face, 180.)
# return part
#
# def compare_freecad_vector(vector1, vector2, epsilon=10e-6):
# vector = vector1.sub(vector2)
# if math.fabs(vector.x) < epsilon and math.fabs(vector.y) < epsilon and math.fabs(vector.z) < epsilon:
# return True
# return False
#
# def compare_value(value1, value2, epsilon=10e-6):
# value = value1 - value2
# if math.fabs(value) < epsilon:
# return True
# return False
#
# class Segment:
# def __init__(self, first=FreeCAD.Vector(0, 0, 0), second=FreeCAD.Vector(0, 0, 0)):
# self.A = first
# self.B = second
#
# def a(self):
# return self.A
#
# def b(self):
# return self.B * 1
#
# def clone_a(self):
# return self.A * 1
#
# def clone_b(self):
# return self.B * 1
#
# def vector(self):
# return self.B.sub(self.A)
#
# def length(self):
# return self.vector().Length
#
# def mid_point(self):
# mid_point_b = self.A.add(self.B)
# mid_point_b.scale(0.5, 0.5, 0.5)
# return mid_point_b
#
# def get_angle(self, segment):
# return self.vector().getAngle(segment.vector())
#
# def add(self, vector):
# return Segment(self.A.add(vector), self.B.add(vector))
#
# def rotate_z(self, angle):
# return Segment(rotate_vector_z(self.A, angle), rotate_vector_z(self.B, angle))
#
# def __repr__(self):
# return "Segment A: " + str(self.A) + ", B: " + str(self.B)
#
# def assemble_list_element(el_list):
# if len(el_list) == 0:
# return None
#
# part = el_list[0]
# for el in el_list[1:]:
# part = part.fuse(el)
#
# return part
. Output only the next line. | flat_part = assemble_list_element(parts_to_fuse) |
Next line prediction: <|code_start|># ***************************************************************************
# * *
# * Copyright (c) 2016 execuc *
# * *
# * This file is part of LCInterlocking module. *
# * LCInterlocking module is free software; you can redistribute it and/or*
# * modify it under the terms of the GNU Lesser General Public *
# * License as published by the Free Software Foundation; either *
# * version 2.1 of the License, or (at your option) any later version. *
# * *
# * This module is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
__dir__ = os.path.dirname(__file__)
iconPath = os.path.join(__dir__, 'icons')
class GroupRoundedBox:
def __init__(self, obj):
<|code_end|>
. Use current file imports:
(from FreeCAD import Gui
from panel.roundedbox import RoundedBoxProperties, TopBottomRoundedProperties, DimensionRoundedBoxParam, BottomRoundedBoxParam, TopBoxRoundedParam
from lasercut import makeroundedbox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
)
and context including class names, function names, or small code snippets from other files:
# Path: panel/roundedbox.py
# class DimensionRoundedBoxParam(ParamWidget):
# class BottomRoundedBoxParam(ParamWidget):
# class TopBoxRoundedParam(ParamWidget):
# def __init__(self, properties = None):
# def update_information(self):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makeroundedbox.py
# def make_rounded_box(dimension_properties, top_properties, bottom_properties):
# def create_contours(radius, nb_face, side_length, thickness):
# def chunkIt(seq, num):
# def create_sides(polygon_segment, height, nb_cut):
# def create_shape(p1, p2, p3, p4):
# def retrieve_segments_arc(polygon_segment):
# def get_contours_with_arc(edge, arcs_segment_list):
# def create_plane_part(dimension_properties, plane_properties):
. Output only the next line. | obj.addProperty("App::PropertyPythonObject", "box_properties").box_properties = RoundedBoxProperties()
|
Using the snippet: <|code_start|># * *
# * Copyright (c) 2016 execuc *
# * *
# * This file is part of LCInterlocking module. *
# * LCInterlocking module is free software; you can redistribute it and/or*
# * modify it under the terms of the GNU Lesser General Public *
# * License as published by the Free Software Foundation; either *
# * version 2.1 of the License, or (at your option) any later version. *
# * *
# * This module is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
__dir__ = os.path.dirname(__file__)
iconPath = os.path.join(__dir__, 'icons')
class GroupRoundedBox:
def __init__(self, obj):
obj.addProperty("App::PropertyPythonObject", "box_properties").box_properties = RoundedBoxProperties()
<|code_end|>
, determine the next line of code. You have imports:
from FreeCAD import Gui
from panel.roundedbox import RoundedBoxProperties, TopBottomRoundedProperties, DimensionRoundedBoxParam, BottomRoundedBoxParam, TopBoxRoundedParam
from lasercut import makeroundedbox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and context (class names, function names, or code) available:
# Path: panel/roundedbox.py
# class DimensionRoundedBoxParam(ParamWidget):
# class BottomRoundedBoxParam(ParamWidget):
# class TopBoxRoundedParam(ParamWidget):
# def __init__(self, properties = None):
# def update_information(self):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makeroundedbox.py
# def make_rounded_box(dimension_properties, top_properties, bottom_properties):
# def create_contours(radius, nb_face, side_length, thickness):
# def chunkIt(seq, num):
# def create_sides(polygon_segment, height, nb_cut):
# def create_shape(p1, p2, p3, p4):
# def retrieve_segments_arc(polygon_segment):
# def get_contours_with_arc(edge, arcs_segment_list):
# def create_plane_part(dimension_properties, plane_properties):
. Output only the next line. | obj.addProperty("App::PropertyPythonObject", "top_properties").top_properties = TopBottomRoundedProperties()
|
Based on the snippet: <|code_start|> '''
return None
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def claimChildren(self):
return self.Object.parts
class MakeRoundedBox:
def __init__(self, obj_box):
self.form = []
self.main_widget = QtGui.QWidget()
self.main_widget.setWindowTitle("Make rounded box")
self.parts_vbox = QtGui.QGridLayout(self.main_widget)
self.form.append(self.main_widget)
self.preview_button = QtGui.QPushButton('Preview', self.main_widget)
self.parts_vbox.addWidget(self.preview_button, 0, 0, 1, 2)
self.preview_button.clicked.connect(self.preview)
self.obj_box = obj_box
self.box_properties_origin = copy.deepcopy(self.obj_box.box_properties)
self.bottom_properties_origin = copy.deepcopy(self.obj_box.bottom_properties)
self.top_properties_origin = copy.deepcopy(self.obj_box.top_properties)
<|code_end|>
, predict the immediate next line with the help of imports:
from FreeCAD import Gui
from panel.roundedbox import RoundedBoxProperties, TopBottomRoundedProperties, DimensionRoundedBoxParam, BottomRoundedBoxParam, TopBoxRoundedParam
from lasercut import makeroundedbox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and context (classes, functions, sometimes code) from other files:
# Path: panel/roundedbox.py
# class DimensionRoundedBoxParam(ParamWidget):
# class BottomRoundedBoxParam(ParamWidget):
# class TopBoxRoundedParam(ParamWidget):
# def __init__(self, properties = None):
# def update_information(self):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makeroundedbox.py
# def make_rounded_box(dimension_properties, top_properties, bottom_properties):
# def create_contours(radius, nb_face, side_length, thickness):
# def chunkIt(seq, num):
# def create_sides(polygon_segment, height, nb_cut):
# def create_shape(p1, p2, p3, p4):
# def retrieve_segments_arc(polygon_segment):
# def get_contours_with_arc(edge, arcs_segment_list):
# def create_plane_part(dimension_properties, plane_properties):
. Output only the next line. | self.box_properties = DimensionRoundedBoxParam(self.obj_box.box_properties)
|
Based on the snippet: <|code_start|> return None
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def claimChildren(self):
return self.Object.parts
class MakeRoundedBox:
def __init__(self, obj_box):
self.form = []
self.main_widget = QtGui.QWidget()
self.main_widget.setWindowTitle("Make rounded box")
self.parts_vbox = QtGui.QGridLayout(self.main_widget)
self.form.append(self.main_widget)
self.preview_button = QtGui.QPushButton('Preview', self.main_widget)
self.parts_vbox.addWidget(self.preview_button, 0, 0, 1, 2)
self.preview_button.clicked.connect(self.preview)
self.obj_box = obj_box
self.box_properties_origin = copy.deepcopy(self.obj_box.box_properties)
self.bottom_properties_origin = copy.deepcopy(self.obj_box.bottom_properties)
self.top_properties_origin = copy.deepcopy(self.obj_box.top_properties)
self.box_properties = DimensionRoundedBoxParam(self.obj_box.box_properties)
<|code_end|>
, predict the immediate next line with the help of imports:
from FreeCAD import Gui
from panel.roundedbox import RoundedBoxProperties, TopBottomRoundedProperties, DimensionRoundedBoxParam, BottomRoundedBoxParam, TopBoxRoundedParam
from lasercut import makeroundedbox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and context (classes, functions, sometimes code) from other files:
# Path: panel/roundedbox.py
# class DimensionRoundedBoxParam(ParamWidget):
# class BottomRoundedBoxParam(ParamWidget):
# class TopBoxRoundedParam(ParamWidget):
# def __init__(self, properties = None):
# def update_information(self):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makeroundedbox.py
# def make_rounded_box(dimension_properties, top_properties, bottom_properties):
# def create_contours(radius, nb_face, side_length, thickness):
# def chunkIt(seq, num):
# def create_sides(polygon_segment, height, nb_cut):
# def create_shape(p1, p2, p3, p4):
# def retrieve_segments_arc(polygon_segment):
# def get_contours_with_arc(edge, arcs_segment_list):
# def create_plane_part(dimension_properties, plane_properties):
. Output only the next line. | self.bottom_box_param = BottomRoundedBoxParam(self.obj_box.bottom_properties)
|
Given the following code snippet before the placeholder: <|code_start|>
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def claimChildren(self):
return self.Object.parts
class MakeRoundedBox:
def __init__(self, obj_box):
self.form = []
self.main_widget = QtGui.QWidget()
self.main_widget.setWindowTitle("Make rounded box")
self.parts_vbox = QtGui.QGridLayout(self.main_widget)
self.form.append(self.main_widget)
self.preview_button = QtGui.QPushButton('Preview', self.main_widget)
self.parts_vbox.addWidget(self.preview_button, 0, 0, 1, 2)
self.preview_button.clicked.connect(self.preview)
self.obj_box = obj_box
self.box_properties_origin = copy.deepcopy(self.obj_box.box_properties)
self.bottom_properties_origin = copy.deepcopy(self.obj_box.bottom_properties)
self.top_properties_origin = copy.deepcopy(self.obj_box.top_properties)
self.box_properties = DimensionRoundedBoxParam(self.obj_box.box_properties)
self.bottom_box_param = BottomRoundedBoxParam(self.obj_box.bottom_properties)
<|code_end|>
, predict the next line using imports from the current file:
from FreeCAD import Gui
from panel.roundedbox import RoundedBoxProperties, TopBottomRoundedProperties, DimensionRoundedBoxParam, BottomRoundedBoxParam, TopBoxRoundedParam
from lasercut import makeroundedbox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and context including class names, function names, and sometimes code from other files:
# Path: panel/roundedbox.py
# class DimensionRoundedBoxParam(ParamWidget):
# class BottomRoundedBoxParam(ParamWidget):
# class TopBoxRoundedParam(ParamWidget):
# def __init__(self, properties = None):
# def update_information(self):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makeroundedbox.py
# def make_rounded_box(dimension_properties, top_properties, bottom_properties):
# def create_contours(radius, nb_face, side_length, thickness):
# def chunkIt(seq, num):
# def create_sides(polygon_segment, height, nb_cut):
# def create_shape(p1, p2, p3, p4):
# def retrieve_segments_arc(polygon_segment):
# def get_contours_with_arc(edge, arcs_segment_list):
# def create_plane_part(dimension_properties, plane_properties):
. Output only the next line. | self.top_box_param = TopBoxRoundedParam(self.obj_box.top_properties)
|
Based on the snippet: <|code_start|># * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
__dir__ = os.path.dirname(__file__)
iconPath = os.path.join(__dir__, 'icons')
class GroupRoundedBox:
def __init__(self, obj):
obj.addProperty("App::PropertyPythonObject", "box_properties").box_properties = RoundedBoxProperties()
obj.addProperty("App::PropertyPythonObject", "top_properties").top_properties = TopBottomRoundedProperties()
obj.addProperty("App::PropertyPythonObject", "bottom_properties").bottom_properties = TopBottomRoundedProperties()
obj.addProperty('App::PropertyPythonObject', 'need_recompute').need_recompute = False
obj.addProperty('App::PropertyLinkList', 'parts').parts= []
obj.Proxy = self
def onChanged(self, fp, prop):
if prop == "need_recompute":
self.execute(fp)
def execute(self, fp):
if fp.need_recompute:
fp.need_recompute = False
document = fp.Document
<|code_end|>
, predict the immediate next line with the help of imports:
from FreeCAD import Gui
from panel.roundedbox import RoundedBoxProperties, TopBottomRoundedProperties, DimensionRoundedBoxParam, BottomRoundedBoxParam, TopBoxRoundedParam
from lasercut import makeroundedbox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and context (classes, functions, sometimes code) from other files:
# Path: panel/roundedbox.py
# class DimensionRoundedBoxParam(ParamWidget):
# class BottomRoundedBoxParam(ParamWidget):
# class TopBoxRoundedParam(ParamWidget):
# def __init__(self, properties = None):
# def update_information(self):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makeroundedbox.py
# def make_rounded_box(dimension_properties, top_properties, bottom_properties):
# def create_contours(radius, nb_face, side_length, thickness):
# def chunkIt(seq, num):
# def create_sides(polygon_segment, height, nb_cut):
# def create_shape(p1, p2, p3, p4):
# def retrieve_segments_arc(polygon_segment):
# def get_contours_with_arc(edge, arcs_segment_list):
# def create_plane_part(dimension_properties, plane_properties):
. Output only the next line. | computed_parts = makeroundedbox.make_rounded_box(fp.box_properties, fp.top_properties, fp.bottom_properties)
|
Based on the snippet: <|code_start|> part_interactor.toRemove.append(helper.transform_part(screw_way_hole_plane, tab))
break
return
def make_tabs_joins(parts, tabs):
parts_element = []
for part in parts:
mat_element = helper.MaterialElement(part)
parts_element.append(mat_element)
removeParts = {}
#test to improve speed
for tab in tabs:
keyid = str(tab.group_id)
if keyid not in removeParts:
removeParts[keyid] = []
removeParts[keyid].append(tab.freecad_object.Name)
for tab in tabs:
tab_part = None
other_parts = []
keyid = str(tab.group_id)
for part in parts_element:
if part.get_name() == tab.freecad_object.Name:
tab_part = part
# else:
elif part.get_name() not in removeParts[keyid]:
other_parts.append(part)
<|code_end|>
, predict the immediate next line with the help of imports:
import Part
import FreeCAD
import lasercut.flextab as flextab
import lasercut.helper as helper
from panel.tab import TabProperties
and context (classes, functions, sometimes code) from other files:
# Path: panel/tab.py
# class BaseTabWidget(ParamWidget):
# class TabWidget(BaseTabWidget):
# class TSlotWidget(BaseTabWidget):
# class TabContinuousWidget(BaseTabWidget):
# class TabFlexWidget(BaseTabWidget):
# class TabLink(ParamWidget):
# class TabsList(object):
# def __init__(self, tab_properties):
# def get_tab_type(self):
# def __init__(self, tab_properties):
# def __init__(self, tab_properties):
# def __init__(self, tab_properties):
# def __init__(self, tab_properties):
# def __init__(self, tab_properties):
# def get_group_box(self, widget):
# def __init__(self, faces):
# def createWidgetFromTabProperties(self, tab_properties):
# def append(self, face, tab_type):
# def append_link(self, face, src_tab_name):
# def remove(self, name):
# def exist(self, name):
# def get(self, name):
# def get_linked_tabs(self, name):
# def __iter__(self):
# def get_tabs_properties(self):
. Output only the next line. | if tab.tab_type == TabProperties.TYPE_TAB: |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
# ***************************************************************************
# * *
# * Copyright (c) 2016 execuc *
# * *
# * This file is part of LCInterlocking module. *
# * LCInterlocking module is free software; you can redistribute it and/or*
# * modify it under the terms of the GNU Lesser General Public *
# * License as published by the Free Software Foundation; either *
# * version 2.1 of the License, or (at your option) any later version. *
# * *
# * This module is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
__dir__ = os.path.dirname(__file__)
iconPath = os.path.join(__dir__, 'icons')
class GroupBox:
def __init__(self, obj):
<|code_end|>
, determine the next line of code. You have imports:
from FreeCAD import Gui
from panel.box import BoxProperties, TopBottomProperties, DimensionBoxParam, LengthWidthBoxParam, BottomBoxParam, TopBoxParam
from lasercut import makebox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and context (class names, function names, or code) available:
# Path: panel/box.py
# class DimensionBoxParam(ParamWidget):
# class LengthWidthBoxParam(ParamWidget):
# class BottomBoxParam(ParamWidget):
# class TopBoxParam(ParamWidget):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makebox.py
# def make_box(dimension_properties, top_properties, bottom_properties):
# def make_box_outside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_box_inside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_z_panel(length, width, thickness, z_pos):
# def make_front_panels(length, height, thickness, spacing):
# def make_twice_half_front_panel(length, height, thickness, spacing):
# def make_side_panels(width, height, thickness, spacing):
. Output only the next line. | obj.addProperty("App::PropertyPythonObject", "box_properties").box_properties = BoxProperties() # supported https://www.freecadweb.org/wiki/Scripted_objects |
Given the code snippet: <|code_start|>
# ***************************************************************************
# * *
# * Copyright (c) 2016 execuc *
# * *
# * This file is part of LCInterlocking module. *
# * LCInterlocking module is free software; you can redistribute it and/or*
# * modify it under the terms of the GNU Lesser General Public *
# * License as published by the Free Software Foundation; either *
# * version 2.1 of the License, or (at your option) any later version. *
# * *
# * This module is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
__dir__ = os.path.dirname(__file__)
iconPath = os.path.join(__dir__, 'icons')
class GroupBox:
def __init__(self, obj):
obj.addProperty("App::PropertyPythonObject", "box_properties").box_properties = BoxProperties() # supported https://www.freecadweb.org/wiki/Scripted_objects
<|code_end|>
, generate the next line using the imports in this file:
from FreeCAD import Gui
from panel.box import BoxProperties, TopBottomProperties, DimensionBoxParam, LengthWidthBoxParam, BottomBoxParam, TopBoxParam
from lasercut import makebox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and context (functions, classes, or occasionally code) from other files:
# Path: panel/box.py
# class DimensionBoxParam(ParamWidget):
# class LengthWidthBoxParam(ParamWidget):
# class BottomBoxParam(ParamWidget):
# class TopBoxParam(ParamWidget):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makebox.py
# def make_box(dimension_properties, top_properties, bottom_properties):
# def make_box_outside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_box_inside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_z_panel(length, width, thickness, z_pos):
# def make_front_panels(length, height, thickness, spacing):
# def make_twice_half_front_panel(length, height, thickness, spacing):
# def make_side_panels(width, height, thickness, spacing):
. Output only the next line. | obj.addProperty("App::PropertyPythonObject", "top_properties").top_properties = TopBottomProperties() |
Next line prediction: <|code_start|> internals here. Since no data were pickled nothing needs to be done here.
'''
return None
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def claimChildren(self):
return self.Object.parts
class MakeBox:
def __init__(self, obj_box):
self.form = []
self.main_widget = QtGui.QWidget()
self.main_widget.setWindowTitle("Make box")
self.parts_vbox = QtGui.QGridLayout(self.main_widget)
self.form.append(self.main_widget)
self.preview_button = QtGui.QPushButton('Preview', self.main_widget)
self.parts_vbox.addWidget(self.preview_button, 0, 0, 1, 2)
self.preview_button.clicked.connect(self.preview)
self.obj_box = obj_box
self.box_properties_origin = copy.deepcopy(self.obj_box.box_properties)
self.bottom_properties_origin = copy.deepcopy(self.obj_box.bottom_properties)
self.top_properties_origin = copy.deepcopy(self.obj_box.top_properties)
<|code_end|>
. Use current file imports:
(from FreeCAD import Gui
from panel.box import BoxProperties, TopBottomProperties, DimensionBoxParam, LengthWidthBoxParam, BottomBoxParam, TopBoxParam
from lasercut import makebox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy)
and context including class names, function names, or small code snippets from other files:
# Path: panel/box.py
# class DimensionBoxParam(ParamWidget):
# class LengthWidthBoxParam(ParamWidget):
# class BottomBoxParam(ParamWidget):
# class TopBoxParam(ParamWidget):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makebox.py
# def make_box(dimension_properties, top_properties, bottom_properties):
# def make_box_outside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_box_inside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_z_panel(length, width, thickness, z_pos):
# def make_front_panels(length, height, thickness, spacing):
# def make_twice_half_front_panel(length, height, thickness, spacing):
# def make_side_panels(width, height, thickness, spacing):
. Output only the next line. | self.dim_box_param = DimensionBoxParam(self.obj_box.box_properties) |
Given snippet: <|code_start|> '''
return None
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def claimChildren(self):
return self.Object.parts
class MakeBox:
def __init__(self, obj_box):
self.form = []
self.main_widget = QtGui.QWidget()
self.main_widget.setWindowTitle("Make box")
self.parts_vbox = QtGui.QGridLayout(self.main_widget)
self.form.append(self.main_widget)
self.preview_button = QtGui.QPushButton('Preview', self.main_widget)
self.parts_vbox.addWidget(self.preview_button, 0, 0, 1, 2)
self.preview_button.clicked.connect(self.preview)
self.obj_box = obj_box
self.box_properties_origin = copy.deepcopy(self.obj_box.box_properties)
self.bottom_properties_origin = copy.deepcopy(self.obj_box.bottom_properties)
self.top_properties_origin = copy.deepcopy(self.obj_box.top_properties)
self.dim_box_param = DimensionBoxParam(self.obj_box.box_properties)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from FreeCAD import Gui
from panel.box import BoxProperties, TopBottomProperties, DimensionBoxParam, LengthWidthBoxParam, BottomBoxParam, TopBoxParam
from lasercut import makebox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and context:
# Path: panel/box.py
# class DimensionBoxParam(ParamWidget):
# class LengthWidthBoxParam(ParamWidget):
# class BottomBoxParam(ParamWidget):
# class TopBoxParam(ParamWidget):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makebox.py
# def make_box(dimension_properties, top_properties, bottom_properties):
# def make_box_outside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_box_inside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_z_panel(length, width, thickness, z_pos):
# def make_front_panels(length, height, thickness, spacing):
# def make_twice_half_front_panel(length, height, thickness, spacing):
# def make_side_panels(width, height, thickness, spacing):
which might include code, classes, or functions. Output only the next line. | self.general_box_param = LengthWidthBoxParam(self.obj_box.box_properties) |
Predict the next line after this snippet: <|code_start|> return None
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def claimChildren(self):
return self.Object.parts
class MakeBox:
def __init__(self, obj_box):
self.form = []
self.main_widget = QtGui.QWidget()
self.main_widget.setWindowTitle("Make box")
self.parts_vbox = QtGui.QGridLayout(self.main_widget)
self.form.append(self.main_widget)
self.preview_button = QtGui.QPushButton('Preview', self.main_widget)
self.parts_vbox.addWidget(self.preview_button, 0, 0, 1, 2)
self.preview_button.clicked.connect(self.preview)
self.obj_box = obj_box
self.box_properties_origin = copy.deepcopy(self.obj_box.box_properties)
self.bottom_properties_origin = copy.deepcopy(self.obj_box.bottom_properties)
self.top_properties_origin = copy.deepcopy(self.obj_box.top_properties)
self.dim_box_param = DimensionBoxParam(self.obj_box.box_properties)
self.general_box_param = LengthWidthBoxParam(self.obj_box.box_properties)
<|code_end|>
using the current file's imports:
from FreeCAD import Gui
from panel.box import BoxProperties, TopBottomProperties, DimensionBoxParam, LengthWidthBoxParam, BottomBoxParam, TopBoxParam
from lasercut import makebox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and any relevant context from other files:
# Path: panel/box.py
# class DimensionBoxParam(ParamWidget):
# class LengthWidthBoxParam(ParamWidget):
# class BottomBoxParam(ParamWidget):
# class TopBoxParam(ParamWidget):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makebox.py
# def make_box(dimension_properties, top_properties, bottom_properties):
# def make_box_outside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_box_inside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_z_panel(length, width, thickness, z_pos):
# def make_front_panels(length, height, thickness, spacing):
# def make_twice_half_front_panel(length, height, thickness, spacing):
# def make_side_panels(width, height, thickness, spacing):
. Output only the next line. | self.bottom_box_param = BottomBoxParam(self.obj_box.bottom_properties) |
Based on the snippet: <|code_start|>
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def claimChildren(self):
return self.Object.parts
class MakeBox:
def __init__(self, obj_box):
self.form = []
self.main_widget = QtGui.QWidget()
self.main_widget.setWindowTitle("Make box")
self.parts_vbox = QtGui.QGridLayout(self.main_widget)
self.form.append(self.main_widget)
self.preview_button = QtGui.QPushButton('Preview', self.main_widget)
self.parts_vbox.addWidget(self.preview_button, 0, 0, 1, 2)
self.preview_button.clicked.connect(self.preview)
self.obj_box = obj_box
self.box_properties_origin = copy.deepcopy(self.obj_box.box_properties)
self.bottom_properties_origin = copy.deepcopy(self.obj_box.bottom_properties)
self.top_properties_origin = copy.deepcopy(self.obj_box.top_properties)
self.dim_box_param = DimensionBoxParam(self.obj_box.box_properties)
self.general_box_param = LengthWidthBoxParam(self.obj_box.box_properties)
self.bottom_box_param = BottomBoxParam(self.obj_box.bottom_properties)
<|code_end|>
, predict the immediate next line with the help of imports:
from FreeCAD import Gui
from panel.box import BoxProperties, TopBottomProperties, DimensionBoxParam, LengthWidthBoxParam, BottomBoxParam, TopBoxParam
from lasercut import makebox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and context (classes, functions, sometimes code) from other files:
# Path: panel/box.py
# class DimensionBoxParam(ParamWidget):
# class LengthWidthBoxParam(ParamWidget):
# class BottomBoxParam(ParamWidget):
# class TopBoxParam(ParamWidget):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makebox.py
# def make_box(dimension_properties, top_properties, bottom_properties):
# def make_box_outside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_box_inside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_z_panel(length, width, thickness, z_pos):
# def make_front_panels(length, height, thickness, spacing):
# def make_twice_half_front_panel(length, height, thickness, spacing):
# def make_side_panels(width, height, thickness, spacing):
. Output only the next line. | self.top_box_param = TopBoxParam(self.obj_box.top_properties) |
Given the following code snippet before the placeholder: <|code_start|># * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
__dir__ = os.path.dirname(__file__)
iconPath = os.path.join(__dir__, 'icons')
class GroupBox:
def __init__(self, obj):
obj.addProperty("App::PropertyPythonObject", "box_properties").box_properties = BoxProperties() # supported https://www.freecadweb.org/wiki/Scripted_objects
obj.addProperty("App::PropertyPythonObject", "top_properties").top_properties = TopBottomProperties()
obj.addProperty("App::PropertyPythonObject", "bottom_properties").bottom_properties = TopBottomProperties()
obj.addProperty('App::PropertyPythonObject', 'need_recompute').need_recompute = False
obj.addProperty('App::PropertyLinkList', 'parts').parts = []
obj.Proxy = self
def onChanged(self, fp, prop):
if prop == "need_recompute":
self.execute(fp)
def execute(self, fp):
if fp.need_recompute:
fp.need_recompute = False
document = fp.Document
<|code_end|>
, predict the next line using imports from the current file:
from FreeCAD import Gui
from panel.box import BoxProperties, TopBottomProperties, DimensionBoxParam, LengthWidthBoxParam, BottomBoxParam, TopBoxParam
from lasercut import makebox
from PySide import QtCore, QtGui
import FreeCAD
import FreeCADGui
import Part
import os
import math
import Draft
import copy
and context including class names, function names, and sometimes code from other files:
# Path: panel/box.py
# class DimensionBoxParam(ParamWidget):
# class LengthWidthBoxParam(ParamWidget):
# class BottomBoxParam(ParamWidget):
# class TopBoxParam(ParamWidget):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
# def __init__(self, properties = None):
#
# Path: lasercut/makebox.py
# def make_box(dimension_properties, top_properties, bottom_properties):
# def make_box_outside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_box_inside_measure(dimension_properties, top_properties, bottom_properties, part_list):
# def make_z_panel(length, width, thickness, z_pos):
# def make_front_panels(length, height, thickness, spacing):
# def make_twice_half_front_panel(length, height, thickness, spacing):
# def make_side_panels(width, height, thickness, spacing):
. Output only the next line. | computed_parts = makebox.make_box(fp.box_properties, fp.top_properties, fp.bottom_properties) |
Given the code snippet: <|code_start|># * This file is part of LCInterlocking module. *
# * LCInterlocking module is free software; you can redistribute it and/or*
# * modify it under the terms of the GNU Lesser General Public *
# * License as published by the Free Software Foundation; either *
# * version 2.1 of the License, or (at your option) any later version. *
# * *
# * This module is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, *
# * MA 02110-1301 USA *
# * *
# ***************************************************************************
def make_rounded_box(dimension_properties, top_properties, bottom_properties):
height = dimension_properties.height
part_list = []
if dimension_properties.side_length >= dimension_properties.max_side_length:
raise ValueError("Side length is bigger than its maximum")
if dimension_properties.cut > dimension_properties.nb_face:
raise ValueError("Number of cut is bigger than number of face")
bottom_part = create_plane_part(dimension_properties, bottom_properties)
<|code_end|>
, generate the next line using the imports in this file:
from FreeCAD import Gui
from lasercut.roundedboxproperties import RoundedBoxProperties, TopBottomRoundedProperties
from lasercut.makehinges import do_intersection
import FreeCAD
import FreeCADGui
import Part
import math
import lasercut.helper as helper
and context (functions, classes, or occasionally code) from other files:
# Path: lasercut/roundedboxproperties.py
# class RoundedBoxProperties(ObjectProperties):
#
# _allowed = ('nb_face', 'inradius', 'height', 'side_length', 'max_side_length', 'thickness', 'cut')
#
# def __init__(self, **kwargs):
# super(RoundedBoxProperties, self).__init__(**kwargs)
# if not hasattr(self, 'nb_face'):
# self.nb_face = int(5)
# if not hasattr(self, 'inradius'):
# self.inradius = 50.
# if not hasattr(self, 'height'):
# self.height = 30.
# if not hasattr(self, 'thickness'):
# self.thickness = 3.
# if not hasattr(self, 'cut_nb'):
# self.cut = int(0)
#
# self.side_length = 0.
# self.max_side_length = 0.
# self.compute_information(True)
#
# def compute_information(self, update_side_length = False):
# circumradius = self.inradius / math.cos(math.pi / float(self.nb_face))
# self.max_side_length = 2 * circumradius * math.sin(math.pi / float(self.nb_face))
# self.max_side_length = round(self.max_side_length - 0.01, 2)
#
# if update_side_length is True or self.side_length >= self.max_side_length :
# self.side_length = math.floor(self.max_side_length * 0.75)
#
# if self.cut > self.nb_face:
# self.cut = int(self.nb_face)
#
# class TopBottomRoundedProperties(ObjectProperties):
# _allowed = ('position', 'height_shift', 'radius_outside')
#
# POSITION_OUTSIDE = "Outside"
# POSITION_INSIDE = "Inside"
#
# def __init__(self, **kwargs):
# super(TopBottomRoundedProperties, self).__init__(**kwargs)
# if not hasattr(self, 'position'):
# self.position = self.POSITION_INSIDE
# if not hasattr(self, 'height_shift'):
# self.height_shift = 0.
# if not hasattr(self, 'radius_outside'):
# self.radius_outside = 0.
#
# Path: lasercut/makehinges.py
# def do_intersection(seg1, seg2):
# da = seg1.B - seg1.A
# db = seg2.B - seg2.A
# dc = seg2.A - seg1.A
#
# coplanar_val = dc.dot(da.cross(db))
# if not compare_value(coplanar_val, 0.):
# raise ValueError("Not coplanar (seg1: %s, seg2: %s) => res: %s" %(str(seg1), str(seg2), str(coplanar_val)))
#
# a = dc.cross(db).dot(da.cross(db))
# s = dc.cross(db).dot(da.cross(db)) / da.cross(db).dot(da.cross(db))
# if s >= 10e-6:
# da.scale(s, s, s)
# ip = seg1.A.add(da)
# return ip
# else:
# raise ValueError("Wrong scale")
. Output only the next line. | if bottom_properties.position == TopBottomRoundedProperties.POSITION_INSIDE: |
Given snippet: <|code_start|> face_2 = create_shape(mid_point_a, seg_b.A, seg_b.B, mid_point_b)
part_2 = face_2.extrude(FreeCAD.Vector(0, 0, height))
part_2.translate(FreeCAD.Vector(0, 0., -height/2))
part_list.append({'shape': part_1, 'name': "side_face_%d_a" % index})
part_list.append({'shape': part_2, 'name': "side_face_%d_b" % index})
index += 1
return part_list
def create_shape(p1, p2, p3, p4):
l1 = Part.makeLine(p1, p2)
l2 = Part.makeLine(p2, p3)
l3 = Part.makeLine(p3, p4)
l4 = Part.makeLine(p4, p1)
wire = Part.Wire([l1, l2, l3, l4])
face = Part.Face(wire)
return face
def retrieve_segments_arc(polygon_segment):
arcs_segment_list = []
nb_face = len(polygon_segment)
for index in range(nb_face):
first_segment = polygon_segment[index][1]
second_segment = polygon_segment[(index + 1) % nb_face][0]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from FreeCAD import Gui
from lasercut.roundedboxproperties import RoundedBoxProperties, TopBottomRoundedProperties
from lasercut.makehinges import do_intersection
import FreeCAD
import FreeCADGui
import Part
import math
import lasercut.helper as helper
and context:
# Path: lasercut/roundedboxproperties.py
# class RoundedBoxProperties(ObjectProperties):
#
# _allowed = ('nb_face', 'inradius', 'height', 'side_length', 'max_side_length', 'thickness', 'cut')
#
# def __init__(self, **kwargs):
# super(RoundedBoxProperties, self).__init__(**kwargs)
# if not hasattr(self, 'nb_face'):
# self.nb_face = int(5)
# if not hasattr(self, 'inradius'):
# self.inradius = 50.
# if not hasattr(self, 'height'):
# self.height = 30.
# if not hasattr(self, 'thickness'):
# self.thickness = 3.
# if not hasattr(self, 'cut_nb'):
# self.cut = int(0)
#
# self.side_length = 0.
# self.max_side_length = 0.
# self.compute_information(True)
#
# def compute_information(self, update_side_length = False):
# circumradius = self.inradius / math.cos(math.pi / float(self.nb_face))
# self.max_side_length = 2 * circumradius * math.sin(math.pi / float(self.nb_face))
# self.max_side_length = round(self.max_side_length - 0.01, 2)
#
# if update_side_length is True or self.side_length >= self.max_side_length :
# self.side_length = math.floor(self.max_side_length * 0.75)
#
# if self.cut > self.nb_face:
# self.cut = int(self.nb_face)
#
# class TopBottomRoundedProperties(ObjectProperties):
# _allowed = ('position', 'height_shift', 'radius_outside')
#
# POSITION_OUTSIDE = "Outside"
# POSITION_INSIDE = "Inside"
#
# def __init__(self, **kwargs):
# super(TopBottomRoundedProperties, self).__init__(**kwargs)
# if not hasattr(self, 'position'):
# self.position = self.POSITION_INSIDE
# if not hasattr(self, 'height_shift'):
# self.height_shift = 0.
# if not hasattr(self, 'radius_outside'):
# self.radius_outside = 0.
#
# Path: lasercut/makehinges.py
# def do_intersection(seg1, seg2):
# da = seg1.B - seg1.A
# db = seg2.B - seg2.A
# dc = seg2.A - seg1.A
#
# coplanar_val = dc.dot(da.cross(db))
# if not compare_value(coplanar_val, 0.):
# raise ValueError("Not coplanar (seg1: %s, seg2: %s) => res: %s" %(str(seg1), str(seg2), str(coplanar_val)))
#
# a = dc.cross(db).dot(da.cross(db))
# s = dc.cross(db).dot(da.cross(db)) / da.cross(db).dot(da.cross(db))
# if s >= 10e-6:
# da.scale(s, s, s)
# ip = seg1.A.add(da)
# return ip
# else:
# raise ValueError("Wrong scale")
which might include code, classes, or functions. Output only the next line. | intersection_point = do_intersection(first_segment, second_segment) |
Given the following code snippet before the placeholder: <|code_start|>
def assertDictEquals(self, d1, d2):
"""compares two dicts
If the two dict differ, the first difference is shown in the error
message
"""
d1 = d1.copy()
msgs = []
for key, value in list(d2.items()):
try:
if d1[key] != value:
msgs.append('%r != %r for key %r' % (d1[key], value, key))
del d1[key]
except KeyError:
msgs.append('missing %r key' % key)
if d1:
msgs.append('d2 is lacking %r' % d1)
if msgs:
self.fail('\n'.join(msgs))
assertDictEqual = assertDictEquals
def assertSetEquals(self, got, expected, msg=None):
"""compares two iterables and shows difference between both"""
got, expected = list(got), list(expected)
if msg is None:
msg1 = '%s != %s' % (got, expected)
else:
msg1 = msg
self.assertEquals(len(got), len(expected), msg1)
<|code_end|>
, predict the next line using imports from the current file:
from future import standard_library
from builtins import zip
from builtins import str
from builtins import input
from builtins import map
from builtins import *
from builtins import object
from warnings import warn
from compiler.consts import CO_GENERATOR
from test import test_support
from clonedigger.logilab.common.deprecation import class_renamed, deprecated_function, \
obsolete
from clonedigger.logilab.common.compat import set, enumerate, any
from clonedigger.logilab.common.modutils import load_module_from_name
from clonedigger.logilab.common.debugger import Debugger
from clonedigger.logilab.common.decorators import cached
from hotshot import Profile
from hotshot import stats
from io import StringIO
from pprint import pprint
from xml.sax import make_parser, SAXParseException
from clonedigger.logilab.aspects.weaver import weaver
from clonedigger.logilab.aspects.lib.contracts import ContractAspect
import sys
import os, os.path as osp
import re
import time
import getopt
import traceback
import unittest
import difflib
import types
import readline
import getopt
import doctest
and context including class names, function names, and sometimes code from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# def enumerate(iterable):
# """emulates the python2.3 enumerate() function"""
# i = 0
# for val in iterable:
# yield i, val
# i += 1
# #return zip(range(len(iterable)), iterable)
#
# def any(iterable):
# """any(iterable) -> bool
#
# Return True if bool(x) is True for any x in the iterable.
# """
# for elt in iterable:
# if elt:
# return True
# return False
#
# Path: clonedigger/logilab/common/modutils.py
# def load_module_from_name(dotted_name, path=None, use_sys=1):
# """load a Python module from it's name
#
# :type dotted_name: str
# :param dotted_name: python name of a module or package
#
# :type path: list or None
# :param path:
# optional list of path where the module or package should be
# searched (use sys.path if nothing or None is given)
#
# :type use_sys: bool
# :param use_sys:
# boolean indicating whether the sys.modules dictionary should be
# used or not
#
#
# :raise ImportError: if the module or package is not found
#
# :rtype: module
# :return: the loaded module
# """
# return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
. Output only the next line. | got, expected = set(got), set(expected) |
Next line prediction: <|code_start|> type, value = sys.exc_info()[:2]
msg = "test %s crashed -- %s : %s" % (test, type, value)
if verbose:
traceback.print_exc()
return msg
def _count(n, word):
"""format word according to n"""
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
## PostMortem Debug facilities #####
def start_interactive_mode(result):
"""starts an interactive shell so that the user can inspect errors
"""
debuggers = result.debuggers
descrs = result.error_descrs + result.fail_descrs
if len(debuggers) == 1:
# don't ask for test name if there's only one failure
debuggers[0].start()
else:
while True:
testindex = 0
print("Choose a test to debug:")
# order debuggers in the same way than errors were printed
<|code_end|>
. Use current file imports:
(from future import standard_library
from builtins import zip
from builtins import str
from builtins import input
from builtins import map
from builtins import *
from builtins import object
from warnings import warn
from compiler.consts import CO_GENERATOR
from test import test_support
from clonedigger.logilab.common.deprecation import class_renamed, deprecated_function, \
obsolete
from clonedigger.logilab.common.compat import set, enumerate, any
from clonedigger.logilab.common.modutils import load_module_from_name
from clonedigger.logilab.common.debugger import Debugger
from clonedigger.logilab.common.decorators import cached
from hotshot import Profile
from hotshot import stats
from io import StringIO
from pprint import pprint
from xml.sax import make_parser, SAXParseException
from clonedigger.logilab.aspects.weaver import weaver
from clonedigger.logilab.aspects.lib.contracts import ContractAspect
import sys
import os, os.path as osp
import re
import time
import getopt
import traceback
import unittest
import difflib
import types
import readline
import getopt
import doctest)
and context including class names, function names, or small code snippets from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# def enumerate(iterable):
# """emulates the python2.3 enumerate() function"""
# i = 0
# for val in iterable:
# yield i, val
# i += 1
# #return zip(range(len(iterable)), iterable)
#
# def any(iterable):
# """any(iterable) -> bool
#
# Return True if bool(x) is True for any x in the iterable.
# """
# for elt in iterable:
# if elt:
# return True
# return False
#
# Path: clonedigger/logilab/common/modutils.py
# def load_module_from_name(dotted_name, path=None, use_sys=1):
# """load a Python module from it's name
#
# :type dotted_name: str
# :param dotted_name: python name of a module or package
#
# :type path: list or None
# :param path:
# optional list of path where the module or package should be
# searched (use sys.path if nothing or None is given)
#
# :type use_sys: bool
# :param use_sys:
# boolean indicating whether the sys.modules dictionary should be
# used or not
#
#
# :raise ImportError: if the module or package is not found
#
# :rtype: module
# :return: the loaded module
# """
# return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
. Output only the next line. | print("\n".join(['\t%s : %s' % (i, descr) for i, (_, descr) in enumerate(descrs)])) |
Given the following code snippet before the placeholder: <|code_start|> def run(self, result, runcondition=None, options=None):
for test in self._tests:
if result.shouldStop:
break
test(result, runcondition, options)
return result
# python2.3 compat
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
class SkipAwareTextTestRunner(unittest.TextTestRunner):
def __init__(self, stream=sys.stderr, verbosity=1,
exitfirst=False, capture=False, printonly=None,
pdbmode=False, cvg=None, test_pattern=None, skipped_patterns=(),
options=None):
super(SkipAwareTextTestRunner, self).__init__(stream=stream,
verbosity=verbosity)
self.exitfirst = exitfirst
self.capture = capture
self.printonly = printonly
self.pdbmode = pdbmode
self.cvg = cvg
self.test_pattern = test_pattern
self.skipped_patterns = skipped_patterns
self.options = options
def _this_is_skipped(self, testedname):
<|code_end|>
, predict the next line using imports from the current file:
from future import standard_library
from builtins import zip
from builtins import str
from builtins import input
from builtins import map
from builtins import *
from builtins import object
from warnings import warn
from compiler.consts import CO_GENERATOR
from test import test_support
from clonedigger.logilab.common.deprecation import class_renamed, deprecated_function, \
obsolete
from clonedigger.logilab.common.compat import set, enumerate, any
from clonedigger.logilab.common.modutils import load_module_from_name
from clonedigger.logilab.common.debugger import Debugger
from clonedigger.logilab.common.decorators import cached
from hotshot import Profile
from hotshot import stats
from io import StringIO
from pprint import pprint
from xml.sax import make_parser, SAXParseException
from clonedigger.logilab.aspects.weaver import weaver
from clonedigger.logilab.aspects.lib.contracts import ContractAspect
import sys
import os, os.path as osp
import re
import time
import getopt
import traceback
import unittest
import difflib
import types
import readline
import getopt
import doctest
and context including class names, function names, and sometimes code from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# def enumerate(iterable):
# """emulates the python2.3 enumerate() function"""
# i = 0
# for val in iterable:
# yield i, val
# i += 1
# #return zip(range(len(iterable)), iterable)
#
# def any(iterable):
# """any(iterable) -> bool
#
# Return True if bool(x) is True for any x in the iterable.
# """
# for elt in iterable:
# if elt:
# return True
# return False
#
# Path: clonedigger/logilab/common/modutils.py
# def load_module_from_name(dotted_name, path=None, use_sys=1):
# """load a Python module from it's name
#
# :type dotted_name: str
# :param dotted_name: python name of a module or package
#
# :type path: list or None
# :param path:
# optional list of path where the module or package should be
# searched (use sys.path if nothing or None is given)
#
# :type use_sys: bool
# :param use_sys:
# boolean indicating whether the sys.modules dictionary should be
# used or not
#
#
# :raise ImportError: if the module or package is not found
#
# :rtype: module
# :return: the loaded module
# """
# return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
. Output only the next line. | return any([(pat in testedname) for pat in self.skipped_patterns]) |
Predict the next line after this snippet: <|code_start|>
def find_tests(testdir,
prefixes=DEFAULT_PREFIXES, suffix=".py",
excludes=(),
remove_suffix=True):
"""
Return a list of all applicable test modules.
"""
tests = []
for name in os.listdir(testdir):
if not suffix or name.endswith(suffix):
for prefix in prefixes:
if name.startswith(prefix):
if remove_suffix and name.endswith(suffix):
name = name[:-len(suffix)]
if name not in excludes:
tests.append(name)
tests.sort()
return tests
def run_test(test, verbose, runner=None, capture=0):
"""
Run a single test.
test -- the name of the test
verbose -- if true, print more messages
"""
test_support.unload(test)
try:
<|code_end|>
using the current file's imports:
from future import standard_library
from builtins import zip
from builtins import str
from builtins import input
from builtins import map
from builtins import *
from builtins import object
from warnings import warn
from compiler.consts import CO_GENERATOR
from test import test_support
from clonedigger.logilab.common.deprecation import class_renamed, deprecated_function, \
obsolete
from clonedigger.logilab.common.compat import set, enumerate, any
from clonedigger.logilab.common.modutils import load_module_from_name
from clonedigger.logilab.common.debugger import Debugger
from clonedigger.logilab.common.decorators import cached
from hotshot import Profile
from hotshot import stats
from io import StringIO
from pprint import pprint
from xml.sax import make_parser, SAXParseException
from clonedigger.logilab.aspects.weaver import weaver
from clonedigger.logilab.aspects.lib.contracts import ContractAspect
import sys
import os, os.path as osp
import re
import time
import getopt
import traceback
import unittest
import difflib
import types
import readline
import getopt
import doctest
and any relevant context from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# def enumerate(iterable):
# """emulates the python2.3 enumerate() function"""
# i = 0
# for val in iterable:
# yield i, val
# i += 1
# #return zip(range(len(iterable)), iterable)
#
# def any(iterable):
# """any(iterable) -> bool
#
# Return True if bool(x) is True for any x in the iterable.
# """
# for elt in iterable:
# if elt:
# return True
# return False
#
# Path: clonedigger/logilab/common/modutils.py
# def load_module_from_name(dotted_name, path=None, use_sys=1):
# """load a Python module from it's name
#
# :type dotted_name: str
# :param dotted_name: python name of a module or package
#
# :type path: list or None
# :param path:
# optional list of path where the module or package should be
# searched (use sys.path if nothing or None is given)
#
# :type use_sys: bool
# :param use_sys:
# boolean indicating whether the sys.modules dictionary should be
# used or not
#
#
# :raise ImportError: if the module or package is not found
#
# :rtype: module
# :return: the loaded module
# """
# return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
. Output only the next line. | m = load_module_from_name(test, path=sys.path) |
Given snippet: <|code_start|> group = OptionGroup(self._optik_parser,
title=group_name.capitalize())
self._optik_parser.add_option_group(group)
# add provider's specific options
for opt_name, opt_dict in options:
args, opt_dict = self.optik_option(provider, opt_name, opt_dict)
group.add_option(*args, **opt_dict)
self._all_options[opt_name] = provider
def optik_option(self, provider, opt_name, opt_dict):
"""get our personal option definition and return a suitable form for
use with optik/optparse
"""
opt_dict = copy(opt_dict)
if 'action' in opt_dict:
self._nocallback_options[provider] = opt_name
else:
opt_dict['action'] = 'callback'
opt_dict['callback'] = self.cb_set_provider_option
for specific in ('default', 'group', 'inputlevel'):
if specific in opt_dict:
del opt_dict[specific]
if (OPTPARSE_FORMAT_DEFAULT
and specific == 'default' and 'help' in opt_dict):
opt_dict['help'] += ' [current: %default]'
args = ['--' + opt_name]
if 'short' in opt_dict:
self._short_options[opt_dict['short']] = opt_name
args.append('-' + opt_dict['short'])
del opt_dict['short']
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
which might include code, classes, or functions. Output only the next line. | available_keys = set(self._optik_parser.option_class.ATTRS) |
Given the code snippet: <|code_start|> for optname, optdict, value in options:
help = optdict.get('help')
print(':%s:' % optname, file=stream)
if help:
print(normalize_text(help, line_len=79, indent=' '), file=stream)
if value:
print(' Default: %s' % format_option_value(optdict, value), file=stream)
class OptionsManagerMixIn(object):
"""MixIn to handle a configuration from both a configuration file and
command line options
"""
def __init__(self, usage, config_file=None, version=None, quiet=0):
self.config_file = config_file
self.reset_parsers(usage, version=version)
# list of registered options providers
self.options_providers = []
# dictionary assocating option name to checker
self._all_options = {}
self._short_options = {}
self._nocallback_options = {}
# verbosity
self.quiet = quiet
def reset_parsers(self, usage='', version=None):
# configuration file parser
self._config_parser = ConfigParser()
# command line parser
<|code_end|>
, generate the next line using the imports in this file:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context (functions, classes, or occasionally code) from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
. Output only the next line. | self._optik_parser = OptionParser(usage=usage, version=version) |
Given snippet: <|code_start|> assert provider.priority <= 0, "provider's priority can't be >= 0"
for i in range(len(self.options_providers)):
if provider.priority > self.options_providers[i].priority:
self.options_providers.insert(i, provider)
break
else:
self.options_providers.append(provider)
non_group_spec_options = [option for option in provider.options
if 'group' not in option[1]]
groups = getattr(provider, 'option_groups', ())
if own_group:
self.add_option_group(provider.name.upper(), provider.__doc__,
non_group_spec_options, provider)
else:
for opt_name, opt_dict in non_group_spec_options:
args, opt_dict = self.optik_option(provider, opt_name, opt_dict)
self._optik_parser.add_option(*args, **opt_dict)
self._all_options[opt_name] = provider
for gname, gdoc in groups:
goptions = [option for option in provider.options
if option[1].get('group') == gname]
self.add_option_group(gname, gdoc, goptions, provider)
def add_option_group(self, group_name, doc, options, provider):
"""add an option group including the listed options
"""
# add section to the config file
self._config_parser.add_section(group_name)
# add option group to the command line parser
if options:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
which might include code, classes, or functions. Output only the next line. | group = OptionGroup(self._optik_parser, |
Here is a snippet: <|code_start|> return self._optik_parser.format_help()
class Method(object):
"""used to ease late binding of default method (so you can define options
on the class using default methods on the configuration instance)
"""
def __init__(self, methname):
self.method = methname
self._inst = None
def bind(self, instance):
"""bind the method to its instance"""
if self._inst is None:
self._inst = instance
def __call__(self):
assert self._inst, 'unbound method'
return getattr(self._inst, self.method)()
class OptionsProviderMixIn(object):
"""Mixin to provide options to an OptionsManager"""
# those attributes should be overridden
priority = -1
name = 'default'
options = ()
def __init__(self):
<|code_end|>
. Write the next line using the current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
, which may include functions, classes, or code. Output only the next line. | self.config = Values() |
Continue the code snippet: <|code_start|>:copyright: 2003-2008 LOGILAB S.A. (Paris, FRANCE)
:contact: http://www.logilab.fr/ -- mailto:python-projects@logilab.org
"""
from __future__ import generators
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
standard_library.install_aliases()
__docformat__ = "restructuredtext en"
__all__ = ('OptionsManagerMixIn', 'OptionsProviderMixIn',
'ConfigurationMixIn', 'Configuration',
'OptionsManager2ConfigurationAdapter')
REQUIRED = []
class UnsupportedAction(Exception):
"""raised by set_option when it doesn't know what to do for an action"""
# validation functions ########################################################
def choice_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'choice'
"""
if not value in opt_dict['choices']:
msg = "option %s: invalid value: %r, should be in %s"
<|code_end|>
. Use current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context (classes, functions, or code) from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
. Output only the next line. | raise OptionValueError(msg % (name, value, opt_dict['choices'])) |
Given snippet: <|code_start|> raise UnsupportedAction(action)
def input_option(self, option, optdict, inputlevel=99):
default = self.option_default(option, optdict)
if default is REQUIRED:
defaultstr = '(required): '
elif optdict.get('inputlevel', 0) > inputlevel:
self.set_option(option, default, opt_dict=optdict)
return
elif optdict['type'] == 'password' or default is None:
defaultstr = ': '
else:
defaultstr = '(default: %s): ' % format_option_value(optdict, default)
print(':%s:' % option)
print(optdict.get('help') or option)
inputfunc = INPUT_FUNCTIONS[optdict['type']]
value = inputfunc(optdict, defaultstr)
while default is REQUIRED and not value:
print('please specify a value')
value = inputfunc(optdict, '%s: ' % option)
if value is None and default is not None:
value = default
self.set_option(option, value, opt_dict=optdict)
def get_option_def(self, opt_name):
"""return the dictionary defining an option given it's name"""
assert self.options
for opt in self.options:
if opt[0] == opt_name:
return opt[1]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
which might include code, classes, or functions. Output only the next line. | raise OptionError('no such option in section %r' % self.name, opt_name) |
Given the code snippet: <|code_start|> self.generate_config(stream)
def load_config_file(self):
"""dispatch values previously read from a configuration file to each
options provider)
"""
parser = self._config_parser
for provider in self.options_providers:
for section, option, optdict in provider.all_options():
try:
value = parser.get(section, option)
provider.set_option(option, value, opt_dict=optdict)
except (NoSectionError, NoOptionError) as ex:
continue
def load_configuration(self, **kwargs):
"""override configuration according to given parameters
"""
for opt_name, opt_value in list(kwargs.items()):
opt_name = opt_name.replace('_', '-')
provider = self._all_options[opt_name]
provider.set_option(opt_name, opt_value)
def load_command_line_configuration(self, args=None):
"""override configuration according to command line parameters
return additional arguments
"""
# monkey patch optparse to deal with our default values
try:
<|code_end|>
, generate the next line using the imports in this file:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context (functions, classes, or occasionally code) from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
. Output only the next line. | expand_default_backup = HelpFormatter.expand_default |
Here is a snippet: <|code_start|> def global_set_option(self, opt_name, value):
"""set option on the correct option provider"""
self._all_options[opt_name].set_option(opt_name, value)
def generate_config(self, stream=None, skipsections=()):
"""write a configuration file according to the current configuration
into the given stream or stdout
"""
stream = stream or sys.stdout
printed = False
for provider in self.options_providers:
default_options = []
sections = {}
for section, options in provider.options_by_section():
if section in skipsections:
continue
options = [(n, d, v) for (n, d, v) in options
if d.get('type') is not None]
if not options:
continue
if section is None:
section = provider.name
doc = provider.__doc__
else:
doc = None
if printed:
print('\n', file=stream)
format_section(stream, section.upper(), options, doc)
printed = True
<|code_end|>
. Write the next line using the current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
, which may include functions, classes, or code. Output only the next line. | def generate_manpage(self, pkginfo, section=1, stream=None): |
Using the snippet: <|code_start|>
# validation functions ########################################################
def choice_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'choice'
"""
if not value in opt_dict['choices']:
msg = "option %s: invalid value: %r, should be in %s"
raise OptionValueError(msg % (name, value, opt_dict['choices']))
return value
def multiple_choice_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'choice'
"""
choices = opt_dict['choices']
values = check_csv(None, name, value)
for value in values:
if not value in choices:
msg = "option %s: invalid value: %r, should be in %s"
raise OptionValueError(msg % (name, value, choices))
return values
def csv_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'csv'
"""
return check_csv(None, name, value)
def yn_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'yn'
"""
<|code_end|>
, determine the next line of code. You have imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context (class names, function names, or code) available:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
. Output only the next line. | return check_yn(None, name, value) |
Given snippet: <|code_start|>from __future__ import division
from __future__ import absolute_import
standard_library.install_aliases()
__docformat__ = "restructuredtext en"
__all__ = ('OptionsManagerMixIn', 'OptionsProviderMixIn',
'ConfigurationMixIn', 'Configuration',
'OptionsManager2ConfigurationAdapter')
REQUIRED = []
class UnsupportedAction(Exception):
"""raised by set_option when it doesn't know what to do for an action"""
# validation functions ########################################################
def choice_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'choice'
"""
if not value in opt_dict['choices']:
msg = "option %s: invalid value: %r, should be in %s"
raise OptionValueError(msg % (name, value, opt_dict['choices']))
return value
def multiple_choice_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'choice'
"""
choices = opt_dict['choices']
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
which might include code, classes, or functions. Output only the next line. | values = check_csv(None, name, value) |
Predict the next line after this snippet: <|code_start|> return value
def multiple_choice_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'choice'
"""
choices = opt_dict['choices']
values = check_csv(None, name, value)
for value in values:
if not value in choices:
msg = "option %s: invalid value: %r, should be in %s"
raise OptionValueError(msg % (name, value, choices))
return values
def csv_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'csv'
"""
return check_csv(None, name, value)
def yn_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'yn'
"""
return check_yn(None, name, value)
def named_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'named'
"""
return check_named(None, name, value)
def file_validator(opt_dict, name, value):
"""validate and return a filepath for option of type 'file'"""
<|code_end|>
using the current file's imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and any relevant context from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
. Output only the next line. | return check_file(None, name, value) |
Continue the code snippet: <|code_start|> """
choices = opt_dict['choices']
values = check_csv(None, name, value)
for value in values:
if not value in choices:
msg = "option %s: invalid value: %r, should be in %s"
raise OptionValueError(msg % (name, value, choices))
return values
def csv_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'csv'
"""
return check_csv(None, name, value)
def yn_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'yn'
"""
return check_yn(None, name, value)
def named_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'named'
"""
return check_named(None, name, value)
def file_validator(opt_dict, name, value):
"""validate and return a filepath for option of type 'file'"""
return check_file(None, name, value)
def color_validator(opt_dict, name, value):
"""validate and return a valid color for option of type 'color'"""
<|code_end|>
. Use current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context (classes, functions, or code) from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
. Output only the next line. | return check_color(None, name, value) |
Here is a snippet: <|code_start|> """
if not value in opt_dict['choices']:
msg = "option %s: invalid value: %r, should be in %s"
raise OptionValueError(msg % (name, value, opt_dict['choices']))
return value
def multiple_choice_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'choice'
"""
choices = opt_dict['choices']
values = check_csv(None, name, value)
for value in values:
if not value in choices:
msg = "option %s: invalid value: %r, should be in %s"
raise OptionValueError(msg % (name, value, choices))
return values
def csv_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'csv'
"""
return check_csv(None, name, value)
def yn_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'yn'
"""
return check_yn(None, name, value)
def named_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'named'
"""
<|code_end|>
. Write the next line using the current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
, which may include functions, classes, or code. Output only the next line. | return check_named(None, name, value) |
Continue the code snippet: <|code_start|> if not value in choices:
msg = "option %s: invalid value: %r, should be in %s"
raise OptionValueError(msg % (name, value, choices))
return values
def csv_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'csv'
"""
return check_csv(None, name, value)
def yn_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'yn'
"""
return check_yn(None, name, value)
def named_validator(opt_dict, name, value):
"""validate and return a converted value for option of type 'named'
"""
return check_named(None, name, value)
def file_validator(opt_dict, name, value):
"""validate and return a filepath for option of type 'file'"""
return check_file(None, name, value)
def color_validator(opt_dict, name, value):
"""validate and return a valid color for option of type 'color'"""
return check_color(None, name, value)
def password_validator(opt_dict, name, value):
"""validate and return a string for option of type 'password'"""
<|code_end|>
. Use current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context (classes, functions, or code) from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
. Output only the next line. | return check_password(None, name, value) |
Next line prediction: <|code_start|> except OptionValueError as ex:
msg = str(ex).split(':', 1)[-1].strip()
print('bad value: %s' % msg)
return input_validator
INPUT_FUNCTIONS = {
'string': input_string,
'password': input_password,
}
for opttype in list(VALIDATORS.keys()):
INPUT_FUNCTIONS.setdefault(opttype, _make_input_function(opttype))
def expand_default(self, option):
"""monkey patch OptionParser.expand_default since we have a particular
way to handle defaults to avoid overriding values in the configuration
file
"""
if self.parser is None or not self.default_tag:
return option.help
optname = option._long_opts[0][2:]
try:
provider = self.parser.options_manager._all_options[optname]
except KeyError:
value = None
else:
optdict = provider.get_option_def(optname)
optname = provider.option_name(optname, optdict)
value = getattr(provider.config, optname, optdict)
value = format_option_value(optdict, value)
<|code_end|>
. Use current file imports:
(from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re)
and context including class names, function names, or small code snippets from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
. Output only the next line. | if value is NO_DEFAULT or not value: |
Predict the next line for this snippet: <|code_start|>
def add_option_group(self, group_name, doc, options, provider):
"""add an option group including the listed options
"""
# add section to the config file
self._config_parser.add_section(group_name)
# add option group to the command line parser
if options:
group = OptionGroup(self._optik_parser,
title=group_name.capitalize())
self._optik_parser.add_option_group(group)
# add provider's specific options
for opt_name, opt_dict in options:
args, opt_dict = self.optik_option(provider, opt_name, opt_dict)
group.add_option(*args, **opt_dict)
self._all_options[opt_name] = provider
def optik_option(self, provider, opt_name, opt_dict):
"""get our personal option definition and return a suitable form for
use with optik/optparse
"""
opt_dict = copy(opt_dict)
if 'action' in opt_dict:
self._nocallback_options[provider] = opt_name
else:
opt_dict['action'] = 'callback'
opt_dict['callback'] = self.cb_set_provider_option
for specific in ('default', 'group', 'inputlevel'):
if specific in opt_dict:
del opt_dict[specific]
<|code_end|>
with the help of current file imports:
from future import standard_library
from builtins import input
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from os.path import exists
from copy import copy
from configparser import ConfigParser, NoOptionError, NoSectionError
from clonedigger.logilab.common.compat import set
from clonedigger.logilab.common.textutils import normalize_text, unquote
from clonedigger.logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
OptionValueError, OptionError, HelpFormatter, generate_manpage, check_date, \
check_yn, check_csv, check_file, check_color, check_named, check_password,\
NO_DEFAULT, OPTPARSE_FORMAT_DEFAULT
from getpass import getpass
import os
import sys
import re
and context from other files:
# Path: clonedigger/logilab/common/compat.py
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
#
# Path: clonedigger/logilab/common/optik_ext.py
# NO_DEFAULT = []
# HAS_MX_DATETIME = True
# HAS_MX_DATETIME = False
# OPTPARSE_FORMAT_DEFAULT = sys.version_info >= (2, 4)
# TYPES = BaseOption.TYPES + ('regexp', 'csv', 'yn', 'named', 'password',
# 'multiple_choice', 'file', 'font', 'color')
# TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
# def check_regexp(option, opt, value):
# def check_csv(option, opt, value):
# def check_yn(option, opt, value):
# def check_named(option, opt, value):
# def check_password(option, opt, value):
# def check_file(option, opt, value):
# def check_date(option, opt, value):
# def check_color(option, opt, value):
# def _check_choice(self):
# def process(self, opt, value, values, parser):
# def __init__(self, option_class=Option, *args, **kwargs):
# def __init__ (self,
# indent_increment=0,
# max_help_position=24,
# width=79,
# short_first=0):
# def format_heading(self, heading):
# def format_description(self, description):
# def format_option(self, option):
# def format_head(self, optparser, pkginfo, section=1):
# def format_title(self, pgm, section):
# def format_short_description(self, pgm, short_desc):
# def format_synopsis(self, pgm):
# def format_long_description(self, pgm, long_desc):
# def format_tail(self, pkginfo):
# def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
# class Option(BaseOption):
# class OptionParser(BaseParser):
# class ManHelpFormatter(HelpFormatter):
, which may contain function names, class names, or code. Output only the next line. | if (OPTPARSE_FORMAT_DEFAULT |
Given the code snippet: <|code_start|> self.row_names.insert(index, row_name)
self.data.insert(index, row_data)
def delete_row(self, index):
"""Deletes the 'index' row in the table, and returns it.
Raises an IndexError if index is out of range
"""
self.row_names.pop(index)
return self.data.pop(index)
def delete_row_by_id(self, row_id):
"""Deletes the 'row_id' row in the table.
Raises a KeyError if row_id was not found.
"""
try:
row_index = self.row_names.index(row_id)
self.delete_row(row_index)
except ValueError:
raise KeyError('Row (%s) not found in table' % (row_id))
def set_column(self, col_index, col_data):
"""sets the 'col_index' column
pre:
type(col_data) == types.ListType
len(col_data) == len(self.row_names)
"""
<|code_end|>
, generate the next line using the imports in this file:
from future import standard_library
from builtins import zip
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from warnings import warn
from clonedigger.logilab.common.compat import enumerate, sum, set
import re
and context (functions, classes, or occasionally code) from other files:
# Path: clonedigger/logilab/common/compat.py
# def enumerate(iterable):
# """emulates the python2.3 enumerate() function"""
# i = 0
# for val in iterable:
# yield i, val
# i += 1
# #return zip(range(len(iterable)), iterable)
#
# def sum(seq, start=0):
# """Returns the sum of all elements in the sequence"""
# return reduce(operator.add, seq, start)
#
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
. Output only the next line. | for row_index, cell_data in enumerate(col_data): |
Based on the snippet: <|code_start|> col_index = self.col_names.index(col_id)
except ValueError:
raise KeyError("Column (%s) not found in table" % (col_id))
return self.data[row_index][col_index]
def get_row(self, row_index):
"""Returns the 'row_index' row
"""
warn('table.get_row(i) is deprecated, use table[i] instead',
DeprecationWarning, stacklevel=2)
return self.data[row_index]
def get_row_by_id(self, row_id):
"""Returns the 'row_id' row
"""
#warn('table.get_row_by_id(i) is deprecated, use table[i] instead',
# DeprecationWarning, stacklevel=2)
try:
row_index = self.row_names.index(row_id)
except ValueError:
raise KeyError("Row (%s) not found in table" % (row_id))
return self.data[row_index]
def get_column(self, col_index, distinct=False):
"""Returns the 'col_index' col
"""
warn('table.get_column(i) is deprecated, use table[:,i] instead',
DeprecationWarning, stacklevel=2)
col = [row[col_index] for row in self.data]
if distinct:
<|code_end|>
, predict the immediate next line with the help of imports:
from future import standard_library
from builtins import zip
from builtins import str
from builtins import range
from builtins import *
from builtins import object
from warnings import warn
from clonedigger.logilab.common.compat import enumerate, sum, set
import re
and context (classes, functions, sometimes code) from other files:
# Path: clonedigger/logilab/common/compat.py
# def enumerate(iterable):
# """emulates the python2.3 enumerate() function"""
# i = 0
# for val in iterable:
# yield i, val
# i += 1
# #return zip(range(len(iterable)), iterable)
#
# def sum(seq, start=0):
# """Returns the sum of all elements in the sequence"""
# return reduce(operator.add, seq, start)
#
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
. Output only the next line. | return set(col) |
Given the code snippet: <|code_start|> if ext in ('.py', '.pyc', '.pyo') and exists(path_ + '.py'):
node = self.file_build(path_ + '.py', modname)
if node is None:
# this is a built-in module
# get a partial representation by introspection
node = self.inspect_build(module, modname=modname, path=path)
return node
def inspect_build(self, module, modname=None, path=None):
"""build astng from a living module (i.e. using inspect)
this is used when there is no python source code available (either
because it's a built-in module or because the .py is not available)
"""
self._module = module
node = build_module(modname or module.__name__, module.__doc__)
node.file = node.path = path and abspath(path) or path
if self._manager is not None:
self._manager._cache[node.file] = self._manager._cache[node.name] = node
node.package = hasattr(module, '__path__')
attach___dict__(node)
self._done = {}
self.object_build(node, module)
return node
def file_build(self, path, modname=None):
"""build astng from a source code file (i.e. from an ast)
path is expected to be a python source file
"""
try:
<|code_end|>
, generate the next line using the imports in this file:
from future import standard_library
from builtins import *
from builtins import object
from os.path import splitext, basename, dirname, exists, abspath
from parser import ParserError
from compiler import parse
from inspect import isfunction, ismethod, ismethoddescriptor, isclass, \
isbuiltin
from inspect import isdatadescriptor
from clonedigger.logilab.common.fileutils import norm_read
from clonedigger.logilab.common.modutils import modpath_from_file
from clonedigger.logilab.astng import nodes, YES, Instance
from clonedigger.logilab.astng.utils import ASTWalker
from clonedigger.logilab.astng._exceptions import ASTNGBuildingException, InferenceError
from clonedigger.logilab.astng.raw_building import *
from clonedigger.logilab.astng.astutils import cvrtr
from compiler import transformer, consts
from types import TupleType
from clonedigger.logilab.astng import MANAGER as manager
import sys
import token
and context (functions, classes, or occasionally code) from other files:
# Path: clonedigger/logilab/common/fileutils.py
# def norm_read(path):
# """return the content of the file with normalized line feeds
#
# :type path: str
# :param path: path to the file to read
#
# :rtype: str
# :return: the content of the file with normalized line feeds
# """
# if _HAS_UNIV_OPEN:
# return open(path, 'U').read()
# return _LINE_RGX.sub('\n', open(path).read())
#
# Path: clonedigger/logilab/common/modutils.py
# def modpath_from_file(filename):
# """given a file path return the corresponding splitted module's name
# (i.e name of a module or package splitted on '.')
#
# :type filename: str
# :param filename: file's path for which we want the module's name
#
#
# :raise ImportError:
# if the corresponding module's name has not been found
#
# :rtype: list(str)
# :return: the corresponding splitted module's name
# """
# base = splitext(abspath(filename))[0]
# for path in sys.path:
# path = abspath(path)
# if path and base[:len(path)] == path:
# if filename.find('site-packages') != -1 and \
# path.find('site-packages') == -1:
# continue
# mod_path = [module for module in base[len(path):].split(os.sep)
# if module]
# for part in mod_path[:-1]:
# path = join(path, part)
# if not _has_init(path):
# break
# else:
# break
# else:
# raise ImportError('Unable to find module for %s in %s' % (
# filename, ', \n'.join(sys.path)))
# return mod_path
#
# Path: clonedigger/logilab/astng/_exceptions.py
# class ASTNGBuildingException(ASTNGError):
# """exception class when we are not able to build an astng representation"""
#
# class InferenceError(ResolveError):
# """raised when we are unabled to infer a node"""
#
# Path: clonedigger/logilab/astng/astutils.py
# def cvrtr(tuple):
# """debug method returning an ast string in a readable fashion"""
# if type(tuple) is TupleType:
# try:
# try:
# txt = 'token.'+token.tok_name[tuple[0]]
# except:
# txt = 'symbol.'+symbol.sym_name[tuple[0]]
# except:
# txt = 'Unknown token/symbol'
# return [txt] + list(map(cvrtr, tuple[1:]))
# else:
# return tuple
. Output only the next line. | data = norm_read(path) |
Here is a snippet: <|code_start|> def inspect_build(self, module, modname=None, path=None):
"""build astng from a living module (i.e. using inspect)
this is used when there is no python source code available (either
because it's a built-in module or because the .py is not available)
"""
self._module = module
node = build_module(modname or module.__name__, module.__doc__)
node.file = node.path = path and abspath(path) or path
if self._manager is not None:
self._manager._cache[node.file] = self._manager._cache[node.name] = node
node.package = hasattr(module, '__path__')
attach___dict__(node)
self._done = {}
self.object_build(node, module)
return node
def file_build(self, path, modname=None):
"""build astng from a source code file (i.e. from an ast)
path is expected to be a python source file
"""
try:
data = norm_read(path)
except IOError as ex:
msg = 'Unable to load file %r (%s)' % (path, ex)
raise ASTNGBuildingException(msg)
self._file = path
# get module name if necessary, *before modifying sys.path*
if modname is None:
try:
<|code_end|>
. Write the next line using the current file imports:
from future import standard_library
from builtins import *
from builtins import object
from os.path import splitext, basename, dirname, exists, abspath
from parser import ParserError
from compiler import parse
from inspect import isfunction, ismethod, ismethoddescriptor, isclass, \
isbuiltin
from inspect import isdatadescriptor
from clonedigger.logilab.common.fileutils import norm_read
from clonedigger.logilab.common.modutils import modpath_from_file
from clonedigger.logilab.astng import nodes, YES, Instance
from clonedigger.logilab.astng.utils import ASTWalker
from clonedigger.logilab.astng._exceptions import ASTNGBuildingException, InferenceError
from clonedigger.logilab.astng.raw_building import *
from clonedigger.logilab.astng.astutils import cvrtr
from compiler import transformer, consts
from types import TupleType
from clonedigger.logilab.astng import MANAGER as manager
import sys
import token
and context from other files:
# Path: clonedigger/logilab/common/fileutils.py
# def norm_read(path):
# """return the content of the file with normalized line feeds
#
# :type path: str
# :param path: path to the file to read
#
# :rtype: str
# :return: the content of the file with normalized line feeds
# """
# if _HAS_UNIV_OPEN:
# return open(path, 'U').read()
# return _LINE_RGX.sub('\n', open(path).read())
#
# Path: clonedigger/logilab/common/modutils.py
# def modpath_from_file(filename):
# """given a file path return the corresponding splitted module's name
# (i.e name of a module or package splitted on '.')
#
# :type filename: str
# :param filename: file's path for which we want the module's name
#
#
# :raise ImportError:
# if the corresponding module's name has not been found
#
# :rtype: list(str)
# :return: the corresponding splitted module's name
# """
# base = splitext(abspath(filename))[0]
# for path in sys.path:
# path = abspath(path)
# if path and base[:len(path)] == path:
# if filename.find('site-packages') != -1 and \
# path.find('site-packages') == -1:
# continue
# mod_path = [module for module in base[len(path):].split(os.sep)
# if module]
# for part in mod_path[:-1]:
# path = join(path, part)
# if not _has_init(path):
# break
# else:
# break
# else:
# raise ImportError('Unable to find module for %s in %s' % (
# filename, ', \n'.join(sys.path)))
# return mod_path
#
# Path: clonedigger/logilab/astng/_exceptions.py
# class ASTNGBuildingException(ASTNGError):
# """exception class when we are not able to build an astng representation"""
#
# class InferenceError(ResolveError):
# """raised when we are unabled to infer a node"""
#
# Path: clonedigger/logilab/astng/astutils.py
# def cvrtr(tuple):
# """debug method returning an ast string in a readable fashion"""
# if type(tuple) is TupleType:
# try:
# try:
# txt = 'token.'+token.tok_name[tuple[0]]
# except:
# txt = 'symbol.'+symbol.sym_name[tuple[0]]
# except:
# txt = 'Unknown token/symbol'
# return [txt] + list(map(cvrtr, tuple[1:]))
# else:
# return tuple
, which may include functions, classes, or code. Output only the next line. | modname = '.'.join(modpath_from_file(path)) |
Given snippet: <|code_start|> # this is a built-in module
# get a partial representation by introspection
node = self.inspect_build(module, modname=modname, path=path)
return node
def inspect_build(self, module, modname=None, path=None):
"""build astng from a living module (i.e. using inspect)
this is used when there is no python source code available (either
because it's a built-in module or because the .py is not available)
"""
self._module = module
node = build_module(modname or module.__name__, module.__doc__)
node.file = node.path = path and abspath(path) or path
if self._manager is not None:
self._manager._cache[node.file] = self._manager._cache[node.name] = node
node.package = hasattr(module, '__path__')
attach___dict__(node)
self._done = {}
self.object_build(node, module)
return node
def file_build(self, path, modname=None):
"""build astng from a source code file (i.e. from an ast)
path is expected to be a python source file
"""
try:
data = norm_read(path)
except IOError as ex:
msg = 'Unable to load file %r (%s)' % (path, ex)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from future import standard_library
from builtins import *
from builtins import object
from os.path import splitext, basename, dirname, exists, abspath
from parser import ParserError
from compiler import parse
from inspect import isfunction, ismethod, ismethoddescriptor, isclass, \
isbuiltin
from inspect import isdatadescriptor
from clonedigger.logilab.common.fileutils import norm_read
from clonedigger.logilab.common.modutils import modpath_from_file
from clonedigger.logilab.astng import nodes, YES, Instance
from clonedigger.logilab.astng.utils import ASTWalker
from clonedigger.logilab.astng._exceptions import ASTNGBuildingException, InferenceError
from clonedigger.logilab.astng.raw_building import *
from clonedigger.logilab.astng.astutils import cvrtr
from compiler import transformer, consts
from types import TupleType
from clonedigger.logilab.astng import MANAGER as manager
import sys
import token
and context:
# Path: clonedigger/logilab/common/fileutils.py
# def norm_read(path):
# """return the content of the file with normalized line feeds
#
# :type path: str
# :param path: path to the file to read
#
# :rtype: str
# :return: the content of the file with normalized line feeds
# """
# if _HAS_UNIV_OPEN:
# return open(path, 'U').read()
# return _LINE_RGX.sub('\n', open(path).read())
#
# Path: clonedigger/logilab/common/modutils.py
# def modpath_from_file(filename):
# """given a file path return the corresponding splitted module's name
# (i.e name of a module or package splitted on '.')
#
# :type filename: str
# :param filename: file's path for which we want the module's name
#
#
# :raise ImportError:
# if the corresponding module's name has not been found
#
# :rtype: list(str)
# :return: the corresponding splitted module's name
# """
# base = splitext(abspath(filename))[0]
# for path in sys.path:
# path = abspath(path)
# if path and base[:len(path)] == path:
# if filename.find('site-packages') != -1 and \
# path.find('site-packages') == -1:
# continue
# mod_path = [module for module in base[len(path):].split(os.sep)
# if module]
# for part in mod_path[:-1]:
# path = join(path, part)
# if not _has_init(path):
# break
# else:
# break
# else:
# raise ImportError('Unable to find module for %s in %s' % (
# filename, ', \n'.join(sys.path)))
# return mod_path
#
# Path: clonedigger/logilab/astng/_exceptions.py
# class ASTNGBuildingException(ASTNGError):
# """exception class when we are not able to build an astng representation"""
#
# class InferenceError(ResolveError):
# """raised when we are unabled to infer a node"""
#
# Path: clonedigger/logilab/astng/astutils.py
# def cvrtr(tuple):
# """debug method returning an ast string in a readable fashion"""
# if type(tuple) is TupleType:
# try:
# try:
# txt = 'token.'+token.tok_name[tuple[0]]
# except:
# txt = 'symbol.'+symbol.sym_name[tuple[0]]
# except:
# txt = 'Unknown token/symbol'
# return [txt] + list(map(cvrtr, tuple[1:]))
# else:
# return tuple
which might include code, classes, or functions. Output only the next line. | raise ASTNGBuildingException(msg) |
Given snippet: <|code_start|> self.visit_default(node)
self._delayed.append(node)
def delayed_visit_assattr(self, node):
"""visit a stmt.AssAttr node -> add name to locals, handle members
definition
"""
try:
frame = node.frame()
for infered in node.expr.infer():
if infered is YES:
continue
try:
if infered.__class__ is Instance:
infered = infered._proxied
iattrs = infered.instance_attrs
else:
iattrs = infered.locals
except AttributeError:
continue
values = iattrs.setdefault(node.attrname, [])
if node in values:
continue
# get assign in __init__ first XXX useful ?
if frame.name == '__init__' and values and not \
values[0].frame().name == '__init__':
values.insert(0, node)
else:
values.append(node)
#print node.attrname, infered, values
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from future import standard_library
from builtins import *
from builtins import object
from os.path import splitext, basename, dirname, exists, abspath
from parser import ParserError
from compiler import parse
from inspect import isfunction, ismethod, ismethoddescriptor, isclass, \
isbuiltin
from inspect import isdatadescriptor
from clonedigger.logilab.common.fileutils import norm_read
from clonedigger.logilab.common.modutils import modpath_from_file
from clonedigger.logilab.astng import nodes, YES, Instance
from clonedigger.logilab.astng.utils import ASTWalker
from clonedigger.logilab.astng._exceptions import ASTNGBuildingException, InferenceError
from clonedigger.logilab.astng.raw_building import *
from clonedigger.logilab.astng.astutils import cvrtr
from compiler import transformer, consts
from types import TupleType
from clonedigger.logilab.astng import MANAGER as manager
import sys
import token
and context:
# Path: clonedigger/logilab/common/fileutils.py
# def norm_read(path):
# """return the content of the file with normalized line feeds
#
# :type path: str
# :param path: path to the file to read
#
# :rtype: str
# :return: the content of the file with normalized line feeds
# """
# if _HAS_UNIV_OPEN:
# return open(path, 'U').read()
# return _LINE_RGX.sub('\n', open(path).read())
#
# Path: clonedigger/logilab/common/modutils.py
# def modpath_from_file(filename):
# """given a file path return the corresponding splitted module's name
# (i.e name of a module or package splitted on '.')
#
# :type filename: str
# :param filename: file's path for which we want the module's name
#
#
# :raise ImportError:
# if the corresponding module's name has not been found
#
# :rtype: list(str)
# :return: the corresponding splitted module's name
# """
# base = splitext(abspath(filename))[0]
# for path in sys.path:
# path = abspath(path)
# if path and base[:len(path)] == path:
# if filename.find('site-packages') != -1 and \
# path.find('site-packages') == -1:
# continue
# mod_path = [module for module in base[len(path):].split(os.sep)
# if module]
# for part in mod_path[:-1]:
# path = join(path, part)
# if not _has_init(path):
# break
# else:
# break
# else:
# raise ImportError('Unable to find module for %s in %s' % (
# filename, ', \n'.join(sys.path)))
# return mod_path
#
# Path: clonedigger/logilab/astng/_exceptions.py
# class ASTNGBuildingException(ASTNGError):
# """exception class when we are not able to build an astng representation"""
#
# class InferenceError(ResolveError):
# """raised when we are unabled to infer a node"""
#
# Path: clonedigger/logilab/astng/astutils.py
# def cvrtr(tuple):
# """debug method returning an ast string in a readable fashion"""
# if type(tuple) is TupleType:
# try:
# try:
# txt = 'token.'+token.tok_name[tuple[0]]
# except:
# txt = 'symbol.'+symbol.sym_name[tuple[0]]
# except:
# txt = 'Unknown token/symbol'
# return [txt] + list(map(cvrtr, tuple[1:]))
# else:
# return tuple
which might include code, classes, or functions. Output only the next line. | except InferenceError: |
Predict the next line for this snippet: <|code_start|> # 2. first argument of instance/class method
if argindex == 0 and funcnode.type in ('method', 'classmethod'):
if context.boundnode is not None:
boundnode = context.boundnode
else:
# XXX can do better ?
boundnode = funcnode.parent.frame()
if funcnode.type == 'method':
return iter((Instance(boundnode),))
if funcnode.type == 'classmethod':
return iter((boundnode,))
# 2. search arg index
try:
return self.args[argindex].infer(context)
except IndexError:
pass
# 3. search in *args (.starargs)
if self.starargs is not None:
its = []
for infered in self.starargs.infer(context):
if infered is YES:
its.append((YES,))
continue
try:
its.append(infered.getitem(argindex).infer(context))
except (InferenceError, AttributeError):
its.append((YES,))
except IndexError:
continue
if its:
<|code_end|>
with the help of current file imports:
from future import standard_library
from builtins import next
from builtins import range
from builtins import *
from past.utils import old_div
from builtins import object
from copy import copy
from clonedigger.logilab.common.compat import imap, chain, set
from clonedigger.logilab.astng import MANAGER, YES, InferenceContext, Instance, Generator, \
unpack_infer, _infer_stmts, nodes, copy_context
from clonedigger.logilab.astng import ASTNGError, InferenceError, UnresolvableName, \
NoDefault, NotFoundError, ASTNGBuildingException
and context from other files:
# Path: clonedigger/logilab/common/compat.py
# def imap(function, *iterables):
# iterables = list(map(iter, iterables))
# while True:
# args = [next(i) for i in iterables]
# if function is None:
# yield tuple(args)
# else:
# yield function(*args)
#
# def chain(*iterables):
# for it in iterables:
# for element in it:
# yield element
#
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
, which may contain function names, class names, or code. Output only the next line. | return chain(*its) |
Given the following code snippet before the placeholder: <|code_start|># 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""this module contains a set of functions to handle inference on astng trees
:author: Sylvain Thenault
:copyright: 2003-2008 LOGILAB S.A. (Paris, FRANCE)
:contact: http://www.logilab.fr/ -- mailto:python-projects@logilab.org
:copyright: 2003-2008 Sylvain Thenault
:contact: mailto:thenault@gmail.com
"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
standard_library.install_aliases()
from __future__ import generators
__doctype__ = "restructuredtext en"
def path_wrapper(func):
"""return the given infer function wrapped to handle the path"""
def wrapped(node, context=None, _func=func, **kwargs):
"""wrapper function handling context"""
if context is None:
context = InferenceContext(node)
context.push(node)
<|code_end|>
, predict the next line using imports from the current file:
from future import standard_library
from builtins import next
from builtins import range
from builtins import *
from past.utils import old_div
from builtins import object
from copy import copy
from clonedigger.logilab.common.compat import imap, chain, set
from clonedigger.logilab.astng import MANAGER, YES, InferenceContext, Instance, Generator, \
unpack_infer, _infer_stmts, nodes, copy_context
from clonedigger.logilab.astng import ASTNGError, InferenceError, UnresolvableName, \
NoDefault, NotFoundError, ASTNGBuildingException
and context including class names, function names, and sometimes code from other files:
# Path: clonedigger/logilab/common/compat.py
# def imap(function, *iterables):
# iterables = list(map(iter, iterables))
# while True:
# args = [next(i) for i in iterables]
# if function is None:
# yield tuple(args)
# else:
# yield function(*args)
#
# def chain(*iterables):
# for it in iterables:
# for element in it:
# yield element
#
# class set(_baseset):
# """mutable set"""
# def add(self, value):
# self._data[value] = 1
#
# def remove(self, element):
# """removes <element> from set"""
# del self._data[element]
#
# def pop(self):
# """pops an arbitrary element from set"""
# return self._data.popitem()[0]
#
# def __hash__(self):
# """mutable et cannot be hashed."""
# raise TypeError("set objects are not hashable")
. Output only the next line. | yielded = set() |
Predict the next line after this snippet: <|code_start|> elif parse('U({:f},{:f})', L_cov_21.replace(' ', '')) is not None:
return True
else:
raise ValueError("Invalid value of L_cov_21: %s" % L_cov_21)
def _get_L_cov(L_cov_21, floatX, Uniform, tt):
if type(L_cov_21) in (float, int):
return np.array([[1.0, L_cov_21],
[L_cov_21, 1.0]]).astype(floatX)
elif _is_uniform(L_cov_21):
r = parse('U({:f},{:f})', L_cov_21.replace(' ', ''))
L_cov_21_ = Uniform('L_cov_21_', lower=r[0], upper=r[1])
return tt.stack([1.0, L_cov_21_, L_cov_21_, 1.0]).reshape((2, 2))
def _noise_model(hparams, h1, h2, xs_, Laplace, floatX, gamma):
u"""Distribution of observation noise.
"""
# ---- Noise model ----
if hparams['dist_noise'] == 'laplace':
obs1 = lambda mu: Laplace(
'x1s', mu=mu, b=h1 / np.float32(np.sqrt(2.)),
observed=xs_[:, 0], dtype=floatX
)
obs2 = lambda mu: Laplace(
'x2s', mu=mu, b=h2 / np.float32(np.sqrt(2.)),
observed=xs_[:, 1], dtype=floatX
)
elif hparams['dist_noise'] == 'gg':
beta = hparams['beta_noise']
<|code_end|>
using the current file's imports:
import numpy as np
import theano.tensor as tt
from parse import parse
from bmlingam.prob import GeneralizedGaussian
from bmlingam.utils import standardize_samples
from pymc3 import Metropolis, sample
from pymc3 import Normal, Laplace, StudentT, Model, HalfNormal, \
Deterministic, Gamma, Lognormal, Uniform
from scipy.special import gamma
from theano.tensor import sgn
from theano import config
from theano.tensor.slinalg import cholesky
and any relevant context from other files:
# Path: bmlingam/prob.py
# class GeneralizedGaussian(Continuous):
# def __init__(self, mu=0.0, beta=None, cov=None, *args, **kwargs):
# super(GeneralizedGaussian, self).__init__(*args, **kwargs)
# # assert(mu.shape[0] == cov.shape[0] == cov.shape[1])
# dim = mu.shape[0]
#
# self.mu = mu
# self.beta = beta
# self.prec = tt.nlinalg.pinv(cov)
# # self.k = (dim * tt.gamma(dim / 2.0)) / \
# # ((np.pi**(dim / 2.0)) * tt.gamma(1 + dim / (2 * beta)) * (2**(1 + dim / (2 * beta))))
# self.logk = tt.log(dim) + tt.gammaln(dim / 2.0) - \
# (dim / 2.0) * tt.log(np.pi) - \
# tt.gammaln(1 + dim / (2 * beta)) - \
# (1 + dim / (2 * beta)) * tt.log(2.0)
#
# def logp(self, value):
# x = value - self.mu
#
# if x.tag.test_value.ndim == 1:
# xpx = tt.sum(x * self.prec * x)
# normalize = tt.log(self.prec)
# else:
# (x.dot(self.prec) * x).sum(axis=x.ndim - 1)
# # normalize = tt.log(tt.nlinalg.Det(self.prec))
# normalize = logabsdet(self.prec)
#
# # return tt.log(self.k) + 0.5 * normalize - 0.5 * xpx
# return self.logk + 0.5 * normalize - 0.5 * xpx
#
# Path: bmlingam/utils/standardize_samples.py
# def standardize_samples(xs, standardize):
# xs = deepcopy(xs)
# if standardize == 'keepratio':
# s = np.std(xs)
# xs = (xs - np.mean(xs, axis=0)) / s
# elif standardize == 'scaling':
# xs = xs / np.std(xs, axis=0)
# elif standardize == 'commonscaling':
# xs = xs / np.std(xs)
# elif standardize is True:
# xs = (xs - np.mean(xs, axis=0)) / np.std(xs, axis=0)
# elif standardize is False:
# xs = xs
# else:
# raise ValueError("Invalid value of standardize: %s" % standardize)
#
# return xs
. Output only the next line. | obs1 = lambda mu: GeneralizedGaussian( |
Given the code snippet: <|code_start|> n_burn=10000, # Samples in burn-in period
n_mcmc_samples=10000, # Samples in MCMC (after burn-in)
seed_burn=1, # Random seed for burn-in period
seed=2 # Random seed for MCMC
)
trace = do_mcmc_bmlingam(data['xs'], hparams, mcmc_params)
b_post = np.mean(trace['b'])
:code:`xs` is the numpy.ndarray containing samples.
:param xs: Data array.
:type xs: numpy.ndarray, shape=(n_samples, 2)
:code:`hparams` is a dict including hyperparameters.
See :func:`bmlingam.hparam.define_hparam_searchspace`.
:param hparams: Set of hyperparameters.
:type hparams: dict
:code:`mcmc_params` includes parameters for MCMC.
:param mcmc_params: Parameters for MCMC.
:type mcmc_params: :class:`bmlingam.MCMCParams`
"""
assert(type(mcmc_params) == MCMCParams)
# ---- Import PyMC3 modules when required ----
# ---- Standardization ----
scale_ratio = np.std(xs[:, 1]) / np.std(xs[:, 0])
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
import theano.tensor as tt
from parse import parse
from bmlingam.prob import GeneralizedGaussian
from bmlingam.utils import standardize_samples
from pymc3 import Metropolis, sample
from pymc3 import Normal, Laplace, StudentT, Model, HalfNormal, \
Deterministic, Gamma, Lognormal, Uniform
from scipy.special import gamma
from theano.tensor import sgn
from theano import config
from theano.tensor.slinalg import cholesky
and context (functions, classes, or occasionally code) from other files:
# Path: bmlingam/prob.py
# class GeneralizedGaussian(Continuous):
# def __init__(self, mu=0.0, beta=None, cov=None, *args, **kwargs):
# super(GeneralizedGaussian, self).__init__(*args, **kwargs)
# # assert(mu.shape[0] == cov.shape[0] == cov.shape[1])
# dim = mu.shape[0]
#
# self.mu = mu
# self.beta = beta
# self.prec = tt.nlinalg.pinv(cov)
# # self.k = (dim * tt.gamma(dim / 2.0)) / \
# # ((np.pi**(dim / 2.0)) * tt.gamma(1 + dim / (2 * beta)) * (2**(1 + dim / (2 * beta))))
# self.logk = tt.log(dim) + tt.gammaln(dim / 2.0) - \
# (dim / 2.0) * tt.log(np.pi) - \
# tt.gammaln(1 + dim / (2 * beta)) - \
# (1 + dim / (2 * beta)) * tt.log(2.0)
#
# def logp(self, value):
# x = value - self.mu
#
# if x.tag.test_value.ndim == 1:
# xpx = tt.sum(x * self.prec * x)
# normalize = tt.log(self.prec)
# else:
# (x.dot(self.prec) * x).sum(axis=x.ndim - 1)
# # normalize = tt.log(tt.nlinalg.Det(self.prec))
# normalize = logabsdet(self.prec)
#
# # return tt.log(self.k) + 0.5 * normalize - 0.5 * xpx
# return self.logk + 0.5 * normalize - 0.5 * xpx
#
# Path: bmlingam/utils/standardize_samples.py
# def standardize_samples(xs, standardize):
# xs = deepcopy(xs)
# if standardize == 'keepratio':
# s = np.std(xs)
# xs = (xs - np.mean(xs, axis=0)) / s
# elif standardize == 'scaling':
# xs = xs / np.std(xs, axis=0)
# elif standardize == 'commonscaling':
# xs = xs / np.std(xs)
# elif standardize is True:
# xs = (xs - np.mean(xs, axis=0)) / np.std(xs, axis=0)
# elif standardize is False:
# xs = xs
# else:
# raise ValueError("Invalid value of standardize: %s" % standardize)
#
# return xs
. Output only the next line. | xs = standardize_samples(xs, hparams['standardize']) |
Given snippet: <|code_start|>
:param infer_params_names: Name of inference parameters.
:type infer_params_names: str or list of str
:param mcmc_params_names: Name of MCMC parameters.
:type mcmc_params_names: str or list of str
:param gen_data_params_names: Name of data generation parameters.
:type gen_data_params_names: str or list of str
See get_infer_paramss(), get_mcmc_paramss() and get_data_gen_paramss().
"""
infer_paramss = _get_infer_paramss()
mcmc_paramss = _get_mcmc_paramss()
gen_data_paramss = _get_gen_data_paramss()
infer_params_names_ = _wrap_list(infer_params_names)
mcmc_params_names_ = _wrap_list(mcmc_params_names)
gen_data_params_names_ = _wrap_list(gen_data_params_names)
for ip in infer_params_names_:
infer_params = infer_paramss[ip]
for mp in mcmc_params_names_:
mcmc_params = mcmc_paramss[mp]
for gp in gen_data_params_names_:
gen_data_params = gen_data_paramss[gp]
print('Test condition')
print(' infer_params_name = {}'.format(ip))
print(' mcmc_params_name = {}'.format(mp))
print(' gen_data_params_name = {}\n'.format(gp))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from copy import deepcopy
from bmlingam.tests.test_pm3 import test_posterior_inference, \
infer_params_default, \
mcmc_params_default, \
gen_data_params_default
import six
and context:
# Path: bmlingam/tests/test_pm3.py
# def _gen_data(i, gen_data_params):
# def _estimate_hparams(xs, infer_params):
# def _mainloop(n_trials, gen_data_params, infer_params, mcmc_params):
# def test_posterior_inference(
# infer_params=None, mcmc_params=None, gen_data_params=None,
# n_trials=50, plot_result=False, plot_samples_all=False, show_result=False,
# show_result_all=False):
which might include code, classes, or functions. Output only the next line. | test_posterior_inference(
|
Using the snippet: <|code_start|> return [a]
def _get_test_conditions():
"""Get a list of test conditions.
Each test condition consists of parameters for causal inference,
estimation of regression coefficient via MCMC, and artificial data
generation.
"""
infer_paramss = _get_infer_paramss()
mcmc_paramss = _get_mcmc_paramss()
gen_data_paramss = _get_gen_data_paramss()
test_conds = [
{
'infer_params_name': infer_params_name,
'mcmc_params_name': mcmc_params_name,
'gen_data_params_name': gen_data_params_name,
'infer_params': infer_params,
'mcmc_params': mcmc_params,
'gen_data_params': gen_data_params,
}
for infer_params_name, infer_params in six.iteritems(infer_paramss)
for mcmc_params_name, mcmc_params in six.iteritems(mcmc_paramss)
for gen_data_params_name, gen_data_params in six.iteritems(gen_data_paramss)
]
return test_conds
def _get_infer_paramss():
<|code_end|>
, determine the next line of code. You have imports:
from copy import deepcopy
from bmlingam.tests.test_pm3 import test_posterior_inference, \
infer_params_default, \
mcmc_params_default, \
gen_data_params_default
import six
and context (class names, function names, or code) available:
# Path: bmlingam/tests/test_pm3.py
# def _gen_data(i, gen_data_params):
# def _estimate_hparams(xs, infer_params):
# def _mainloop(n_trials, gen_data_params, infer_params, mcmc_params):
# def test_posterior_inference(
# infer_params=None, mcmc_params=None, gen_data_params=None,
# n_trials=50, plot_result=False, plot_samples_all=False, show_result=False,
# show_result_all=False):
. Output only the next line. | infer_params_mp4 = deepcopy(infer_params_default)
|
Predict the next line for this snippet: <|code_start|>
return test_conds
def _get_infer_paramss():
infer_params_mp4 = deepcopy(infer_params_default)
infer_params_mp4.sampling_mode = 'cache_mp4'
infer_params_standardize = deepcopy(infer_params_mp4)
infer_params_standardize.standardize = True
infer_params_keepratio = deepcopy(infer_params_mp4)
infer_params_keepratio.standardize = 'keepratio'
infer_params_scaling = deepcopy(infer_params_mp4)
infer_params_scaling.standardize = 'scaling'
infer_params_commonscaling = deepcopy(infer_params_mp4)
infer_params_commonscaling.standardize = 'commonscaling'
return {
'default': infer_params_default,
'cache_mp4': infer_params_mp4,
'standardize': infer_params_standardize,
'keepratio': infer_params_keepratio,
'scaling': infer_params_scaling,
'commonscaling': infer_params_commonscaling
}
def _get_mcmc_paramss():
return {
<|code_end|>
with the help of current file imports:
from copy import deepcopy
from bmlingam.tests.test_pm3 import test_posterior_inference, \
infer_params_default, \
mcmc_params_default, \
gen_data_params_default
import six
and context from other files:
# Path: bmlingam/tests/test_pm3.py
# def _gen_data(i, gen_data_params):
# def _estimate_hparams(xs, infer_params):
# def _mainloop(n_trials, gen_data_params, infer_params, mcmc_params):
# def test_posterior_inference(
# infer_params=None, mcmc_params=None, gen_data_params=None,
# n_trials=50, plot_result=False, plot_samples_all=False, show_result=False,
# show_result_all=False):
, which may contain function names, class names, or code. Output only the next line. | 'default': mcmc_params_default
|
Given the following code snippet before the placeholder: <|code_start|> infer_params_mp4 = deepcopy(infer_params_default)
infer_params_mp4.sampling_mode = 'cache_mp4'
infer_params_standardize = deepcopy(infer_params_mp4)
infer_params_standardize.standardize = True
infer_params_keepratio = deepcopy(infer_params_mp4)
infer_params_keepratio.standardize = 'keepratio'
infer_params_scaling = deepcopy(infer_params_mp4)
infer_params_scaling.standardize = 'scaling'
infer_params_commonscaling = deepcopy(infer_params_mp4)
infer_params_commonscaling.standardize = 'commonscaling'
return {
'default': infer_params_default,
'cache_mp4': infer_params_mp4,
'standardize': infer_params_standardize,
'keepratio': infer_params_keepratio,
'scaling': infer_params_scaling,
'commonscaling': infer_params_commonscaling
}
def _get_mcmc_paramss():
return {
'default': mcmc_params_default
}
def _get_gen_data_paramss():
<|code_end|>
, predict the next line using imports from the current file:
from copy import deepcopy
from bmlingam.tests.test_pm3 import test_posterior_inference, \
infer_params_default, \
mcmc_params_default, \
gen_data_params_default
import six
and context including class names, function names, and sometimes code from other files:
# Path: bmlingam/tests/test_pm3.py
# def _gen_data(i, gen_data_params):
# def _estimate_hparams(xs, infer_params):
# def _mainloop(n_trials, gen_data_params, infer_params, mcmc_params):
# def test_posterior_inference(
# infer_params=None, mcmc_params=None, gen_data_params=None,
# n_trials=50, plot_result=False, plot_samples_all=False, show_result=False,
# show_result_all=False):
. Output only the next line. | confounders0 = deepcopy(gen_data_params_default)
|
Predict the next line after this snippet: <|code_start|> @fix_causality.setter
def fix_causality(self, value):
self._fix_causality = value
@property
def seed(self):
return self._seed
@seed.setter
def seed(self, value):
self._seed = value
def _gen_usr_distrib(size, dists, rng):
"""Generate random samples from usr_distrib.
"""
flt = lambda l: list(chain.from_iterable(l))
dist_names_local = flt([_dist_name_table[k] for k in dists])
# Check size
if isinstance(size, collections.Iterable):
n_samples, n_dim = (size[0], 1) if len(size) == 1 else size
else:
n_samples, n_dim = (size, 1)
# Select distributions
ixs_dist = rng.randint(0, len(dist_names_local), n_dim)
dists = [dist_names_local[ix_dist] for ix_dist in ixs_dist]
# Generate samples and normalize
xs_ = np.vstack(
<|code_end|>
using the current file's imports:
import collections
import numpy as np
from itertools import chain
from parse import parse
from bmlingam.utils import dist_names, usr_distrib
and any relevant context from other files:
# Path: bmlingam/utils/usr_distrib.py
# def usr_distrib(dist_name, query, param=None, rng=None):
# def _pdf_dblexp(mu, cov, xs):
# def _rnd_dblexp(mu, cov, rng, n_samples):
# def _krt_dblexp(mu, cov):
# def _pdf_mixdblexp(props, mus, covs, xs):
# def _rnd_mixdblexp(props, mus, covs, rng, n_samples):
# def _krt_mixdblexp(props, mus, covs):
# def _pdf_mixnorm(props, mus, covs, xs):
# def _rnd_mixnorm(props, mus, covs, rng, n_samples):
# def _krt_mixnorm(props, mus, covs):
# def rnd_discrete(props, rng, n_samples):
# def wrap_array(x):
. Output only the next line. | [usr_distrib(dist, 'rnd', n_samples, rng) for dist in dists]).T |
Here is a snippet: <|code_start|> s = s.replace(' ', '')
p = parse('uniform({},{})', s)
if p is None:
return None
else:
l = float(p[0])
u = float(p[1])
return (l, u)
_dist_name_table = {
'gmm4_symm_mul': ['m'], # mixture of 4 Gaussians, symmetric and multimodal
'gmm4_symm_trn': ['n'], # mixture of 4 Gaussians, symmetric and transitional
'gmm4_symm_uni': ['o'], # mixture of 4 Gaussians, symmetric and unimodal
'gmm4_asym_mul': ['p'], # mixture of 4 Gaussians, nonsymmetric and multimodal
'gmm4_asym_trn': ['q'], # mixture of 4 Gaussians, nonsymmetric and transitional
'gmm4_asym_uni': ['r'], # mixture of 4 Gaussians, nonsymmetric and unimodal
't5df': ['a'], # Student T with 3 degrees of freedom
't5df': ['d'], # Student T with 5 degrees of freedom
'exp': ['e'], # Simple exponential
'laplace': ['b'], # Double exponential (Laplace)
'uniform': ['c'], # Uniform
'dexp2': ['f'], # mixtures of 2 double exponential
'gmm2_symm_mul': ['g'], # mixture of 2 Gaussians, symmetric and multimodal
'gmm2_symm_trn': ['h'], # mixture of 2 Gaussians, symmetric and transitional
'gmm2_symm_uni': ['i'], # mixture of 2 Gaussians, symmetric and unimodal
'gmm2_asym_mul': ['j'], # mixture of 2 Gaussians, nonsymmetric and multimodal
'gmm2_asym_trn': ['k'], # mixture of 2 Gaussians, nonsymmetric and transitional
'gmm2_asym_uni': ['l'], # mixture of 2 Gaussians, nonsymmetric and unimodal
'gauss': ['s'], # Gaussian distribution
<|code_end|>
. Write the next line using the current file imports:
import collections
import numpy as np
from itertools import chain
from parse import parse
from bmlingam.utils import dist_names, usr_distrib
and context from other files:
# Path: bmlingam/utils/usr_distrib.py
# def usr_distrib(dist_name, query, param=None, rng=None):
# def _pdf_dblexp(mu, cov, xs):
# def _rnd_dblexp(mu, cov, rng, n_samples):
# def _krt_dblexp(mu, cov):
# def _pdf_mixdblexp(props, mus, covs, xs):
# def _rnd_mixdblexp(props, mus, covs, rng, n_samples):
# def _krt_mixdblexp(props, mus, covs):
# def _pdf_mixnorm(props, mus, covs, xs):
# def _rnd_mixnorm(props, mus, covs, rng, n_samples):
# def _krt_mixnorm(props, mus, covs):
# def rnd_discrete(props, rng, n_samples):
# def wrap_array(x):
, which may include functions, classes, or code. Output only the next line. | 'all': dist_names |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test functions for probability distributions.
"""
# Author: Taku Yoshioka
# License: MIT
def test_laplace_gg(plot=False):
"""Check if the outputs of ll_laplace() and ll_gg(, beta=0.5).
Outputs should be equivalent up to numerical error.
"""
xs = np.arange(-10., 10., .2)
<|code_end|>
, generate the next line using the imports in this file:
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_allclose
from scipy import stats
from scipy.special import gamma
from bmlingam.prob import ll_laplace, ll_gg, sample_gg
and context (functions, classes, or occasionally code) from other files:
# Path: bmlingam/prob.py
# def ll_laplace(e):
# """Return of log likelihood of the standard Laplace distribution.
#
# :param e: Sample values.
# :type e: ndarray, shape=(n_theta_sampling, n_samples)
# :return: pdf values at given samples.
# :rtype: ndarray, shape=(n_theta_sampling, n_samples)
# """
# b = 1 / sqrt(2)
# return -log(2 * b) - abs(e) / b
#
# def ll_gg(e, beta):
# """Return of log likelihood of generalized Gaussian distributions.
# """
# beta = float(beta)
# m = gamma(0.5 / beta) / ((2**(1 / beta)) * gamma(3 / (2 * beta)))
#
# return - 0.5 * power((e**2) / m, beta) + log(beta) \
# - gammaln(0.5 / beta) - (0.5 / beta) * log(2) - 0.5 * log(m)
#
# def sample_gg(scov, beta, n_samples, rng, dim, normalize):
# """Draw samples from GG distribution.
#
# See: Gómez, E., Gomez-Viilegas, M. A., & Marin, J. M. (1998).
# A multivariate generalization of the power exponential family of distributions.
# Communications in Statistics-Theory and Methods, 27(3), 589-600.
#
# Parameters
# ----------
# scov: numpy.ndarray, shape=(dim, dim)
# A scaled covariance matrix. For beta=1, it becomes the covariance matrix.
#
# beta: float
# Shape parameter, a positive value.
#
# n_samples: int
# Number of samples.
#
# rng: numpy.random.RandomState
# Random number generator.
#
# dim: int
# The dimension of the distribution.
#
# normalize: bool
# If true, each dimension is normalized such that its variance is 1.
# """
# # Draw samples from unit sphere
# gs = rng.normal(0, 1, size=(n_samples, dim))
# ss = gs + sign(sign(gs) + 1e-10) * 1e-10
# ns = np.linalg.norm(ss, ord=2, axis=1)[:, np.newaxis]
# us = ss / ns
#
# # Draw samples from GG
# S = np.linalg.cholesky(scov).T
# ts = rng.gamma(shape=dim / (2.0 * beta), scale=2.0,
# size=n_samples)[:, np.newaxis]
# xs = (ts**(1.0 / (2.0 * beta))) * us.dot(S)
#
# if normalize:
# v = (2**(1.0 / beta) * gamma((dim + 2.0) / (2.0 * beta))) / \
# (dim * gamma(dim / (2.0 * beta)))
# xs = xs / np.sqrt(v * np.diag(scov))
#
# return xs
. Output only the next line. | out1 = ll_laplace(xs)
|
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
"""Test functions for probability distributions.
"""
# Author: Taku Yoshioka
# License: MIT
def test_laplace_gg(plot=False):
"""Check if the outputs of ll_laplace() and ll_gg(, beta=0.5).
Outputs should be equivalent up to numerical error.
"""
xs = np.arange(-10., 10., .2)
out1 = ll_laplace(xs)
<|code_end|>
, predict the immediate next line with the help of imports:
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_allclose
from scipy import stats
from scipy.special import gamma
from bmlingam.prob import ll_laplace, ll_gg, sample_gg
and context (classes, functions, sometimes code) from other files:
# Path: bmlingam/prob.py
# def ll_laplace(e):
# """Return of log likelihood of the standard Laplace distribution.
#
# :param e: Sample values.
# :type e: ndarray, shape=(n_theta_sampling, n_samples)
# :return: pdf values at given samples.
# :rtype: ndarray, shape=(n_theta_sampling, n_samples)
# """
# b = 1 / sqrt(2)
# return -log(2 * b) - abs(e) / b
#
# def ll_gg(e, beta):
# """Return of log likelihood of generalized Gaussian distributions.
# """
# beta = float(beta)
# m = gamma(0.5 / beta) / ((2**(1 / beta)) * gamma(3 / (2 * beta)))
#
# return - 0.5 * power((e**2) / m, beta) + log(beta) \
# - gammaln(0.5 / beta) - (0.5 / beta) * log(2) - 0.5 * log(m)
#
# def sample_gg(scov, beta, n_samples, rng, dim, normalize):
# """Draw samples from GG distribution.
#
# See: Gómez, E., Gomez-Viilegas, M. A., & Marin, J. M. (1998).
# A multivariate generalization of the power exponential family of distributions.
# Communications in Statistics-Theory and Methods, 27(3), 589-600.
#
# Parameters
# ----------
# scov: numpy.ndarray, shape=(dim, dim)
# A scaled covariance matrix. For beta=1, it becomes the covariance matrix.
#
# beta: float
# Shape parameter, a positive value.
#
# n_samples: int
# Number of samples.
#
# rng: numpy.random.RandomState
# Random number generator.
#
# dim: int
# The dimension of the distribution.
#
# normalize: bool
# If true, each dimension is normalized such that its variance is 1.
# """
# # Draw samples from unit sphere
# gs = rng.normal(0, 1, size=(n_samples, dim))
# ss = gs + sign(sign(gs) + 1e-10) * 1e-10
# ns = np.linalg.norm(ss, ord=2, axis=1)[:, np.newaxis]
# us = ss / ns
#
# # Draw samples from GG
# S = np.linalg.cholesky(scov).T
# ts = rng.gamma(shape=dim / (2.0 * beta), scale=2.0,
# size=n_samples)[:, np.newaxis]
# xs = (ts**(1.0 / (2.0 * beta))) * us.dot(S)
#
# if normalize:
# v = (2**(1.0 / beta) * gamma((dim + 2.0) / (2.0 * beta))) / \
# (dim * gamma(dim / (2.0 * beta)))
# xs = xs / np.sqrt(v * np.diag(scov))
#
# return xs
. Output only the next line. | out2 = ll_gg(xs, beta=.5)
|
Given the following code snippet before the placeholder: <|code_start|> print('Mean: {}'.format(d.mean))
print('Var : {}'.format(d.variance))
print('Skew: {}'.format(d.skewness))
print('Kurt: {}'.format(d.kurtosis))
assert_allclose([d.mean, d.variance, d.skewness, d.kurtosis],
ss, rtol=5e-2, atol=5e-2)
def _mv_kurtosis(xs):
dim = xs.shape[1]
prec = np.linalg.pinv(np.cov(xs.T))
xs_ = xs - xs.mean(axis=0)
print(xs_.shape, prec.shape)
xpx = np.sum((xs_.dot(prec)) * xs_, axis=1)
k = np.mean(xpx**2) - dim * (dim + 2)
print('Mv kurtosis: {}'.format(k))
return k
def test_sample_gg(n_samples=1000000, plot=False):
"""Tests for generalized Gaussian.
"""
rng = np.random.RandomState(0)
# Test 1
print('Test1')
dim = 2
scov = np.eye(dim)
beta = 1.0
<|code_end|>
, predict the next line using imports from the current file:
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_allclose
from scipy import stats
from scipy.special import gamma
from bmlingam.prob import ll_laplace, ll_gg, sample_gg
and context including class names, function names, and sometimes code from other files:
# Path: bmlingam/prob.py
# def ll_laplace(e):
# """Return of log likelihood of the standard Laplace distribution.
#
# :param e: Sample values.
# :type e: ndarray, shape=(n_theta_sampling, n_samples)
# :return: pdf values at given samples.
# :rtype: ndarray, shape=(n_theta_sampling, n_samples)
# """
# b = 1 / sqrt(2)
# return -log(2 * b) - abs(e) / b
#
# def ll_gg(e, beta):
# """Return of log likelihood of generalized Gaussian distributions.
# """
# beta = float(beta)
# m = gamma(0.5 / beta) / ((2**(1 / beta)) * gamma(3 / (2 * beta)))
#
# return - 0.5 * power((e**2) / m, beta) + log(beta) \
# - gammaln(0.5 / beta) - (0.5 / beta) * log(2) - 0.5 * log(m)
#
# def sample_gg(scov, beta, n_samples, rng, dim, normalize):
# """Draw samples from GG distribution.
#
# See: Gómez, E., Gomez-Viilegas, M. A., & Marin, J. M. (1998).
# A multivariate generalization of the power exponential family of distributions.
# Communications in Statistics-Theory and Methods, 27(3), 589-600.
#
# Parameters
# ----------
# scov: numpy.ndarray, shape=(dim, dim)
# A scaled covariance matrix. For beta=1, it becomes the covariance matrix.
#
# beta: float
# Shape parameter, a positive value.
#
# n_samples: int
# Number of samples.
#
# rng: numpy.random.RandomState
# Random number generator.
#
# dim: int
# The dimension of the distribution.
#
# normalize: bool
# If true, each dimension is normalized such that its variance is 1.
# """
# # Draw samples from unit sphere
# gs = rng.normal(0, 1, size=(n_samples, dim))
# ss = gs + sign(sign(gs) + 1e-10) * 1e-10
# ns = np.linalg.norm(ss, ord=2, axis=1)[:, np.newaxis]
# us = ss / ns
#
# # Draw samples from GG
# S = np.linalg.cholesky(scov).T
# ts = rng.gamma(shape=dim / (2.0 * beta), scale=2.0,
# size=n_samples)[:, np.newaxis]
# xs = (ts**(1.0 / (2.0 * beta))) * us.dot(S)
#
# if normalize:
# v = (2**(1.0 / beta) * gamma((dim + 2.0) / (2.0 * beta))) / \
# (dim * gamma(dim / (2.0 * beta)))
# xs = xs / np.sqrt(v * np.diag(scov))
#
# return xs
. Output only the next line. | xs = sample_gg(scov, beta, n_samples, rng, dim, normalize=True)
|
Given the code snippet: <|code_start|> rng):
# Define conditional parameter sets
df_indvdls_ = lambda p: df_indvdls if p == 't' else [None]
beta_coeffs_ = lambda p: beta_coeffs if p == 'gg' else [None]
# Make list of parameter sets
paramss = flt([
[
(prior_indvdl, L_cov, df_indvdl, beta)
for df_indvdl in df_indvdls_(prior_indvdl)
for beta in beta_coeffs_(prior_indvdl)
]
for prior_indvdl in prior_indvdls
for L_cov in L_covs
])
# Define function to draw random samples
def f(params):
prior_indvdl = params[0]
L_covs = [_get_L_cov(params[1], rng) for _ in range(n_mc_samples)]
df_L = params[2]
beta = params[3]
if prior_indvdl == 't':
sample_func = lambda L_cov_: multivariatet(
0, L_cov_, df_L, n_samples, rng) / np.sqrt(df_L / (df_L - 2))
elif prior_indvdl == 'gauss':
sample_func = lambda L_cov_: rng.multivariate_normal(
np.zeros(L_cov_.shape[0]), L_cov_, n_samples)
elif prior_indvdl == 'gg':
<|code_end|>
, generate the next line using the imports in this file:
from itertools import chain
from parse import parse
from bmlingam.prob import sample_gg, multivariatet
import numpy as np
and context (functions, classes, or occasionally code) from other files:
# Path: bmlingam/prob.py
# def sample_gg(scov, beta, n_samples, rng, dim, normalize):
# """Draw samples from GG distribution.
#
# See: Gómez, E., Gomez-Viilegas, M. A., & Marin, J. M. (1998).
# A multivariate generalization of the power exponential family of distributions.
# Communications in Statistics-Theory and Methods, 27(3), 589-600.
#
# Parameters
# ----------
# scov: numpy.ndarray, shape=(dim, dim)
# A scaled covariance matrix. For beta=1, it becomes the covariance matrix.
#
# beta: float
# Shape parameter, a positive value.
#
# n_samples: int
# Number of samples.
#
# rng: numpy.random.RandomState
# Random number generator.
#
# dim: int
# The dimension of the distribution.
#
# normalize: bool
# If true, each dimension is normalized such that its variance is 1.
# """
# # Draw samples from unit sphere
# gs = rng.normal(0, 1, size=(n_samples, dim))
# ss = gs + sign(sign(gs) + 1e-10) * 1e-10
# ns = np.linalg.norm(ss, ord=2, axis=1)[:, np.newaxis]
# us = ss / ns
#
# # Draw samples from GG
# S = np.linalg.cholesky(scov).T
# ts = rng.gamma(shape=dim / (2.0 * beta), scale=2.0,
# size=n_samples)[:, np.newaxis]
# xs = (ts**(1.0 / (2.0 * beta))) * us.dot(S)
#
# if normalize:
# v = (2**(1.0 / beta) * gamma((dim + 2.0) / (2.0 * beta))) / \
# (dim * gamma(dim / (2.0 * beta)))
# xs = xs / np.sqrt(v * np.diag(scov))
#
# return xs
#
# def multivariatet(mu, Sigma, N, M, rng):
# """Return a sample (or samples) from the multivariate t distribution.
#
# This function is adopted from
# http://kennychowdhary.me/2013/03/python-code-to-generate-samples-from-multivariate-t/.
#
# :param mu: Mean.
# :type mu: ndarray, shape=(n_dim,), dtype=float
# :param Sigma: Scaling matrix.
# :type Sigma: ndarray, shape=(n_dim, n_dim), dtype=float
# :param float N: Degrees of freedom.
# :param int M: Number of samples to produce.
# :param np.random.RandomState rng: Random number generator.
# :return: M samples of (n_dim)-dimensional multivariate t distribution.
# :rtype: ndarray, shape=(n_samples, n_dim), dtype=float
#
# """
# d = len(Sigma)
# g = np.tile(rng.gamma(N/2., 2./N, M), (d, 1)).T
# Z = rng.multivariate_normal(np.zeros(d), Sigma, M)
# return mu + Z / np.sqrt(g)
. Output only the next line. | sample_func = lambda L_cov_: sample_gg(
|
Predict the next line for this snippet: <|code_start|> L_cov_21 = rng.uniform(r[0], r[1])
return np.array([[1.0, L_cov_21],
[L_cov_21, 1.0]])
def _cache_mu_indvdl(
prior_indvdls, L_covs, df_indvdls, beta_coeffs, n_mc_samples, n_samples,
rng):
# Define conditional parameter sets
df_indvdls_ = lambda p: df_indvdls if p == 't' else [None]
beta_coeffs_ = lambda p: beta_coeffs if p == 'gg' else [None]
# Make list of parameter sets
paramss = flt([
[
(prior_indvdl, L_cov, df_indvdl, beta)
for df_indvdl in df_indvdls_(prior_indvdl)
for beta in beta_coeffs_(prior_indvdl)
]
for prior_indvdl in prior_indvdls
for L_cov in L_covs
])
# Define function to draw random samples
def f(params):
prior_indvdl = params[0]
L_covs = [_get_L_cov(params[1], rng) for _ in range(n_mc_samples)]
df_L = params[2]
beta = params[3]
if prior_indvdl == 't':
<|code_end|>
with the help of current file imports:
from itertools import chain
from parse import parse
from bmlingam.prob import sample_gg, multivariatet
import numpy as np
and context from other files:
# Path: bmlingam/prob.py
# def sample_gg(scov, beta, n_samples, rng, dim, normalize):
# """Draw samples from GG distribution.
#
# See: Gómez, E., Gomez-Viilegas, M. A., & Marin, J. M. (1998).
# A multivariate generalization of the power exponential family of distributions.
# Communications in Statistics-Theory and Methods, 27(3), 589-600.
#
# Parameters
# ----------
# scov: numpy.ndarray, shape=(dim, dim)
# A scaled covariance matrix. For beta=1, it becomes the covariance matrix.
#
# beta: float
# Shape parameter, a positive value.
#
# n_samples: int
# Number of samples.
#
# rng: numpy.random.RandomState
# Random number generator.
#
# dim: int
# The dimension of the distribution.
#
# normalize: bool
# If true, each dimension is normalized such that its variance is 1.
# """
# # Draw samples from unit sphere
# gs = rng.normal(0, 1, size=(n_samples, dim))
# ss = gs + sign(sign(gs) + 1e-10) * 1e-10
# ns = np.linalg.norm(ss, ord=2, axis=1)[:, np.newaxis]
# us = ss / ns
#
# # Draw samples from GG
# S = np.linalg.cholesky(scov).T
# ts = rng.gamma(shape=dim / (2.0 * beta), scale=2.0,
# size=n_samples)[:, np.newaxis]
# xs = (ts**(1.0 / (2.0 * beta))) * us.dot(S)
#
# if normalize:
# v = (2**(1.0 / beta) * gamma((dim + 2.0) / (2.0 * beta))) / \
# (dim * gamma(dim / (2.0 * beta)))
# xs = xs / np.sqrt(v * np.diag(scov))
#
# return xs
#
# def multivariatet(mu, Sigma, N, M, rng):
# """Return a sample (or samples) from the multivariate t distribution.
#
# This function is adopted from
# http://kennychowdhary.me/2013/03/python-code-to-generate-samples-from-multivariate-t/.
#
# :param mu: Mean.
# :type mu: ndarray, shape=(n_dim,), dtype=float
# :param Sigma: Scaling matrix.
# :type Sigma: ndarray, shape=(n_dim, n_dim), dtype=float
# :param float N: Degrees of freedom.
# :param int M: Number of samples to produce.
# :param np.random.RandomState rng: Random number generator.
# :return: M samples of (n_dim)-dimensional multivariate t distribution.
# :rtype: ndarray, shape=(n_samples, n_dim), dtype=float
#
# """
# d = len(Sigma)
# g = np.tile(rng.gamma(N/2., 2./N, M), (d, 1)).T
# Z = rng.multivariate_normal(np.zeros(d), Sigma, M)
# return mu + Z / np.sqrt(g)
, which may contain function names, class names, or code. Output only the next line. | sample_func = lambda L_cov_: multivariatet(
|
Predict the next line after this snippet: <|code_start|> else:
##load basic logging
logconf = os.path.join(odpw.__path__[0], 'resources/logging', 'logging.yaml')
with open(logconf) as f:
logging.config.dictConfig(yaml.load(f))
except Exception as e:
print "Exception during config initialisation", e
return
else:
##load basic logging
logconf = os.path.join(odpw.__path__[0], 'resources/logging', 'logging.yaml')
with open(logconf) as f:
logging.config.dictConfig(yaml.load(f))
logging.basicConfig(level=args.loglevel)
# config the structlog
config_logging()
log = structlog.get_logger()
#try:
log.info("CMD ARGS", args=str(args))
cli(args)
#except Exception as e:
# log.fatal("Uncaught exception", exc_info=True)
end = time.time()
secs = end - start
msecs = secs * 1000
log.info("END MAIN", time_elapsed=msecs)
<|code_end|>
using the current file's imports:
import argparse
import logging.config
import os
import structlog
import time
import sys
import odpw
import yaml
import traceback
from odpw.utils.timing import Timer
from odpw.utils.error_handling import ErrorHandler
from odpw_restapi import api, conv, qa
from tornado.web import RequestHandler
from tornado.wsgi import WSGIContainer
from flask import Flask, jsonify, request, Blueprint
and any relevant context from other files:
# Path: odpw/utils/timing.py
# class Timer(object):
#
# measures={}
#
# def __init__(self, verbose=False, key=None):
# self.verbose = verbose
# self.key=key
#
# def __enter__(self):
# self.start = time.time()
# return self
#
# def __exit__(self, *args):
# end = time.time()
# secs = end - self.start
# msecs = secs * 1000 # millisecs
# if self.verbose:
# print '(%s) elapsed time: %f ms' % (self.key,msecs)
# if self.key:
# if self.key not in self.__class__.measures:
# self.__class__.measures[self.key]=faststat.Stats()
# if msecs>=0:
# self.__class__.measures[self.key].add(msecs)
#
#
# @classmethod
# def printStats(cls):
# print '>>>','--*'*10,'TIMER','*--'*10
# s=[" Timing stats:","\n"]
# for m,st in Timer.measures.items():
# p = st.percentiles
# if st.n < len(p):
# quartiles = "(n too small)"
# else:
# quartiles = (_sigfigs(p.get(0.25, -1)),
# _sigfigs(p.get(0.5, -1)), _sigfigs(p.get(0.75, -1)))
# d = [" ["+m+'] -', str(st.mean), 'avg ms for',m,str(st.n),'calls)'
# ,"\n (min:",str(st.min),"-",str(st.max),":max, quantils:",quartiles,")\n"]
# s=s+d
# s=s+['-'*50]
# print " ".join(s)
# print '<<<','--*'*10,'TIMER','*--'*10
#
# @classmethod
# def getStats(cls):
#
# stats={}
# for m,st in cls.measures.items():
# p = st.percentiles
# stats[m]={
# 'avg':st.mean
# , 'calls':st.n
# , 'min':st.min
# , 'max':st.max
# , 'q25':None, 'q5':None, 'q75':None
# }
# if st.n < len(p):
# quartiles = "(n too small)"
# else:
# stats[m]['q25']=_sigfigs(p.get(0.25, -1))
# stats[m]['q5']=_sigfigs(p.get(0.5, -1))
# stats[m]['q75']=_sigfigs(p.get(0.75, -1))
# return stats
#
# Path: odpw/utils/error_handling.py
# class ErrorHandler():
#
# exceptions=defaultdict(long)
#
# DEBUG=False
#
# @classmethod
# def handleError(cls, log, msg=None, exception=None, debug=False, **kwargs):
# name=type(exception).__name__
# cls.exceptions[name] +=1
#
# if debug:
# print(traceback.format_exc())
#
# log.error(msg, exctype=type(exception), excmsg=exception.message, **kwargs)
#
# @classmethod
# def printStats(cls):
# print '>>>','--*'*10,'EXCEPTIONS','*--'*10
# if len(cls.exceptions)==0:
# print "No exceptions handled"
# else:
# print " Numbers of Exceptions:"
# for exc, count in cls.exceptions.iteritems():
# print " ",exc, count
# print '<<<','--*'*25
. Output only the next line. | Timer.printStats() |
Predict the next line for this snippet: <|code_start|> ##load basic logging
logconf = os.path.join(odpw.__path__[0], 'resources/logging', 'logging.yaml')
with open(logconf) as f:
logging.config.dictConfig(yaml.load(f))
except Exception as e:
print "Exception during config initialisation", e
return
else:
##load basic logging
logconf = os.path.join(odpw.__path__[0], 'resources/logging', 'logging.yaml')
with open(logconf) as f:
logging.config.dictConfig(yaml.load(f))
logging.basicConfig(level=args.loglevel)
# config the structlog
config_logging()
log = structlog.get_logger()
#try:
log.info("CMD ARGS", args=str(args))
cli(args)
#except Exception as e:
# log.fatal("Uncaught exception", exc_info=True)
end = time.time()
secs = end - start
msecs = secs * 1000
log.info("END MAIN", time_elapsed=msecs)
Timer.printStats()
<|code_end|>
with the help of current file imports:
import argparse
import logging.config
import os
import structlog
import time
import sys
import odpw
import yaml
import traceback
from odpw.utils.timing import Timer
from odpw.utils.error_handling import ErrorHandler
from odpw_restapi import api, conv, qa
from tornado.web import RequestHandler
from tornado.wsgi import WSGIContainer
from flask import Flask, jsonify, request, Blueprint
and context from other files:
# Path: odpw/utils/timing.py
# class Timer(object):
#
# measures={}
#
# def __init__(self, verbose=False, key=None):
# self.verbose = verbose
# self.key=key
#
# def __enter__(self):
# self.start = time.time()
# return self
#
# def __exit__(self, *args):
# end = time.time()
# secs = end - self.start
# msecs = secs * 1000 # millisecs
# if self.verbose:
# print '(%s) elapsed time: %f ms' % (self.key,msecs)
# if self.key:
# if self.key not in self.__class__.measures:
# self.__class__.measures[self.key]=faststat.Stats()
# if msecs>=0:
# self.__class__.measures[self.key].add(msecs)
#
#
# @classmethod
# def printStats(cls):
# print '>>>','--*'*10,'TIMER','*--'*10
# s=[" Timing stats:","\n"]
# for m,st in Timer.measures.items():
# p = st.percentiles
# if st.n < len(p):
# quartiles = "(n too small)"
# else:
# quartiles = (_sigfigs(p.get(0.25, -1)),
# _sigfigs(p.get(0.5, -1)), _sigfigs(p.get(0.75, -1)))
# d = [" ["+m+'] -', str(st.mean), 'avg ms for',m,str(st.n),'calls)'
# ,"\n (min:",str(st.min),"-",str(st.max),":max, quantils:",quartiles,")\n"]
# s=s+d
# s=s+['-'*50]
# print " ".join(s)
# print '<<<','--*'*10,'TIMER','*--'*10
#
# @classmethod
# def getStats(cls):
#
# stats={}
# for m,st in cls.measures.items():
# p = st.percentiles
# stats[m]={
# 'avg':st.mean
# , 'calls':st.n
# , 'min':st.min
# , 'max':st.max
# , 'q25':None, 'q5':None, 'q75':None
# }
# if st.n < len(p):
# quartiles = "(n too small)"
# else:
# stats[m]['q25']=_sigfigs(p.get(0.25, -1))
# stats[m]['q5']=_sigfigs(p.get(0.5, -1))
# stats[m]['q75']=_sigfigs(p.get(0.75, -1))
# return stats
#
# Path: odpw/utils/error_handling.py
# class ErrorHandler():
#
# exceptions=defaultdict(long)
#
# DEBUG=False
#
# @classmethod
# def handleError(cls, log, msg=None, exception=None, debug=False, **kwargs):
# name=type(exception).__name__
# cls.exceptions[name] +=1
#
# if debug:
# print(traceback.format_exc())
#
# log.error(msg, exctype=type(exception), excmsg=exception.message, **kwargs)
#
# @classmethod
# def printStats(cls):
# print '>>>','--*'*10,'EXCEPTIONS','*--'*10
# if len(cls.exceptions)==0:
# print "No exceptions handled"
# else:
# print " Numbers of Exceptions:"
# for exc, count in cls.exceptions.iteritems():
# print " ",exc, count
# print '<<<','--*'*25
, which may contain function names, class names, or code. Output only the next line. | ErrorHandler.printStats() |
Predict the next line after this snippet: <|code_start|> params = {}
for k,v in comp.params.iteritems():
if isinstance(v, unicode):
v = v.encode(enc)
params[k] = sqlescape(v)
return (comp.string.encode(enc) % params).decode(enc)
@compiler.compiles(CreateView)
def compile(element, compiler, **kw):
return "CREATE MATERIALIZED VIEW %s AS %s" % (element.name, compile_query(element.selectable))
@compiler.compiles(DropView)
def compile(element, compiler, **kw):
return "DROP MATERIALIZED VIEW IF EXISTS %s" % (element.name)
def view(name, metadata, selectable):
t = table(name)
orig=selectable
if isinstance(selectable, Query):
selectable = selectable.subquery()
for c in selectable.c:
c._make_proxy(t)
CreateView(name, orig).execute_at('after-create', metadata)
DropView(name).execute_at('before-drop', metadata)
return t
<|code_end|>
using the current file's imports:
import sqlalchemy
from sqlalchemy import *
from sqlalchemy.orm import Query
from sqlalchemy.schema import DDLElement
from sqlalchemy.sql import table
from sqlalchemy.ext import compiler
from odpw.core.model import Base
from psycopg2.extensions import adapt as sqlescape
and any relevant context from other files:
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
. Output only the next line. | def withView(query, viewName, session, dbc, metadata=Base.metadata): |
Given the following code snippet before the placeholder: <|code_start|> v = f['@id']
return v.strip()
def getDistributionByteSize(dataset):
return accessDistribution(dataset, DCAT.byteSize)
def getContactPointValues(dataset):
points = accessDataset(dataset, DCAT.contactPoint)
values = []
for p in points:
fn = accessById(dataset, p, VCARD.fn)
if fn:
values.append(fn)
mail = accessById(dataset, p, VCARD.hasEmail)
if mail:
values.append(mail)
return values
def getPublisherValues(dataset):
points = accessDataset(dataset, DCT.publisher)
values = []
for p in points:
for v in [FOAF.mbox, FOAF.homepage, FOAF.name]:
fn = accessById(dataset, p, v)
if fn:
values.append(fn)
return values
<|code_end|>
, predict the next line using imports from the current file:
from rdflib.namespace import RDFS, RDF
from odpw.core.licenses_mapping import LicensesOpennessMapping
from odpw.core.dataset_converter import DCAT, DCT, VCARD, FOAF
and context including class names, function names, and sometimes code from other files:
# Path: odpw/core/licenses_mapping.py
# class LicensesOpennessMapping:
#
# __licenses_list=None
#
# def __init__(self):
# if not LicensesOpennessMapping.__licenses_list:
# try:
# resp = requests.get(OPEN_DEFINITION)
# if resp.status_code != requests.codes.ok:
# raise Exception("(%s) Cannot get OpenDefinition licenses.", OPEN_DEFINITION)
# self.licenses_list = resp.json()
# except Exception as e:
# try:
# resp = requests.get(GITHUB)
# if resp.status_code != requests.codes.ok:
# raise Exception("(%s) Cannot get OpenDefinition licenses.", OPEN_DEFINITION)
# self.licenses_list = resp.json()
# except Exception as e:
# with open(LOCAL_FILE, 'r') as f:
# self.licenses_list = json.load(f)
# LicensesOpennessMapping.__licenses_list= self.licenses_list
# else:
# self.licenses_list = LicensesOpennessMapping.__licenses_list
#
# def map_license(self, title, lid, url):
# res_id = None
#
# # at first check if ID is matching
# if lid:
# if lid in ID_MAPPING_TO_OPEN_DEF and ID_MAPPING_TO_OPEN_DEF[lid]:
# common_id = ID_MAPPING_TO_OPEN_DEF[lid]
# else:
# common_id = lid
#
# if common_id in self.licenses_list:
# res_id = common_id
#
# # check if title is ID or if title is matching
# if not res_id and title:
# if title in TITLE_MAPPING_TO_OPEN_DEF and TITLE_MAPPING_TO_OPEN_DEF[title]:
# common_id = TITLE_MAPPING_TO_OPEN_DEF[title]
# else:
# common_id = title
#
# if common_id in self.licenses_list:
# res_id = common_id
# else:
# for l in self.licenses_list:
# if self.licenses_list[l].get('title') == common_id:
# res_id = l
# break
#
# # check if any url is matching
# if not res_id and url:
# for l in self.licenses_list:
# if self.licenses_list[l].get('url') == url:
# res_id = l
# break
#
# # assign any possible ID if not already found
# if not res_id:
# if lid:
# res_id = lid
# else:
# res_id = title if title else url
#
# # return a tuple (ID, od_conformance)
# if res_id in self.licenses_list:
# return res_id, self.licenses_list[res_id].get('od_conformance', 'not found')
# return res_id, 'not found'
#
# def is_open(self, id):
# return 'approved' == self.get_od_conformance(id)
#
# def get_od_conformance(self, id):
# if id in self.licenses_list:
# return self.licenses_list[id].get('od_conformance', 'not found')
# return 'not found'
#
# Path: odpw/core/dataset_converter.py
# DCAT = Namespace("http://www.w3.org/ns/dcat#")
#
# DCT = Namespace("http://purl.org/dc/terms/")
#
# VCARD = Namespace("http://www.w3.org/2006/vcard/ns#")
#
# FOAF = Namespace("http://xmlns.com/foaf/0.1/")
. Output only the next line. | license_mapping = LicensesOpennessMapping() |
Based on the snippet: <|code_start|>'''
Created on Aug 26, 2015
@author: jumbrich
'''
# -*- coding: utf-8 -*-
def accessDataset(dataset, key ):
value=[]
key=str(key)
#for dcat_el in dataset['dcat']:
for dcat_el in getattr(dataset,'dcat',[]):
<|code_end|>
, predict the immediate next line with the help of imports:
from rdflib.namespace import RDFS, RDF
from odpw.core.licenses_mapping import LicensesOpennessMapping
from odpw.core.dataset_converter import DCAT, DCT, VCARD, FOAF
and context (classes, functions, sometimes code) from other files:
# Path: odpw/core/licenses_mapping.py
# class LicensesOpennessMapping:
#
# __licenses_list=None
#
# def __init__(self):
# if not LicensesOpennessMapping.__licenses_list:
# try:
# resp = requests.get(OPEN_DEFINITION)
# if resp.status_code != requests.codes.ok:
# raise Exception("(%s) Cannot get OpenDefinition licenses.", OPEN_DEFINITION)
# self.licenses_list = resp.json()
# except Exception as e:
# try:
# resp = requests.get(GITHUB)
# if resp.status_code != requests.codes.ok:
# raise Exception("(%s) Cannot get OpenDefinition licenses.", OPEN_DEFINITION)
# self.licenses_list = resp.json()
# except Exception as e:
# with open(LOCAL_FILE, 'r') as f:
# self.licenses_list = json.load(f)
# LicensesOpennessMapping.__licenses_list= self.licenses_list
# else:
# self.licenses_list = LicensesOpennessMapping.__licenses_list
#
# def map_license(self, title, lid, url):
# res_id = None
#
# # at first check if ID is matching
# if lid:
# if lid in ID_MAPPING_TO_OPEN_DEF and ID_MAPPING_TO_OPEN_DEF[lid]:
# common_id = ID_MAPPING_TO_OPEN_DEF[lid]
# else:
# common_id = lid
#
# if common_id in self.licenses_list:
# res_id = common_id
#
# # check if title is ID or if title is matching
# if not res_id and title:
# if title in TITLE_MAPPING_TO_OPEN_DEF and TITLE_MAPPING_TO_OPEN_DEF[title]:
# common_id = TITLE_MAPPING_TO_OPEN_DEF[title]
# else:
# common_id = title
#
# if common_id in self.licenses_list:
# res_id = common_id
# else:
# for l in self.licenses_list:
# if self.licenses_list[l].get('title') == common_id:
# res_id = l
# break
#
# # check if any url is matching
# if not res_id and url:
# for l in self.licenses_list:
# if self.licenses_list[l].get('url') == url:
# res_id = l
# break
#
# # assign any possible ID if not already found
# if not res_id:
# if lid:
# res_id = lid
# else:
# res_id = title if title else url
#
# # return a tuple (ID, od_conformance)
# if res_id in self.licenses_list:
# return res_id, self.licenses_list[res_id].get('od_conformance', 'not found')
# return res_id, 'not found'
#
# def is_open(self, id):
# return 'approved' == self.get_od_conformance(id)
#
# def get_od_conformance(self, id):
# if id in self.licenses_list:
# return self.licenses_list[id].get('od_conformance', 'not found')
# return 'not found'
#
# Path: odpw/core/dataset_converter.py
# DCAT = Namespace("http://www.w3.org/ns/dcat#")
#
# DCT = Namespace("http://purl.org/dc/terms/")
#
# VCARD = Namespace("http://www.w3.org/2006/vcard/ns#")
#
# FOAF = Namespace("http://xmlns.com/foaf/0.1/")
. Output only the next line. | if str(DCAT.Dataset) in dcat_el.get('@type',[]): |
Using the snippet: <|code_start|>'''
Created on Aug 26, 2015
@author: jumbrich
'''
# -*- coding: utf-8 -*-
def accessDataset(dataset, key ):
value=[]
key=str(key)
#for dcat_el in dataset['dcat']:
for dcat_el in getattr(dataset,'dcat',[]):
if str(DCAT.Dataset) in dcat_el.get('@type',[]):
for f in dcat_el.get(key,[]):
v=None
if '@value' in f:
v = f['@value']
elif '@id' in f:
v = f['@id']
value.append(v.strip())
return value
#http://www.w3.org/TR/vocab-dcat/#Class:_Dataset
def getTitle(dataset):
<|code_end|>
, determine the next line of code. You have imports:
from rdflib.namespace import RDFS, RDF
from odpw.core.licenses_mapping import LicensesOpennessMapping
from odpw.core.dataset_converter import DCAT, DCT, VCARD, FOAF
and context (class names, function names, or code) available:
# Path: odpw/core/licenses_mapping.py
# class LicensesOpennessMapping:
#
# __licenses_list=None
#
# def __init__(self):
# if not LicensesOpennessMapping.__licenses_list:
# try:
# resp = requests.get(OPEN_DEFINITION)
# if resp.status_code != requests.codes.ok:
# raise Exception("(%s) Cannot get OpenDefinition licenses.", OPEN_DEFINITION)
# self.licenses_list = resp.json()
# except Exception as e:
# try:
# resp = requests.get(GITHUB)
# if resp.status_code != requests.codes.ok:
# raise Exception("(%s) Cannot get OpenDefinition licenses.", OPEN_DEFINITION)
# self.licenses_list = resp.json()
# except Exception as e:
# with open(LOCAL_FILE, 'r') as f:
# self.licenses_list = json.load(f)
# LicensesOpennessMapping.__licenses_list= self.licenses_list
# else:
# self.licenses_list = LicensesOpennessMapping.__licenses_list
#
# def map_license(self, title, lid, url):
# res_id = None
#
# # at first check if ID is matching
# if lid:
# if lid in ID_MAPPING_TO_OPEN_DEF and ID_MAPPING_TO_OPEN_DEF[lid]:
# common_id = ID_MAPPING_TO_OPEN_DEF[lid]
# else:
# common_id = lid
#
# if common_id in self.licenses_list:
# res_id = common_id
#
# # check if title is ID or if title is matching
# if not res_id and title:
# if title in TITLE_MAPPING_TO_OPEN_DEF and TITLE_MAPPING_TO_OPEN_DEF[title]:
# common_id = TITLE_MAPPING_TO_OPEN_DEF[title]
# else:
# common_id = title
#
# if common_id in self.licenses_list:
# res_id = common_id
# else:
# for l in self.licenses_list:
# if self.licenses_list[l].get('title') == common_id:
# res_id = l
# break
#
# # check if any url is matching
# if not res_id and url:
# for l in self.licenses_list:
# if self.licenses_list[l].get('url') == url:
# res_id = l
# break
#
# # assign any possible ID if not already found
# if not res_id:
# if lid:
# res_id = lid
# else:
# res_id = title if title else url
#
# # return a tuple (ID, od_conformance)
# if res_id in self.licenses_list:
# return res_id, self.licenses_list[res_id].get('od_conformance', 'not found')
# return res_id, 'not found'
#
# def is_open(self, id):
# return 'approved' == self.get_od_conformance(id)
#
# def get_od_conformance(self, id):
# if id in self.licenses_list:
# return self.licenses_list[id].get('od_conformance', 'not found')
# return 'not found'
#
# Path: odpw/core/dataset_converter.py
# DCAT = Namespace("http://www.w3.org/ns/dcat#")
#
# DCT = Namespace("http://purl.org/dc/terms/")
#
# VCARD = Namespace("http://www.w3.org/2006/vcard/ns#")
#
# FOAF = Namespace("http://xmlns.com/foaf/0.1/")
. Output only the next line. | return accessDataset(dataset, DCT.title) |
Predict the next line after this snippet: <|code_start|> for f in dcat_el.get(key,[]):
v=None
if '@value' in f:
v = f['@value']
elif '@id' in f:
v = f['@id']
value.append(v.strip())
return value
#http://www.w3.org/TR/vocab-dcat/#Class:_Dataset
def getTitle(dataset):
return accessDataset(dataset, DCT.title)
def getDescription(dataset):
return accessDataset(dataset, DCT.description)
def getCreationDate(dataset):
return accessDataset(dataset, DCT.issued)
def getOrganization(dataset):
for dcat_el in getattr(dataset,'dcat',[]):
#TODO there is also a FOAF.Ogranisation
if str(FOAF.Organization) in dcat_el.get('@type',[]):
for tag in dcat_el.get(str(FOAF.name),[]):
orga=tag['@value']
if orga is not None and len(orga)>0:
return orga
for dcat_el in getattr(dataset,'dcat',[]):
#TODO there is also a FOAF.Ogranisation
<|code_end|>
using the current file's imports:
from rdflib.namespace import RDFS, RDF
from odpw.core.licenses_mapping import LicensesOpennessMapping
from odpw.core.dataset_converter import DCAT, DCT, VCARD, FOAF
and any relevant context from other files:
# Path: odpw/core/licenses_mapping.py
# class LicensesOpennessMapping:
#
# __licenses_list=None
#
# def __init__(self):
# if not LicensesOpennessMapping.__licenses_list:
# try:
# resp = requests.get(OPEN_DEFINITION)
# if resp.status_code != requests.codes.ok:
# raise Exception("(%s) Cannot get OpenDefinition licenses.", OPEN_DEFINITION)
# self.licenses_list = resp.json()
# except Exception as e:
# try:
# resp = requests.get(GITHUB)
# if resp.status_code != requests.codes.ok:
# raise Exception("(%s) Cannot get OpenDefinition licenses.", OPEN_DEFINITION)
# self.licenses_list = resp.json()
# except Exception as e:
# with open(LOCAL_FILE, 'r') as f:
# self.licenses_list = json.load(f)
# LicensesOpennessMapping.__licenses_list= self.licenses_list
# else:
# self.licenses_list = LicensesOpennessMapping.__licenses_list
#
# def map_license(self, title, lid, url):
# res_id = None
#
# # at first check if ID is matching
# if lid:
# if lid in ID_MAPPING_TO_OPEN_DEF and ID_MAPPING_TO_OPEN_DEF[lid]:
# common_id = ID_MAPPING_TO_OPEN_DEF[lid]
# else:
# common_id = lid
#
# if common_id in self.licenses_list:
# res_id = common_id
#
# # check if title is ID or if title is matching
# if not res_id and title:
# if title in TITLE_MAPPING_TO_OPEN_DEF and TITLE_MAPPING_TO_OPEN_DEF[title]:
# common_id = TITLE_MAPPING_TO_OPEN_DEF[title]
# else:
# common_id = title
#
# if common_id in self.licenses_list:
# res_id = common_id
# else:
# for l in self.licenses_list:
# if self.licenses_list[l].get('title') == common_id:
# res_id = l
# break
#
# # check if any url is matching
# if not res_id and url:
# for l in self.licenses_list:
# if self.licenses_list[l].get('url') == url:
# res_id = l
# break
#
# # assign any possible ID if not already found
# if not res_id:
# if lid:
# res_id = lid
# else:
# res_id = title if title else url
#
# # return a tuple (ID, od_conformance)
# if res_id in self.licenses_list:
# return res_id, self.licenses_list[res_id].get('od_conformance', 'not found')
# return res_id, 'not found'
#
# def is_open(self, id):
# return 'approved' == self.get_od_conformance(id)
#
# def get_od_conformance(self, id):
# if id in self.licenses_list:
# return self.licenses_list[id].get('od_conformance', 'not found')
# return 'not found'
#
# Path: odpw/core/dataset_converter.py
# DCAT = Namespace("http://www.w3.org/ns/dcat#")
#
# DCT = Namespace("http://purl.org/dc/terms/")
#
# VCARD = Namespace("http://www.w3.org/2006/vcard/ns#")
#
# FOAF = Namespace("http://xmlns.com/foaf/0.1/")
. Output only the next line. | if str(VCARD.Organization) in dcat_el.get('@type',[]): |
Given snippet: <|code_start|>
def accessDataset(dataset, key ):
value=[]
key=str(key)
#for dcat_el in dataset['dcat']:
for dcat_el in getattr(dataset,'dcat',[]):
if str(DCAT.Dataset) in dcat_el.get('@type',[]):
for f in dcat_el.get(key,[]):
v=None
if '@value' in f:
v = f['@value']
elif '@id' in f:
v = f['@id']
value.append(v.strip())
return value
#http://www.w3.org/TR/vocab-dcat/#Class:_Dataset
def getTitle(dataset):
return accessDataset(dataset, DCT.title)
def getDescription(dataset):
return accessDataset(dataset, DCT.description)
def getCreationDate(dataset):
return accessDataset(dataset, DCT.issued)
def getOrganization(dataset):
for dcat_el in getattr(dataset,'dcat',[]):
#TODO there is also a FOAF.Ogranisation
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from rdflib.namespace import RDFS, RDF
from odpw.core.licenses_mapping import LicensesOpennessMapping
from odpw.core.dataset_converter import DCAT, DCT, VCARD, FOAF
and context:
# Path: odpw/core/licenses_mapping.py
# class LicensesOpennessMapping:
#
# __licenses_list=None
#
# def __init__(self):
# if not LicensesOpennessMapping.__licenses_list:
# try:
# resp = requests.get(OPEN_DEFINITION)
# if resp.status_code != requests.codes.ok:
# raise Exception("(%s) Cannot get OpenDefinition licenses.", OPEN_DEFINITION)
# self.licenses_list = resp.json()
# except Exception as e:
# try:
# resp = requests.get(GITHUB)
# if resp.status_code != requests.codes.ok:
# raise Exception("(%s) Cannot get OpenDefinition licenses.", OPEN_DEFINITION)
# self.licenses_list = resp.json()
# except Exception as e:
# with open(LOCAL_FILE, 'r') as f:
# self.licenses_list = json.load(f)
# LicensesOpennessMapping.__licenses_list= self.licenses_list
# else:
# self.licenses_list = LicensesOpennessMapping.__licenses_list
#
# def map_license(self, title, lid, url):
# res_id = None
#
# # at first check if ID is matching
# if lid:
# if lid in ID_MAPPING_TO_OPEN_DEF and ID_MAPPING_TO_OPEN_DEF[lid]:
# common_id = ID_MAPPING_TO_OPEN_DEF[lid]
# else:
# common_id = lid
#
# if common_id in self.licenses_list:
# res_id = common_id
#
# # check if title is ID or if title is matching
# if not res_id and title:
# if title in TITLE_MAPPING_TO_OPEN_DEF and TITLE_MAPPING_TO_OPEN_DEF[title]:
# common_id = TITLE_MAPPING_TO_OPEN_DEF[title]
# else:
# common_id = title
#
# if common_id in self.licenses_list:
# res_id = common_id
# else:
# for l in self.licenses_list:
# if self.licenses_list[l].get('title') == common_id:
# res_id = l
# break
#
# # check if any url is matching
# if not res_id and url:
# for l in self.licenses_list:
# if self.licenses_list[l].get('url') == url:
# res_id = l
# break
#
# # assign any possible ID if not already found
# if not res_id:
# if lid:
# res_id = lid
# else:
# res_id = title if title else url
#
# # return a tuple (ID, od_conformance)
# if res_id in self.licenses_list:
# return res_id, self.licenses_list[res_id].get('od_conformance', 'not found')
# return res_id, 'not found'
#
# def is_open(self, id):
# return 'approved' == self.get_od_conformance(id)
#
# def get_od_conformance(self, id):
# if id in self.licenses_list:
# return self.licenses_list[id].get('od_conformance', 'not found')
# return 'not found'
#
# Path: odpw/core/dataset_converter.py
# DCAT = Namespace("http://www.w3.org/ns/dcat#")
#
# DCT = Namespace("http://purl.org/dc/terms/")
#
# VCARD = Namespace("http://www.w3.org/2006/vcard/ns#")
#
# FOAF = Namespace("http://xmlns.com/foaf/0.1/")
which might include code, classes, or functions. Output only the next line. | if str(FOAF.Organization) in dcat_el.get('@type',[]): |
Continue the code snippet: <|code_start|>
class DBClient(object):
def __init__(self, dbm=None, Session=None):
if dbm is not None:
self.dbm=dbm
self.Session = scoped_session(dbm.session_factory)
elif Session:
self.Session
<|code_end|>
. Use current file imports:
import pandas as pd
from sqlalchemy import and_
from sqlalchemy import func, exists
from sqlalchemy.orm import scoped_session
from odpw.core.db import row2dict
from odpw.core.model import PortalSnapshotQuality, ResourceCrawlLog, ResourceHistory
from odpw.utils.plots import qa
from odpw.core.model import DatasetData, DatasetQuality, Dataset, Base, Portal, PortalSnapshotQuality, PortalSnapshot, \
tab_datasets, tab_resourcesinfo, ResourceInfo, MetaResource, PortalSnapshotDynamicity
from contextlib import contextmanager
and context (classes, functions, or code) from other files:
# Path: odpw/core/db.py
# def row2dict(r):
# if hasattr(r, '_fields'):
# d = {}
# for field in r._fields:
# rf = r.__getattribute__(field)
# if isinstance(rf, Base):
# d.update(_row2dict(rf))
# else:
# d[field] = rf
# return d
# if isinstance(r, Base):
# return _row2dict(r)
#
# Path: odpw/core/model.py
# class PortalSnapshotQuality(Base):
# __tablename__ = tab_portalsnapshotquality
#
# portalid = Column(String, ForeignKey(tab_portals+'.id'), primary_key=True, index=True,nullable=False)
# snapshot= Column( SmallInteger, primary_key=True)
# portal = relationship("Portal", back_populates="snapshotsquality")
#
# cocu = Column(Float)
# cocuN = Column(Integer)
# coce = Column(Float)
# coceN = Column(Integer)
# coda = Column(Float)
# codaN = Column(Integer)
# cofo = Column(Float)
# cofoN = Column(Integer)
# coli = Column(Float)
# coliN = Column(Integer)
# coac = Column(Float)
# coacN = Column(Integer)
# exda = Column(Float)
# exdaN = Column(Integer)
# exri = Column(Float)
# exriN = Column(Integer)
# expr = Column(Float)
# exprN = Column(Integer)
# exac = Column(Float)
# exacN = Column(Integer)
# exdi = Column(Float)
# exdiN = Column(Integer)
# exte = Column(Float)
# exteN = Column(Integer)
# exsp = Column(Float)
# exspN = Column(Integer)
# exco = Column(Float)
# excoN = Column(Integer)
# opfo = Column(Float)
# opfoN = Column(Integer)
# opma = Column(Float)
# opmaN = Column(Integer)
# opli = Column(Float)
# opliN = Column(Integer)
# datasets=Column(Integer)
#
# def __repr__(self):
# return "<PortalSnapshotQuality(id=%s, snapshot=%s, agg=%s)>" % (
# self.portalid, self.snapshot, any([self.exda,self.coac,self.coce,self.cocu]))
#
# class ResourceCrawlLog(Base):
# __tablename__ = tab_resourcescrawllog
#
# uri= Column(String, primary_key=True)
# snapshot= Column(SmallInteger, primary_key=True)
# timestamp= Column(TIMESTAMP, primary_key=True)
# status=Column(SmallInteger, index=True)
#
# exc=Column(String)
# header=Column(JSONB)
# mime=Column(String)
# size=Column(BigInteger)
# crawltime=Column(BigInteger)
#
# referrer=Column( String)
# disklocation=Column( String)
# digest=Column( String)
# contentchanged=Column( Integer)
# domain=Column( String, index=True)
#
# class ResourceHistory(Base):
# __tablename__ = tab_resourceshistory
#
# uri = Column(String, primary_key=True)
# snapshot = Column(SmallInteger, primary_key=True)
# md5 = Column(String,ForeignKey(DatasetData.md5), primary_key=True)
# modified = Column(TIMESTAMP)
# source = Column(String, primary_key=True)
#
# Path: odpw/utils/plots.py
# def hm():
# def getFetchProcessChart(db, snapshot, n=3):
# def getData(db, snapshot, n=3):
# def fetchProcessChart(data,cnts):
# def portalsScatter(df):
# def get_dataset(df, name):
# def qualityChart(df):
# def rad(mic):
# def evolSize(source,df):
# def evolutionCharts(df):
# def getWeekStringTick():
# def systemEvolutionBarPlot(df, yLabel, values):
# def systemEvolutionPlot(df):
# def portalDynamicity(df):
# def getWeekString(yearweek):
# LINE_ARGS = dict(color=c, line_color=None)
#
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
. Output only the next line. | Base.query = self.Session.query_property() |
Given the following code snippet before the placeholder: <|code_start|># head, tail = os.path.split(pname)
# head= head.replace("/data/datamonitor","")
#
# url= 'http://csvengine.ai.wu.ac.at/'+os.path.join(head,urllib.quote(tail))
# print url
# return redirect(url, code=302)
#
# except Exception as e:
# raise e
#===============================================================================
def get_data(pname):
head, tail = os.path.split(pname)
head = head.replace("/data/datamonitor", "")
filename = os.path.join(head, tail)
if not os.path.isfile(filename):
current_app.logger.error("File %s does not exists", filename)
return send_file(filename, as_attachment=True)
@ns.route('/data/<path:url>')
@ns.route('/memento/<path:url>')
@ns.doc(params={'url': 'URL (HTTP URL)'})
class GetDataWithoutDate(Resource):
def get(self, url):
session = current_app.config['dbsession']
<|code_end|>
, predict the next line using imports from the current file:
import logging
import os
import functools
from flask import after_this_request
from odpw.core.model import ResourceCrawlLog
from flask import request
from flask import send_file
from flask import url_for
from flask_restplus import Resource
from odpw.web_rest.rest.odpw_restapi import api
from odpw.utils.datamonitor_utils import parseDate
from flask import Blueprint, current_app, render_template
from flask import jsonify,Response
from datetime import datetime
and context including class names, function names, and sometimes code from other files:
# Path: odpw/core/model.py
# class ResourceCrawlLog(Base):
# __tablename__ = tab_resourcescrawllog
#
# uri= Column(String, primary_key=True)
# snapshot= Column(SmallInteger, primary_key=True)
# timestamp= Column(TIMESTAMP, primary_key=True)
# status=Column(SmallInteger, index=True)
#
# exc=Column(String)
# header=Column(JSONB)
# mime=Column(String)
# size=Column(BigInteger)
# crawltime=Column(BigInteger)
#
# referrer=Column( String)
# disklocation=Column( String)
# digest=Column( String)
# contentchanged=Column( Integer)
# domain=Column( String, index=True)
#
# Path: odpw/web_rest/rest/odpw_restapi.py
# def default_error_handler(error):
# def database_not_found_error_handler(e):
#
# Path: odpw/utils/datamonitor_utils.py
# def parseDate(date):
# date_parts = []
# # YYYY<MM|DD|HH|MM|SS>
# if len(date) < 4:
# # error
# pass
# else:
# date_parts_length = [4, 2, 2, 2, 2, 2]
# date_parts = []
# for l in date_parts_length:
# if len(date) >= l:
# date_parts.append(int(date[0:l]))
# date = date[l:]
#
# if len(date_parts) == 0:
# return None
# elif len(date_parts) == 1:
# date_parts.append(1)
# date_parts.append(1)
# elif len(date_parts) == 2:
# date_parts.append(1)
#
# return datetime.datetime(*map(int, date_parts))
. Output only the next line. | res = session.query(ResourceCrawlLog.disklocation).filter(ResourceCrawlLog.uri == url).order_by(ResourceCrawlLog.timestamp.desc()).first() |
Here is a snippet: <|code_start|>'''
Created on Jan 3, 2016
@author: jumbrich
'''
log = logging.getLogger(__name__)
<|code_end|>
. Write the next line using the current file imports:
import logging
import os
import functools
from flask import after_this_request
from odpw.core.model import ResourceCrawlLog
from flask import request
from flask import send_file
from flask import url_for
from flask_restplus import Resource
from odpw.web_rest.rest.odpw_restapi import api
from odpw.utils.datamonitor_utils import parseDate
from flask import Blueprint, current_app, render_template
from flask import jsonify,Response
from datetime import datetime
and context from other files:
# Path: odpw/core/model.py
# class ResourceCrawlLog(Base):
# __tablename__ = tab_resourcescrawllog
#
# uri= Column(String, primary_key=True)
# snapshot= Column(SmallInteger, primary_key=True)
# timestamp= Column(TIMESTAMP, primary_key=True)
# status=Column(SmallInteger, index=True)
#
# exc=Column(String)
# header=Column(JSONB)
# mime=Column(String)
# size=Column(BigInteger)
# crawltime=Column(BigInteger)
#
# referrer=Column( String)
# disklocation=Column( String)
# digest=Column( String)
# contentchanged=Column( Integer)
# domain=Column( String, index=True)
#
# Path: odpw/web_rest/rest/odpw_restapi.py
# def default_error_handler(error):
# def database_not_found_error_handler(e):
#
# Path: odpw/utils/datamonitor_utils.py
# def parseDate(date):
# date_parts = []
# # YYYY<MM|DD|HH|MM|SS>
# if len(date) < 4:
# # error
# pass
# else:
# date_parts_length = [4, 2, 2, 2, 2, 2]
# date_parts = []
# for l in date_parts_length:
# if len(date) >= l:
# date_parts.append(int(date[0:l]))
# date = date[l:]
#
# if len(date_parts) == 0:
# return None
# elif len(date_parts) == 1:
# date_parts.append(1)
# date_parts.append(1)
# elif len(date_parts) == 2:
# date_parts.append(1)
#
# return datetime.datetime(*map(int, date_parts))
, which may include functions, classes, or code. Output only the next line. | ns = api.namespace('data', description='Operations related to blog categories') |
Continue the code snippet: <|code_start|>
filename = os.path.join(head, tail)
if not os.path.isfile(filename):
current_app.logger.error("File %s does not exists", filename)
return send_file(filename, as_attachment=True)
@ns.route('/data/<path:url>')
@ns.route('/memento/<path:url>')
@ns.doc(params={'url': 'URL (HTTP URL)'})
class GetDataWithoutDate(Resource):
def get(self, url):
session = current_app.config['dbsession']
res = session.query(ResourceCrawlLog.disklocation).filter(ResourceCrawlLog.uri == url).order_by(ResourceCrawlLog.timestamp.desc()).first()
if res:
pname = res[0]
return get_data(pname)
else:
resp = jsonify({'error': 'no archived version available'})
return resp
@ns.route('/data/<date>/<path:url>')
@ns.route('/memento/<date>/<path:url>')
@ns.doc(params={'url': 'URL (HTTP URL)', 'date':'Date as \"YYYY<MM|DD|HH|MM|SS>\"'})
class GetData(Resource):
def get(self, date, url):
<|code_end|>
. Use current file imports:
import logging
import os
import functools
from flask import after_this_request
from odpw.core.model import ResourceCrawlLog
from flask import request
from flask import send_file
from flask import url_for
from flask_restplus import Resource
from odpw.web_rest.rest.odpw_restapi import api
from odpw.utils.datamonitor_utils import parseDate
from flask import Blueprint, current_app, render_template
from flask import jsonify,Response
from datetime import datetime
and context (classes, functions, or code) from other files:
# Path: odpw/core/model.py
# class ResourceCrawlLog(Base):
# __tablename__ = tab_resourcescrawllog
#
# uri= Column(String, primary_key=True)
# snapshot= Column(SmallInteger, primary_key=True)
# timestamp= Column(TIMESTAMP, primary_key=True)
# status=Column(SmallInteger, index=True)
#
# exc=Column(String)
# header=Column(JSONB)
# mime=Column(String)
# size=Column(BigInteger)
# crawltime=Column(BigInteger)
#
# referrer=Column( String)
# disklocation=Column( String)
# digest=Column( String)
# contentchanged=Column( Integer)
# domain=Column( String, index=True)
#
# Path: odpw/web_rest/rest/odpw_restapi.py
# def default_error_handler(error):
# def database_not_found_error_handler(e):
#
# Path: odpw/utils/datamonitor_utils.py
# def parseDate(date):
# date_parts = []
# # YYYY<MM|DD|HH|MM|SS>
# if len(date) < 4:
# # error
# pass
# else:
# date_parts_length = [4, 2, 2, 2, 2, 2]
# date_parts = []
# for l in date_parts_length:
# if len(date) >= l:
# date_parts.append(int(date[0:l]))
# date = date[l:]
#
# if len(date_parts) == 0:
# return None
# elif len(date_parts) == 1:
# date_parts.append(1)
# date_parts.append(1)
# elif len(date_parts) == 2:
# date_parts.append(1)
#
# return datetime.datetime(*map(int, date_parts))
. Output only the next line. | d = parseDate(date) |
Given snippet: <|code_start|>
log =structlog.get_logger()
def resourceMigrate(snapshot, db, dbm):
iter=Resource.iter(dbm.getResources(snapshot=snapshot))
batch=[]
for R in iter:
uri=R.url
uri=uri.replace("http:// \thttp:","http:")
uri=uri.replace("http:// http://","http://")
r={
'snapshot':R.snapshot
,'uri':uri
,'timestamp':R.timestamp
,'status':R.status
,'exc':R.exception
,'header':R.header
,'mime':R.mime
,'size':R.size
}
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import structlog
from odpw.core.model import ResourceInfo
from odpw.utils.timer import Timer
from odpw.db.models import Resource
and context:
# Path: odpw/core/model.py
# class ResourceInfo(Base):
# __tablename__ = tab_resourcesinfo
#
# uri= Column(String, primary_key=True)
# snapshot= Column(SmallInteger, primary_key=True)
# timestamp= Column(TIMESTAMP)
# status=Column(SmallInteger)
# exc=Column(String)
# header=Column(JSONB)
# mime=Column(String)
# size=Column(BigInteger)
which might include code, classes, or functions. Output only the next line. | RI=ResourceInfo(**r) |
Given the code snippet: <|code_start|> "apiuri": fields.String(required=True, description='API URI of the portal'),
"id": fields.String(required=True, description='Internal Portal id'),
"software": fields.String(required=True, description='Software powering the portal'),
})
portalstats = ns.inherit('PortalStats', portal, {
"snapshot_count": fields.Integer(required=True, description='Software powering the portal'),
"last_snapshot": fields.Integer(required=True, description='Software powering the portal'),
"datasetCount": fields.Integer(required=True, description='Software powering the portal'),
"resourceCount": fields.Integer(required=True, description='Software powering the portal'),
})
@ns.route('/list')
class Portals(Resource):
#@ns.expect(pagination_arguments)
#@api.marshal_with(portal, as_list=True)
@ns.doc(description='Get a list of all portals (including the internal portal ID)')
def get(self):
"""
Returns list of portals.
"""
#args = pagination_arguments.parse_args(request)
#page = args.get('page', 1)
#per_page = args.get('per_page', 10)
session=current_app.config['dbsession']
<|code_end|>
, generate the next line using the imports in this file:
import json
import logging
from flask import current_app, Response, jsonify
from odpw.web_rest.rest.odpw_restapi import api
from odpw.web_rest.rest.helpers import toJSON, toCSV
from odpw.core.db import row2dict
from odpw.core.model import Portal, PortalSnapshotQuality, PortalSnapshot, ResourceInfo, Base
from odpw.web_rest.cache import cache
from flask_restplus import Resource
from flask import request
from flask_restplus import Resource
from flask_restplus import reqparse
from flask_restplus import fields
and context (functions, classes, or occasionally code) from other files:
# Path: odpw/web_rest/rest/odpw_restapi.py
# def default_error_handler(error):
# def database_not_found_error_handler(e):
#
# Path: odpw/web_rest/rest/helpers.py
# def toJSON(func):
# @wraps(func)
# def decorated_function(*args, **kwargs):
# results= func(*args, **kwargs)
# return Response(json.dumps(results), mimetype='application/json')
#
# return decorated_function
#
# def toCSV(func):
# @wraps(func)
# def decorated_function(*args, **kwargs):
# results= func(*args, **kwargs)
#
# keys = results[0].keys()
#
# si = StringIO.StringIO()
# cw = csv.DictWriter(si,keys)
# cw.writeheader()
# cw.writerows(results)
#
# output = make_response(si.getvalue())
# #output.headers["Content-Disposition"] = "attachment; filename=portals.csv"
# output.headers["Content-type"] = "text/csv"
# return output
#
# return decorated_function
#
# Path: odpw/core/db.py
# def row2dict(r):
# if hasattr(r, '_fields'):
# d = {}
# for field in r._fields:
# rf = r.__getattribute__(field)
# if isinstance(rf, Base):
# d.update(_row2dict(rf))
# else:
# d[field] = rf
# return d
# if isinstance(r, Base):
# return _row2dict(r)
#
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
#
# Path: odpw/web_rest/cache.py
. Output only the next line. | data= [row2dict(i) for i in session.query(Portal).all()] |
Here is a snippet: <|code_start|> "apiuri": fields.String(required=True, description='API URI of the portal'),
"id": fields.String(required=True, description='Internal Portal id'),
"software": fields.String(required=True, description='Software powering the portal'),
})
portalstats = ns.inherit('PortalStats', portal, {
"snapshot_count": fields.Integer(required=True, description='Software powering the portal'),
"last_snapshot": fields.Integer(required=True, description='Software powering the portal'),
"datasetCount": fields.Integer(required=True, description='Software powering the portal'),
"resourceCount": fields.Integer(required=True, description='Software powering the portal'),
})
@ns.route('/list')
class Portals(Resource):
#@ns.expect(pagination_arguments)
#@api.marshal_with(portal, as_list=True)
@ns.doc(description='Get a list of all portals (including the internal portal ID)')
def get(self):
"""
Returns list of portals.
"""
#args = pagination_arguments.parse_args(request)
#page = args.get('page', 1)
#per_page = args.get('per_page', 10)
session=current_app.config['dbsession']
<|code_end|>
. Write the next line using the current file imports:
import json
import logging
from flask import current_app, Response, jsonify
from odpw.web_rest.rest.odpw_restapi import api
from odpw.web_rest.rest.helpers import toJSON, toCSV
from odpw.core.db import row2dict
from odpw.core.model import Portal, PortalSnapshotQuality, PortalSnapshot, ResourceInfo, Base
from odpw.web_rest.cache import cache
from flask_restplus import Resource
from flask import request
from flask_restplus import Resource
from flask_restplus import reqparse
from flask_restplus import fields
and context from other files:
# Path: odpw/web_rest/rest/odpw_restapi.py
# def default_error_handler(error):
# def database_not_found_error_handler(e):
#
# Path: odpw/web_rest/rest/helpers.py
# def toJSON(func):
# @wraps(func)
# def decorated_function(*args, **kwargs):
# results= func(*args, **kwargs)
# return Response(json.dumps(results), mimetype='application/json')
#
# return decorated_function
#
# def toCSV(func):
# @wraps(func)
# def decorated_function(*args, **kwargs):
# results= func(*args, **kwargs)
#
# keys = results[0].keys()
#
# si = StringIO.StringIO()
# cw = csv.DictWriter(si,keys)
# cw.writeheader()
# cw.writerows(results)
#
# output = make_response(si.getvalue())
# #output.headers["Content-Disposition"] = "attachment; filename=portals.csv"
# output.headers["Content-type"] = "text/csv"
# return output
#
# return decorated_function
#
# Path: odpw/core/db.py
# def row2dict(r):
# if hasattr(r, '_fields'):
# d = {}
# for field in r._fields:
# rf = r.__getattribute__(field)
# if isinstance(rf, Base):
# d.update(_row2dict(rf))
# else:
# d[field] = rf
# return d
# if isinstance(r, Base):
# return _row2dict(r)
#
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
#
# Path: odpw/web_rest/cache.py
, which may include functions, classes, or code. Output only the next line. | data= [row2dict(i) for i in session.query(Portal).all()] |
Here is a snippet: <|code_start|>
@ns.doc(description='Get all portals and additional information such as last snapshot and number of datasets')
def get(self):
"""
Returns list of portals with additon stats.
"""
#args = pagination_arguments.parse_args(request)
#page = args.get('page', 1)
#per_page = args.get('per_page', 10)
session=current_app.config['dbsession']
data=[row2dict(r) for r in session.query(Portal, Portal.snapshot_count, Portal.first_snapshot, Portal.last_snapshot, Portal.datasetcount, Portal.resourcecount)]
return Response(json.dumps(data),
mimetype='application/json')
@ns.route('/quality/<int:snapshot>')
class PortalsQuality(Resource):
#@ns.expect(pagination_arguments)
#@api.marshal_with(portalsquality, as_list=True)
@ns.doc('get_PortalsQuality')
def get(self, snapshot):
"""
get list of portals with their quality assessment metrics for the specified snapshot
"""
#args = pagination_arguments.parse_args(request)
#page = args.get('page', 1)
#per_page = args.get('per_page', 10)
session=current_app.config['dbsession']
<|code_end|>
. Write the next line using the current file imports:
import json
import logging
from flask import current_app, Response, jsonify
from odpw.web_rest.rest.odpw_restapi import api
from odpw.web_rest.rest.helpers import toJSON, toCSV
from odpw.core.db import row2dict
from odpw.core.model import Portal, PortalSnapshotQuality, PortalSnapshot, ResourceInfo, Base
from odpw.web_rest.cache import cache
from flask_restplus import Resource
from flask import request
from flask_restplus import Resource
from flask_restplus import reqparse
from flask_restplus import fields
and context from other files:
# Path: odpw/web_rest/rest/odpw_restapi.py
# def default_error_handler(error):
# def database_not_found_error_handler(e):
#
# Path: odpw/web_rest/rest/helpers.py
# def toJSON(func):
# @wraps(func)
# def decorated_function(*args, **kwargs):
# results= func(*args, **kwargs)
# return Response(json.dumps(results), mimetype='application/json')
#
# return decorated_function
#
# def toCSV(func):
# @wraps(func)
# def decorated_function(*args, **kwargs):
# results= func(*args, **kwargs)
#
# keys = results[0].keys()
#
# si = StringIO.StringIO()
# cw = csv.DictWriter(si,keys)
# cw.writeheader()
# cw.writerows(results)
#
# output = make_response(si.getvalue())
# #output.headers["Content-Disposition"] = "attachment; filename=portals.csv"
# output.headers["Content-type"] = "text/csv"
# return output
#
# return decorated_function
#
# Path: odpw/core/db.py
# def row2dict(r):
# if hasattr(r, '_fields'):
# d = {}
# for field in r._fields:
# rf = r.__getattribute__(field)
# if isinstance(rf, Base):
# d.update(_row2dict(rf))
# else:
# d[field] = rf
# return d
# if isinstance(r, Base):
# return _row2dict(r)
#
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
#
# Path: odpw/web_rest/cache.py
, which may include functions, classes, or code. Output only the next line. | data=[row2dict(r) for r in session.query(Portal, Portal.datasetcount, Portal.resourcecount).join(PortalSnapshotQuality).filter(PortalSnapshotQuality.snapshot==snapshot).add_entity(PortalSnapshotQuality)] |
Continue the code snippet: <|code_start|>
for table_name in inspector.get_table_names():
fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk['name']:
continue
fks.append(
ForeignKeyConstraint((), (), name=fk['name'])
)
t = Table(table_name, metadata, *fks)
tbs.append(t)
all_fks.extend(fks)
for fkc in all_fks:
conn.execute(DropConstraint(fkc, cascade=True))
for table in tbs:
if table.name != tab_datasets and table.name != tab_resourcesinfo:
conn.execute(DropTable(table))
trans.commit()
def tableExists(self,Variable_tableName):
return self.engine.has_table(self.engine, Variable_tableName)
def viewExists(self,Variable_tableName):
print 'check if table {} exists'.format(Variable_tableName)
return self.engine.connect().execute()
def init(self, Base):
<|code_end|>
. Use current file imports:
from collections import defaultdict
from sqlalchemy import create_engine, inspect
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy.engine import reflection
from sqlalchemy.orm import sessionmaker
from sqlalchemy.schema import (
MetaData,
Table,
DropTable,
ForeignKeyConstraint,
DDL
)
from sqlalchemy.sql.ddl import DropConstraint
from odpw.core.model import Dataset, Base, tab_datasets, tab_resourcesinfo, ResourceInfo, tab_resourcescrawllog
import os
import structlog
import time
and context (classes, functions, or code) from other files:
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
. Output only the next line. | event.listen(Dataset.__table__, 'after_create', self.dataset_insert_function) |
Next line prediction: <|code_start|> instance = inspect(obj)
for key, x in instance.attrs.items():
result[key].append(x.value)
return result
# _row2dict = lambda r: {c.name: str(getattr(r, c.name)) for c in r.__table__.columns}
def _row2dict(r):
data = {}
for c in r.__table__.columns:
att = getattr(r, c.name)
if hasattr(att, 'encode'):
at = att.encode('utf-8')
else:
at = att
data[c.name] = att
# if type(att) in [dict, list]:
# else:
# data[c.name] = str(att)
return data
def row2dict(r):
if hasattr(r, '_fields'):
d = {}
for field in r._fields:
rf = r.__getattribute__(field)
<|code_end|>
. Use current file imports:
(from collections import defaultdict
from sqlalchemy import create_engine, inspect
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy.engine import reflection
from sqlalchemy.orm import sessionmaker
from sqlalchemy.schema import (
MetaData,
Table,
DropTable,
ForeignKeyConstraint,
DDL
)
from sqlalchemy.sql.ddl import DropConstraint
from odpw.core.model import Dataset, Base, tab_datasets, tab_resourcesinfo, ResourceInfo, tab_resourcescrawllog
import os
import structlog
import time)
and context including class names, function names, or small code snippets from other files:
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
. Output only the next line. | if isinstance(rf, Base): |
Predict the next line after this snippet: <|code_start|> if port:
conn_string += ":" + str(port)
conn_string += "/" + db
log.info("Connecting DB")
self.engine = create_engine(conn_string, pool_size=20, client_encoding='utf8', echo=debug)
add_engine_pidguard(self.engine)
#add_query_time_logging(self.engine)
# register_after_fork(self.engine, self.engine.dispose)
log.info("Connected DB")
# self.engine.connect()
self.session_factory = sessionmaker(bind=self.engine) # , expire_on_commit=False
# self.session = self.Session()
self.dataset_insert_function = DDL(
"""
CREATE OR REPLACE FUNCTION dataset_insert_function()
RETURNS TRIGGER AS $$
DECLARE
_snapshot smallint;
_table_name text;
BEGIN
_snapshot := NEW.snapshot;
<|code_end|>
using the current file's imports:
from collections import defaultdict
from sqlalchemy import create_engine, inspect
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy.engine import reflection
from sqlalchemy.orm import sessionmaker
from sqlalchemy.schema import (
MetaData,
Table,
DropTable,
ForeignKeyConstraint,
DDL
)
from sqlalchemy.sql.ddl import DropConstraint
from odpw.core.model import Dataset, Base, tab_datasets, tab_resourcesinfo, ResourceInfo, tab_resourcescrawllog
import os
import structlog
import time
and any relevant context from other files:
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
. Output only the next line. | _table_name := '""" + tab_datasets + """_' || _snapshot; |
Based on the snippet: <|code_start|>
EXECUTE
'INSERT INTO '
|| quote_ident(_table_name)
|| ' VALUES ($1.*)'
USING NEW;
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
""")
self.dataset_insert_trigger = DDL(
"""
CREATE TRIGGER dataset_insert_trigger
BEFORE INSERT ON """ + tab_datasets + """
FOR EACH ROW EXECUTE PROCEDURE dataset_insert_function();
"""
)
self.resourcesinfo_insert_function = DDL(
"""
CREATE OR REPLACE FUNCTION resourcesinfo_insert_function()
RETURNS TRIGGER AS $$
DECLARE
_snapshot smallint;
_table_name text;
BEGIN
_snapshot := NEW.snapshot;
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import defaultdict
from sqlalchemy import create_engine, inspect
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy.engine import reflection
from sqlalchemy.orm import sessionmaker
from sqlalchemy.schema import (
MetaData,
Table,
DropTable,
ForeignKeyConstraint,
DDL
)
from sqlalchemy.sql.ddl import DropConstraint
from odpw.core.model import Dataset, Base, tab_datasets, tab_resourcesinfo, ResourceInfo, tab_resourcescrawllog
import os
import structlog
import time
and context (classes, functions, sometimes code) from other files:
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
. Output only the next line. | _table_name := '""" + tab_resourcesinfo + """_' || _snapshot; |
Using the snippet: <|code_start|> fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk['name']:
continue
fks.append(
ForeignKeyConstraint((), (), name=fk['name'])
)
t = Table(table_name, metadata, *fks)
tbs.append(t)
all_fks.extend(fks)
for fkc in all_fks:
conn.execute(DropConstraint(fkc, cascade=True))
for table in tbs:
if table.name != tab_datasets and table.name != tab_resourcesinfo:
conn.execute(DropTable(table))
trans.commit()
def tableExists(self,Variable_tableName):
return self.engine.has_table(self.engine, Variable_tableName)
def viewExists(self,Variable_tableName):
print 'check if table {} exists'.format(Variable_tableName)
return self.engine.connect().execute()
def init(self, Base):
event.listen(Dataset.__table__, 'after_create', self.dataset_insert_function)
event.listen(Dataset.__table__, 'after_create', self.dataset_insert_trigger)
<|code_end|>
, determine the next line of code. You have imports:
from collections import defaultdict
from sqlalchemy import create_engine, inspect
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy.engine import reflection
from sqlalchemy.orm import sessionmaker
from sqlalchemy.schema import (
MetaData,
Table,
DropTable,
ForeignKeyConstraint,
DDL
)
from sqlalchemy.sql.ddl import DropConstraint
from odpw.core.model import Dataset, Base, tab_datasets, tab_resourcesinfo, ResourceInfo, tab_resourcescrawllog
import os
import structlog
import time
and context (class names, function names, or code) available:
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
. Output only the next line. | event.listen(ResourceInfo.__table__, 'after_create', self.resourcesinfo_insert_function) |
Predict the next line for this snippet: <|code_start|> || quote_ident(_table_name)
|| ' VALUES ($1.*)'
USING NEW;
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
""")
self.resourcesinfo_insert_trigger = DDL(
"""
CREATE TRIGGER resourcesinfo_insert_trigger
BEFORE INSERT ON """ + tab_resourcesinfo + """
FOR EACH ROW EXECUTE PROCEDURE resourcesinfo_insert_function();
"""
)
self.resourcescrawllog_insert_function = DDL(
"""
CREATE OR REPLACE FUNCTION resourcescrawllog_insert_function()
RETURNS TRIGGER AS $$
DECLARE
_snapshot smallint;
_table_name text;
BEGIN
_snapshot := NEW.snapshot;
<|code_end|>
with the help of current file imports:
from collections import defaultdict
from sqlalchemy import create_engine, inspect
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy.engine import reflection
from sqlalchemy.orm import sessionmaker
from sqlalchemy.schema import (
MetaData,
Table,
DropTable,
ForeignKeyConstraint,
DDL
)
from sqlalchemy.sql.ddl import DropConstraint
from odpw.core.model import Dataset, Base, tab_datasets, tab_resourcesinfo, ResourceInfo, tab_resourcescrawllog
import os
import structlog
import time
and context from other files:
# Path: odpw/core/model.py
# class Portal(Base):
# class PortalSnapshot(Base):
# class Serializable(object):
# class PortalSnapshotDynamicity(Base,Serializable):
# class PortalSnapshotQuality(Base):
# class Dataset(Base):
# class DatasetData(Base):
# class DatasetQuality(Base):
# class MetaResource(Base):
# class ResourceInfo(Base):
# class ResourceCrawlLog(Base):
# class ResourceHistory(Base):
# class ResourceFreshness(Base):
# class FormatDist(Base):
# def snapshot_count(self):
# def snapshot_count(cls):
# def first_snapshot(self):
# def first_snapshot(cls):
# def last_snapshot(self):
# def last_snapshot(cls):
# def datasetcount(self):
# def datasetcount(cls):
# def resourcecount(self):
# def resourcecount(cls):
# def __repr__(self):
# def fetchtime(self):
# def __repr__(self):
# def to_dict(self):
# def dyratio(self):
# def adddelratio(self):
# def addRatio(self):
# def delRatio(self):
# def updatedRatio(self):
# def staticRatio(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def __repr__(self):
# def info(cls):
# def __repr__(self):
, which may contain function names, class names, or code. Output only the next line. | _table_name := '""" + tab_resourcescrawllog + """_' || _snapshot; |
Predict the next line after this snippet: <|code_start|>
# https://www.w3.org/2015/spatial/wiki/ISO_19115_-_DCAT_-_Schema.org_mapping
def resp_party(g, doc, orga):
if isinstance(orga, URIRef) and '@id' not in doc:
doc['@id'] = str(orga)
for p, o in g.predicate_objects(orga):
if 'name' not in doc:
if p == FOAF.name:
doc['name'] = str(o)
elif p == VCARD.fn:
doc['name'] = str(o)
elif p == VCARD['organization-name']:
doc['name'] = str(o)
if 'email' not in doc:
if p == FOAF.mbox:
doc['email'] = str(o)
elif p == VCARD.hasEmail:
doc['email'] = str(o)
if 'url' not in doc:
if p == FOAF.homepage:
doc['url'] = str(o)
elif p == VCARD.hasURL:
doc['url'] = str(o)
return doc
def convert(portal, data):
g = rdflib.Graph()
# write dcat dataset into graph
<|code_end|>
using the current file's imports:
import rdflib
import json
from rdflib import URIRef, BNode, Literal
from rdflib.namespace import Namespace, RDF, RDFS, DCTERMS, XSD
from odpw.core import dataset_converter
and any relevant context from other files:
# Path: odpw/core/dataset_converter.py
# DCT = Namespace("http://purl.org/dc/terms/")
# DCAT = Namespace("http://www.w3.org/ns/dcat#")
# ADMS = Namespace("http://www.w3.org/ns/adms#")
# VCARD = Namespace("http://www.w3.org/2006/vcard/ns#")
# FOAF = Namespace("http://xmlns.com/foaf/0.1/")
# SCHEMA = Namespace('http://schema.org/')
# TIME = Namespace('http://www.w3.org/2006/time')
# LOCN = Namespace('http://www.w3.org/ns/locn#')
# GSP = Namespace('http://www.opengis.net/ont/geosparql#')
# OWL = Namespace('http://www.w3.org/2002/07/owl#')
# ADEQUATE = Namespace('https://www.adequate.at/ns#')
# GEOJSON_IMT = 'https://www.iana.org/assignments/media-types/application/vnd.geo+json'
# VALID_URL = re.compile(
# r'^(?:http|ftp)s?://' # http:// or https://
# r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
# r'localhost|' #localhost...
# r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
# r'(?::\d+)?' # optional port
# r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# URI = get_compiled_pattern('^%(URI)s$')
# def add_dcat_to_graph(dataset_dict, portal, graph, portal_uri):
# def dict_to_dcat(dataset_dict, portal, graph=None, format='json-ld', portal_uri=None):
# def fix_socrata_graph(g, dataset_dict, portal_url):
# def is_valid_url(references):
# def is_valid_uri(references):
# def graph_from_opendatasoft(g, dataset_dict, portal_url):
# def graph_from_data_gouv_fr(g, dataset_dict, portal_url):
# def __init__(self, graph, portal_base_url):
# def graph_from_ckan(self, dataset_dict):
# def _get_dataset_value(self, dataset_dict, key, default=None):
# def publisher_uri_from_dataset_dict(self, dataset_dict):
# def catalog_uri(self):
# def resource_uri(self, resource_dict, dataset_id):
# def dataset_uri(self, dataset_dict):
# def _add_triples_from_dict(graph, _dict, subject, items,
# list_value=False,
# date_value=False):
# def _add_date_triples_from_dict(graph, _dict, subject, items):
# def _add_list_triples_from_dict(graph, _dict, subject, items):
# def _add_triple_from_dict(graph, _dict, subject, predicate, key,
# fallbacks=None,
# list_value=False,
# date_value=False):
# def _get_dict_value(_dict, key, default=None):
# def _add_list_triple(graph, subject, predicate, value):
# def _add_date_triple(graph, subject, predicate, value):
# class CKANConverter:
. Output only the next line. | dataset_converter.dict_to_dcat(data, portal, graph=g) |
Continue the code snippet: <|code_start|> if 'domain' not in request.meta:
domain=''
try:
parsed_uri = urlparse( response.url )
domain = '{uri.netloc}'.format(uri=parsed_uri)
except:
domain='error'
request.meta['domain']=domain
#create folder and file
domain=request.meta['domain']
#compute digest and filesize
digest=hashlib.md5(content).hexdigest()
request.meta['digest']=digest
request.meta['size']=sys.getsizeof(content)
#check if digest exists?, if yes, get file location and file size
last_digest= spider.api.getLastDigest(uri=response.url)
request.meta['contentchanged']=0 if last_digest and last_digest[0]==digest else 1
try:
filename=request.meta['git']
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename,'wb') as fw:
try:
fw.write(content)
except Exception as e:
<|code_end|>
. Use current file imports:
import datetime
import gzip
import hashlib
import os
import time
import urllib
import structlog
import sys
import twisted
import scrapy
from urlparse import urlparse
from scrapy.http import Response
from odpw.utils.error_handling import getExceptionString, ErrorHandler
from odpw.core.model import ResourceInfo, ResourceCrawlLog
and context (classes, functions, or code) from other files:
# Path: odpw/utils/error_handling.py
# def getExceptionString(e):
# try:
# if isinstance(e,ckanapi.errors.CKANAPIError):
# try:
# err = literal_eval(e.extra_msg)
# return str(type(e))+":"+str(err[2])
# except Exception:
# return str(type(e))+":"+str(e.extra_msg)
# else:
# if e.message:
# return str(type(e))+":"+str(e.message)
# if e.message:
# return str(type(e))+":"
# except Exception as e:
# log.error("Get Exception string", exctype=type(e), excmsg=e.message,exc_info=True)
# return 601
#
# class ErrorHandler():
#
# exceptions=defaultdict(long)
#
# DEBUG=False
#
# @classmethod
# def handleError(cls, log, msg=None, exception=None, debug=False, **kwargs):
# name=type(exception).__name__
# cls.exceptions[name] +=1
#
# if debug:
# print(traceback.format_exc())
#
# log.error(msg, exctype=type(exception), excmsg=exception.message, **kwargs)
#
# @classmethod
# def printStats(cls):
# print '>>>','--*'*10,'EXCEPTIONS','*--'*10
# if len(cls.exceptions)==0:
# print "No exceptions handled"
# else:
# print " Numbers of Exceptions:"
# for exc, count in cls.exceptions.iteritems():
# print " ",exc, count
# print '<<<','--*'*25
#
# Path: odpw/core/model.py
# class ResourceInfo(Base):
# __tablename__ = tab_resourcesinfo
#
# uri= Column(String, primary_key=True)
# snapshot= Column(SmallInteger, primary_key=True)
# timestamp= Column(TIMESTAMP)
# status=Column(SmallInteger)
# exc=Column(String)
# header=Column(JSONB)
# mime=Column(String)
# size=Column(BigInteger)
#
# class ResourceCrawlLog(Base):
# __tablename__ = tab_resourcescrawllog
#
# uri= Column(String, primary_key=True)
# snapshot= Column(SmallInteger, primary_key=True)
# timestamp= Column(TIMESTAMP, primary_key=True)
# status=Column(SmallInteger, index=True)
#
# exc=Column(String)
# header=Column(JSONB)
# mime=Column(String)
# size=Column(BigInteger)
# crawltime=Column(BigInteger)
#
# referrer=Column( String)
# disklocation=Column( String)
# digest=Column( String)
# contentchanged=Column( Integer)
# domain=Column( String, index=True)
. Output only the next line. | request.meta['error']=getExceptionString(e) |
Here is a snippet: <|code_start|>
log = structlog.get_logger()
error_classes=[
(KeyError,601,None)
,(scrapy.exceptions.IgnoreRequest,602,'Robots.txt')
,(scrapy.exceptions.NotSupported,603)
,(twisted.internet.error.ConnectError,604)
,(twisted.internet.error.ConnectionRefusedError,605)
,(twisted.internet.error.DNSLookupError,606)
,(twisted.internet.error.TCPTimedOutError,607)
,(twisted.internet.error.TimeoutError,608)
]
class ErrorHandling(object):
def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
<|code_end|>
. Write the next line using the current file imports:
import datetime
import gzip
import hashlib
import os
import time
import urllib
import structlog
import sys
import twisted
import scrapy
from urlparse import urlparse
from scrapy.http import Response
from odpw.utils.error_handling import getExceptionString, ErrorHandler
from odpw.core.model import ResourceInfo, ResourceCrawlLog
and context from other files:
# Path: odpw/utils/error_handling.py
# def getExceptionString(e):
# try:
# if isinstance(e,ckanapi.errors.CKANAPIError):
# try:
# err = literal_eval(e.extra_msg)
# return str(type(e))+":"+str(err[2])
# except Exception:
# return str(type(e))+":"+str(e.extra_msg)
# else:
# if e.message:
# return str(type(e))+":"+str(e.message)
# if e.message:
# return str(type(e))+":"
# except Exception as e:
# log.error("Get Exception string", exctype=type(e), excmsg=e.message,exc_info=True)
# return 601
#
# class ErrorHandler():
#
# exceptions=defaultdict(long)
#
# DEBUG=False
#
# @classmethod
# def handleError(cls, log, msg=None, exception=None, debug=False, **kwargs):
# name=type(exception).__name__
# cls.exceptions[name] +=1
#
# if debug:
# print(traceback.format_exc())
#
# log.error(msg, exctype=type(exception), excmsg=exception.message, **kwargs)
#
# @classmethod
# def printStats(cls):
# print '>>>','--*'*10,'EXCEPTIONS','*--'*10
# if len(cls.exceptions)==0:
# print "No exceptions handled"
# else:
# print " Numbers of Exceptions:"
# for exc, count in cls.exceptions.iteritems():
# print " ",exc, count
# print '<<<','--*'*25
#
# Path: odpw/core/model.py
# class ResourceInfo(Base):
# __tablename__ = tab_resourcesinfo
#
# uri= Column(String, primary_key=True)
# snapshot= Column(SmallInteger, primary_key=True)
# timestamp= Column(TIMESTAMP)
# status=Column(SmallInteger)
# exc=Column(String)
# header=Column(JSONB)
# mime=Column(String)
# size=Column(BigInteger)
#
# class ResourceCrawlLog(Base):
# __tablename__ = tab_resourcescrawllog
#
# uri= Column(String, primary_key=True)
# snapshot= Column(SmallInteger, primary_key=True)
# timestamp= Column(TIMESTAMP, primary_key=True)
# status=Column(SmallInteger, index=True)
#
# exc=Column(String)
# header=Column(JSONB)
# mime=Column(String)
# size=Column(BigInteger)
# crawltime=Column(BigInteger)
#
# referrer=Column( String)
# disklocation=Column( String)
# digest=Column( String)
# contentchanged=Column( Integer)
# domain=Column( String, index=True)
, which may include functions, classes, or code. Output only the next line. | ErrorHandler.handleError(log, 'process_exception',exception=exception, uri=request.url) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.