Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Predict the next line for this snippet: <|code_start|> correct=float(100 * test.getRunResults().correct)/total
wrong=float(100 * test.getRunResults().wrong)/total
not_mapped=float(100 * (test.getRunResults().not_mapped + test.getRunResults().not_found))/total
else:
correct=0
wrong=0
not_mapped=0
maptime=test.getRunResults().maptime
if maptime != 0:
throughput=total/float(test.getRunResults().maptime)
else:
throughput=-1
if errors == 0:
html += "<td>%.3f%%</td>" % correct
html += "<td>%.3f%%</td>" % not_mapped
html += "<td>%d</td>" % throughput
csv+="%s,%s,%.4f,%.4f,%d\n" % (test.getMapper().getTitle(),test.getMapper().param_string,correct,not_mapped,throughput)
else:
html += "<td></td>"
html += "<td></td>"
html += "<td></td>"
csv+="%s,%s,-,-,-\n" % (test.getMapper().getTitle(),test.getMapper().param_string)
html += "</tr>"
csv_filename = self.writeCSV("overview",csv)
html += "</table>"
<|code_end|>
with the help of current file imports:
from lib import util
import json
import json
import json
import json
and context from other files:
# Path: lib/util.py
# STATUS_NORMAL=1
# STATUS_MAX_MEMORY_EXCEEDED=2
# STATUS_MAX_RUNTIME_EXCEEDED=3
# MAX_LEN_OUT = 10000
# def runAndMeasure(command,detailed=True,maxtime=0,maxmem=0):
# def runSimple(command):
# def runAndMeasureInternal(return_queue,command,maxtime=0,maxmem=0):
# def runInternal(control_queue,command,max_memory):
# def measureProcess(queue, initial_pids, command, measurement_interval=1, max_runtime=0, max_memory=0, debug=False):
# def extendTargets(targets, proc):
# def killTargets(targets):
# def updateTargets(targets, metrics):
# def updateMetrics(target, metrics):
# def loadConfig(name, parent_dir="", already_included=[]):
# def setCallDir(d):
# def setRootDir(d):
# def enterCallDir():
# def enterRootDir():
# def getRootDir():
# def nl2br(text):
# def msg(text="", level=1):
# def yes_no(b):
# def md5(text):
# def merge(x, y):
# def formatFilesize(n):
# def percent(val, base, offset=0):
# def abs_path(path):
# def get_sam_header_line_count(filename):
# def is_sam_sorted(filename):
# def sort_sam(filename,threads=1):
# def sort_sam_picard(filename,threads=-1):
# def line_count(filename):
# def sanitize_string(s):
# def makeExportDropdown(plot_id,csv_filename):
# def parseMD(md,seq):
, which may contain function names, class names, or code. Output only the next line. | html += util.makeExportDropdown("",csv_filename) |
Predict the next line after this snippet: <|code_start|>
elif measure == "corrects":
try:
value = test.getRunResults().correct / float(test.getRunResults().maptime)
except ZeroDivisionError:
value = 0
elif measure == "precision":
value = test.getRunResults().precision
elif measure == "recall":
value = test.getRunResults().recall
elif measure == "fmeasure":
value = test.getRunResults().fmeasure
value = round(value, 4)
column.append(value)
if len(column):
data.append(column)
if min != 0:
min_str = ",min: %f" % min
else:
min_str = ""
titles = []
for name in self.mate.getTestNameList():
tests = self.mate.getTestsByName(name)
if len(tests) != 0 and tests[0]._("base") == "tests_base/base_mapping":
titles.append(tests[0].getTitle())
# titles=["0%","75%","90%","95%","99%"]
<|code_end|>
using the current file's imports:
import time
import os
import sys
import math
import yaml
import json
import report_plot
from lib import page
from lib import util
and any relevant context from other files:
# Path: lib/page.py
# class Page:
# def __init__(self):
# def addSection(self, title, content, footer=None, description=None, container=True):
# def addSectionFront(self, title, content, footer=None, description=None, container=True):
# def addNav(self, nav_list, active_title=""):
# def enableNavSeparators(self,enable):
# def addScript(self, script):
# def addStyle(self, style):
# def setNavRight(self, nav):
# def setSidebarFooter(self, html):
# def enableFullscreenSections(self):
# def enableSidebar(self,enable):
# def setFooter(self,footer):
# def html(self):
#
# Path: lib/util.py
# STATUS_NORMAL=1
# STATUS_MAX_MEMORY_EXCEEDED=2
# STATUS_MAX_RUNTIME_EXCEEDED=3
# MAX_LEN_OUT = 10000
# def runAndMeasure(command,detailed=True,maxtime=0,maxmem=0):
# def runSimple(command):
# def runAndMeasureInternal(return_queue,command,maxtime=0,maxmem=0):
# def runInternal(control_queue,command,max_memory):
# def measureProcess(queue, initial_pids, command, measurement_interval=1, max_runtime=0, max_memory=0, debug=False):
# def extendTargets(targets, proc):
# def killTargets(targets):
# def updateTargets(targets, metrics):
# def updateMetrics(target, metrics):
# def loadConfig(name, parent_dir="", already_included=[]):
# def setCallDir(d):
# def setRootDir(d):
# def enterCallDir():
# def enterRootDir():
# def getRootDir():
# def nl2br(text):
# def msg(text="", level=1):
# def yes_no(b):
# def md5(text):
# def merge(x, y):
# def formatFilesize(n):
# def percent(val, base, offset=0):
# def abs_path(path):
# def get_sam_header_line_count(filename):
# def is_sam_sorted(filename):
# def sort_sam(filename,threads=1):
# def sort_sam_picard(filename,threads=-1):
# def line_count(filename):
# def sanitize_string(s):
# def makeExportDropdown(plot_id,csv_filename):
# def parseMD(md,seq):
. Output only the next line. | page.addSection("Results: %s" % title_section, """<div id="plot_%s"></div>%s""" % (measure,util.makeExportDropdown("plot_%s"%measure,""))) |
Predict the next line for this snippet: <|code_start|>
def map(self):
self.enterWorkingDirectory()
mapper = self.getMapper()
mapper.onMapPre()
try:
reads_size = 0
for readfile in self._("input:reads"):
reads_size = reads_size + os.path.getsize(readfile)
except:
self.error("Error accessing read / reference input files for size - do they exist / are accessible?",
str(self._("input:reads")), None)
raise SystemExit
if os.path.isfile(self._("output:testee_path")):
self.error("Output file existed before mapper run", self._("output:testee_path"))
raise SystemExit
if os.path.isfile(self._("output:sorted_testee_path")):
self.error("Output file existed before mapper run", self._("output:sorted_testee_path"))
raise SystemExit
cmd_pre = mapper.getCommandLinePre()
self.dbg("Command(pre): " + cmd_pre)
self.sub(cmd_pre)
mapper_path = list(self.getMapper().getBinaryPath())[0]
<|code_end|>
with the help of current file imports:
import math
import os
import yaml
from lib import util
and context from other files:
# Path: lib/util.py
# STATUS_NORMAL=1
# STATUS_MAX_MEMORY_EXCEEDED=2
# STATUS_MAX_RUNTIME_EXCEEDED=3
# MAX_LEN_OUT = 10000
# def runAndMeasure(command,detailed=True,maxtime=0,maxmem=0):
# def runSimple(command):
# def runAndMeasureInternal(return_queue,command,maxtime=0,maxmem=0):
# def runInternal(control_queue,command,max_memory):
# def measureProcess(queue, initial_pids, command, measurement_interval=1, max_runtime=0, max_memory=0, debug=False):
# def extendTargets(targets, proc):
# def killTargets(targets):
# def updateTargets(targets, metrics):
# def updateMetrics(target, metrics):
# def loadConfig(name, parent_dir="", already_included=[]):
# def setCallDir(d):
# def setRootDir(d):
# def enterCallDir():
# def enterRootDir():
# def getRootDir():
# def nl2br(text):
# def msg(text="", level=1):
# def yes_no(b):
# def md5(text):
# def merge(x, y):
# def formatFilesize(n):
# def percent(val, base, offset=0):
# def abs_path(path):
# def get_sam_header_line_count(filename):
# def is_sam_sorted(filename):
# def sort_sam(filename,threads=1):
# def sort_sam_picard(filename,threads=-1):
# def line_count(filename):
# def sanitize_string(s):
# def makeExportDropdown(plot_id,csv_filename):
# def parseMD(md,seq):
, which may contain function names, class names, or code. Output only the next line. | base_runtime_file = self.mate.getCachePathPrefix() + util.md5(mapper_path + self._("input:reference")) + "_initt.yaml" |
Given the following code snippet before the placeholder: <|code_start|>
def init(self):
mapper = self.getMapper()
if self._("output:bam"):
self.setc("output:extension", ".bam")
else:
self.setc("output:extension", ".sam")
self.setc("output:testee_path", self._("output:mapping_prefix") + mapper.getName() + self._("output:extension"))
self.setc("output:sorted_testee_path", "sorted_" + self._("output:testee_path"))
self.setc("input:sorted_mapping_comparison", "sorted_" + self._("input:mapping_comparison"))
if self.mate._("evaluation:pos_threshold") != None:
self.setc("evaluation:pos_threshold", self.mate._("evaluation:pos_threshold"))
# Translate basic IO configuration into mapper parameters
mapper.setInReferenceFile(self._("input:reference"))
mapper.setInReadFiles(self._("input:reads"))
mapper.setInPaired(self._("input:reads_paired_end"))
mapper.setOutMappingFile(self._("output:testee_path"))
mapper.addParams(self._("params"))
# TODO: Fix
if self._("output:bam"):
mapper.addParams({"ngm": {"b": ""}})
self.enterWorkingDirectory()
<|code_end|>
, predict the next line using imports from the current file:
from lib import util
from lib import stats
and context including class names, function names, and sometimes code from other files:
# Path: lib/util.py
# STATUS_NORMAL=1
# STATUS_MAX_MEMORY_EXCEEDED=2
# STATUS_MAX_RUNTIME_EXCEEDED=3
# MAX_LEN_OUT = 10000
# def runAndMeasure(command,detailed=True,maxtime=0,maxmem=0):
# def runSimple(command):
# def runAndMeasureInternal(return_queue,command,maxtime=0,maxmem=0):
# def runInternal(control_queue,command,max_memory):
# def measureProcess(queue, initial_pids, command, measurement_interval=1, max_runtime=0, max_memory=0, debug=False):
# def extendTargets(targets, proc):
# def killTargets(targets):
# def updateTargets(targets, metrics):
# def updateMetrics(target, metrics):
# def loadConfig(name, parent_dir="", already_included=[]):
# def setCallDir(d):
# def setRootDir(d):
# def enterCallDir():
# def enterRootDir():
# def getRootDir():
# def nl2br(text):
# def msg(text="", level=1):
# def yes_no(b):
# def md5(text):
# def merge(x, y):
# def formatFilesize(n):
# def percent(val, base, offset=0):
# def abs_path(path):
# def get_sam_header_line_count(filename):
# def is_sam_sorted(filename):
# def sort_sam(filename,threads=1):
# def sort_sam_picard(filename,threads=-1):
# def line_count(filename):
# def sanitize_string(s):
# def makeExportDropdown(plot_id,csv_filename):
# def parseMD(md,seq):
#
# Path: lib/stats.py
# class ReferenceMappingStatistics:
# class MappingRow:
# def __init__(self):
# def to_string(self):
# def to_csv(self):
# def computeMeasures(self):
# def diff(self, other):
# def __init__(self, alignment=None):
. Output only the next line. | self.dbg("Ref: " + util.abs_path(self._("input:reference"))) |
Here is a snippet: <|code_start|> if self._("output:bam"):
self.setc("output:extension", ".bam")
else:
self.setc("output:extension", ".sam")
self.setc("output:testee_path", self._("output:mapping_prefix") + mapper.getName() + self._("output:extension"))
self.setc("output:sorted_testee_path", "sorted_" + self._("output:testee_path"))
self.setc("input:sorted_mapping_comparison", "sorted_" + self._("input:mapping_comparison"))
if self.mate._("evaluation:pos_threshold") != None:
self.setc("evaluation:pos_threshold", self.mate._("evaluation:pos_threshold"))
# Translate basic IO configuration into mapper parameters
mapper.setInReferenceFile(self._("input:reference"))
mapper.setInReadFiles(self._("input:reads"))
mapper.setInPaired(self._("input:reads_paired_end"))
mapper.setOutMappingFile(self._("output:testee_path"))
mapper.addParams(self._("params"))
# TODO: Fix
if self._("output:bam"):
mapper.addParams({"ngm": {"b": ""}})
self.enterWorkingDirectory()
self.dbg("Ref: " + util.abs_path(self._("input:reference")))
self.dbg("Reads: " + util.abs_path(self._("input:reads")[0]))
self.dbg("Output:" + util.abs_path(self._("output:testee_path")))
self.restoreWorkingDirectory()
<|code_end|>
. Write the next line using the current file imports:
from lib import util
from lib import stats
and context from other files:
# Path: lib/util.py
# STATUS_NORMAL=1
# STATUS_MAX_MEMORY_EXCEEDED=2
# STATUS_MAX_RUNTIME_EXCEEDED=3
# MAX_LEN_OUT = 10000
# def runAndMeasure(command,detailed=True,maxtime=0,maxmem=0):
# def runSimple(command):
# def runAndMeasureInternal(return_queue,command,maxtime=0,maxmem=0):
# def runInternal(control_queue,command,max_memory):
# def measureProcess(queue, initial_pids, command, measurement_interval=1, max_runtime=0, max_memory=0, debug=False):
# def extendTargets(targets, proc):
# def killTargets(targets):
# def updateTargets(targets, metrics):
# def updateMetrics(target, metrics):
# def loadConfig(name, parent_dir="", already_included=[]):
# def setCallDir(d):
# def setRootDir(d):
# def enterCallDir():
# def enterRootDir():
# def getRootDir():
# def nl2br(text):
# def msg(text="", level=1):
# def yes_no(b):
# def md5(text):
# def merge(x, y):
# def formatFilesize(n):
# def percent(val, base, offset=0):
# def abs_path(path):
# def get_sam_header_line_count(filename):
# def is_sam_sorted(filename):
# def sort_sam(filename,threads=1):
# def sort_sam_picard(filename,threads=-1):
# def line_count(filename):
# def sanitize_string(s):
# def makeExportDropdown(plot_id,csv_filename):
# def parseMD(md,seq):
#
# Path: lib/stats.py
# class ReferenceMappingStatistics:
# class MappingRow:
# def __init__(self):
# def to_string(self):
# def to_csv(self):
# def computeMeasures(self):
# def diff(self, other):
# def __init__(self, alignment=None):
, which may include functions, classes, or code. Output only the next line. | dummy_stats = stats.ReferenceMappingStatistics() |
Given snippet: <|code_start|> self.stats.total += 1
self.doCompareRows(sam_test.getCurr())
for i in range(254, -1, -1):
self.stats.mapq_cumulated[i]["correct"] += self.stats.mapq_cumulated[i + 1]["correct"]
self.stats.mapq_cumulated[i]["wrong"] += self.stats.mapq_cumulated[i + 1]["wrong"]
self.stats.computeMeasures()
def doCompareRows(self, row_testee):
if row_testee.is_unmapped:
self.stats.not_mapped += 1
#self.export_read("fail",row_testee,None,"unmapped")
else:
self.stats.correct += 1
self.stats.mapq_cumulated[row_testee.mapq]["correct"] += 1
#self.export_read("pass",row_testee)
class ThresholdBasedEvaluator(Evaluator):
def compute(self):
sam_test = SAMFile(self.testee_filename)
sam_comp = SAMFile(self.comparison_filename)
dont_advance_test = False
warned_testee_end = False
warned_comp_end = False
while sam_comp.next():
self.stats.total += 1
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from stats import *
from sam import *
from lib import util
import sys
and context:
# Path: lib/util.py
# STATUS_NORMAL=1
# STATUS_MAX_MEMORY_EXCEEDED=2
# STATUS_MAX_RUNTIME_EXCEEDED=3
# MAX_LEN_OUT = 10000
# def runAndMeasure(command,detailed=True,maxtime=0,maxmem=0):
# def runSimple(command):
# def runAndMeasureInternal(return_queue,command,maxtime=0,maxmem=0):
# def runInternal(control_queue,command,max_memory):
# def measureProcess(queue, initial_pids, command, measurement_interval=1, max_runtime=0, max_memory=0, debug=False):
# def extendTargets(targets, proc):
# def killTargets(targets):
# def updateTargets(targets, metrics):
# def updateMetrics(target, metrics):
# def loadConfig(name, parent_dir="", already_included=[]):
# def setCallDir(d):
# def setRootDir(d):
# def enterCallDir():
# def enterRootDir():
# def getRootDir():
# def nl2br(text):
# def msg(text="", level=1):
# def yes_no(b):
# def md5(text):
# def merge(x, y):
# def formatFilesize(n):
# def percent(val, base, offset=0):
# def abs_path(path):
# def get_sam_header_line_count(filename):
# def is_sam_sorted(filename):
# def sort_sam(filename,threads=1):
# def sort_sam_picard(filename,threads=-1):
# def line_count(filename):
# def sanitize_string(s):
# def makeExportDropdown(plot_id,csv_filename):
# def parseMD(md,seq):
which might include code, classes, or functions. Output only the next line. | edit_distance = len(util.parseMD(sam_comp.getCurr().getTag("MD"),sam_comp.getCurr().seq)) |
Next line prediction: <|code_start|>
def get(self):
page = Page()
page.enableFullscreenSections()
self.addSectionDataSource(page, "real")
self.addSectionSelectData(page)
self.addSectionEvaluation(page)
self.addSectionAdvanced(page)
page.setNavRight("""<a href="/" class="btn btn-warning" role="button">Back to Teaser Home</a>""")
self.write(page.html())
class RedirectJob(tornado.web.RequestHandler):
def get(self, jobid):
try:
with open("reports/" + jobid + "/index.html", "r") as h:
if h.read().strip() == "INITIALIZING":
raise
else:
self.redirect("/reports/" + jobid + "/")
except:
self.write("""<meta http-equiv="refresh" content="1">Please wait...""")
class SubmitJob(tornado.web.RequestHandler):
def post(self):
<|code_end|>
. Use current file imports:
(import tornado.ioloop
import tornado.web
import tornado.wsgi
import tornado.options
import yaml
import sys
import os
import time
import subprocess
import shutil
import sys
import datetime
import logging
from lib import util
from lib import page
from lib import util
)
and context including class names, function names, or small code snippets from other files:
# Path: lib/util.py
# STATUS_NORMAL=1
# STATUS_MAX_MEMORY_EXCEEDED=2
# STATUS_MAX_RUNTIME_EXCEEDED=3
# MAX_LEN_OUT = 10000
# def runAndMeasure(command,detailed=True,maxtime=0,maxmem=0):
# def runSimple(command):
# def runAndMeasureInternal(return_queue,command,maxtime=0,maxmem=0):
# def runInternal(control_queue,command,max_memory):
# def measureProcess(queue, initial_pids, command, measurement_interval=1, max_runtime=0, max_memory=0, debug=False):
# def extendTargets(targets, proc):
# def killTargets(targets):
# def updateTargets(targets, metrics):
# def updateMetrics(target, metrics):
# def loadConfig(name, parent_dir="", already_included=[]):
# def setCallDir(d):
# def setRootDir(d):
# def enterCallDir():
# def enterRootDir():
# def getRootDir():
# def nl2br(text):
# def msg(text="", level=1):
# def yes_no(b):
# def md5(text):
# def merge(x, y):
# def formatFilesize(n):
# def percent(val, base, offset=0):
# def abs_path(path):
# def get_sam_header_line_count(filename):
# def is_sam_sorted(filename):
# def sort_sam(filename,threads=1):
# def sort_sam_picard(filename,threads=-1):
# def line_count(filename):
# def sanitize_string(s):
# def makeExportDropdown(plot_id,csv_filename):
# def parseMD(md,seq):
#
# Path: lib/page.py
# class Page:
# def __init__(self):
# def addSection(self, title, content, footer=None, description=None, container=True):
# def addSectionFront(self, title, content, footer=None, description=None, container=True):
# def addNav(self, nav_list, active_title=""):
# def enableNavSeparators(self,enable):
# def addScript(self, script):
# def addStyle(self, style):
# def setNavRight(self, nav):
# def setSidebarFooter(self, html):
# def enableFullscreenSections(self):
# def enableSidebar(self,enable):
# def setFooter(self,footer):
# def html(self):
#
# Path: lib/util.py
# STATUS_NORMAL=1
# STATUS_MAX_MEMORY_EXCEEDED=2
# STATUS_MAX_RUNTIME_EXCEEDED=3
# MAX_LEN_OUT = 10000
# def runAndMeasure(command,detailed=True,maxtime=0,maxmem=0):
# def runSimple(command):
# def runAndMeasureInternal(return_queue,command,maxtime=0,maxmem=0):
# def runInternal(control_queue,command,max_memory):
# def measureProcess(queue, initial_pids, command, measurement_interval=1, max_runtime=0, max_memory=0, debug=False):
# def extendTargets(targets, proc):
# def killTargets(targets):
# def updateTargets(targets, metrics):
# def updateMetrics(target, metrics):
# def loadConfig(name, parent_dir="", already_included=[]):
# def setCallDir(d):
# def setRootDir(d):
# def enterCallDir():
# def enterRootDir():
# def getRootDir():
# def nl2br(text):
# def msg(text="", level=1):
# def yes_no(b):
# def md5(text):
# def merge(x, y):
# def formatFilesize(n):
# def percent(val, base, offset=0):
# def abs_path(path):
# def get_sam_header_line_count(filename):
# def is_sam_sorted(filename):
# def sort_sam(filename,threads=1):
# def sort_sam_picard(filename,threads=-1):
# def line_count(filename):
# def sanitize_string(s):
# def makeExportDropdown(plot_id,csv_filename):
# def parseMD(md,seq):
. Output only the next line. | self.job_id = util.md5(str(int(time.time())) + self.get_argument('reference', ''))
|
Next line prediction: <|code_start|> not_mapped=float(100 * (test.getRunResults().not_mapped + test.getRunResults().not_found))/total
else:
correct=0
wrong=0
not_mapped=0
maptime=test.getRunResults().maptime
if maptime != 0:
throughput=total/float(test.getRunResults().maptime)
else:
throughput=-1
if errors == 0:
html += "<td>%.3f%%</td>" % correct
html += "<td>%.3f%%</td>" % wrong
html += "<td>%.3f%%</td>" % not_mapped
html += "<td>%d</td>" % throughput
csv+="%s,%s,%.4f,%.4f,%.4f,%d\n" % (test.getMapper().getTitle(),test.getMapper().param_string,correct,wrong,not_mapped,throughput)
else:
html += "<td></td>"
html += "<td></td>"
html += "<td></td>"
html += "<td></td>"
csv+="%s,%s,-,-,-,-\n" % (test.getMapper().getTitle(),test.getMapper().param_string)
html += "</tr>"
csv_filename = self.writeCSV("overview",csv)
html += "</table>"
<|code_end|>
. Use current file imports:
(from lib import util
import json
import json
import json
import json
import json
import math
import json
import json
import json
import json)
and context including class names, function names, or small code snippets from other files:
# Path: lib/util.py
# STATUS_NORMAL=1
# STATUS_MAX_MEMORY_EXCEEDED=2
# STATUS_MAX_RUNTIME_EXCEEDED=3
# MAX_LEN_OUT = 10000
# def runAndMeasure(command,detailed=True,maxtime=0,maxmem=0):
# def runSimple(command):
# def runAndMeasureInternal(return_queue,command,maxtime=0,maxmem=0):
# def runInternal(control_queue,command,max_memory):
# def measureProcess(queue, initial_pids, command, measurement_interval=1, max_runtime=0, max_memory=0, debug=False):
# def extendTargets(targets, proc):
# def killTargets(targets):
# def updateTargets(targets, metrics):
# def updateMetrics(target, metrics):
# def loadConfig(name, parent_dir="", already_included=[]):
# def setCallDir(d):
# def setRootDir(d):
# def enterCallDir():
# def enterRootDir():
# def getRootDir():
# def nl2br(text):
# def msg(text="", level=1):
# def yes_no(b):
# def md5(text):
# def merge(x, y):
# def formatFilesize(n):
# def percent(val, base, offset=0):
# def abs_path(path):
# def get_sam_header_line_count(filename):
# def is_sam_sorted(filename):
# def sort_sam(filename,threads=1):
# def sort_sam_picard(filename,threads=-1):
# def line_count(filename):
# def sanitize_string(s):
# def makeExportDropdown(plot_id,csv_filename):
# def parseMD(md,seq):
. Output only the next line. | html += util.makeExportDropdown("",csv_filename) |
Here is a snippet: <|code_start|>
def sort_prepare(self):
self.enterWorkingDirectory()
self.dbg("Sorting...")
<|code_end|>
. Write the next line using the current file imports:
from lib import util
import os
and context from other files:
# Path: lib/util.py
# STATUS_NORMAL=1
# STATUS_MAX_MEMORY_EXCEEDED=2
# STATUS_MAX_RUNTIME_EXCEEDED=3
# MAX_LEN_OUT = 10000
# def runAndMeasure(command,detailed=True,maxtime=0,maxmem=0):
# def runSimple(command):
# def runAndMeasureInternal(return_queue,command,maxtime=0,maxmem=0):
# def runInternal(control_queue,command,max_memory):
# def measureProcess(queue, initial_pids, command, measurement_interval=1, max_runtime=0, max_memory=0, debug=False):
# def extendTargets(targets, proc):
# def killTargets(targets):
# def updateTargets(targets, metrics):
# def updateMetrics(target, metrics):
# def loadConfig(name, parent_dir="", already_included=[]):
# def setCallDir(d):
# def setRootDir(d):
# def enterCallDir():
# def enterRootDir():
# def getRootDir():
# def nl2br(text):
# def msg(text="", level=1):
# def yes_no(b):
# def md5(text):
# def merge(x, y):
# def formatFilesize(n):
# def percent(val, base, offset=0):
# def abs_path(path):
# def get_sam_header_line_count(filename):
# def is_sam_sorted(filename):
# def sort_sam(filename,threads=1):
# def sort_sam_picard(filename,threads=-1):
# def line_count(filename):
# def sanitize_string(s):
# def makeExportDropdown(plot_id,csv_filename):
# def parseMD(md,seq):
, which may include functions, classes, or code. Output only the next line. | sorted_testee_filename = util.sort_sam(self._("output:testee_path"),int(self.mate._("threads"))) |
Next line prediction: <|code_start|> def test_publish_linuxdoc(self):
c = self.config
c.publish = True
self.add_new('Frobnitz-Linuxdoc-HOWTO', example.ex_linuxdoc)
inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
self.assertEqual(1, len(inv.all.keys()))
docs = inv.all.values()
c.skip = []
exitcode = tldp.driver.publish(c, docs)
self.assertEqual(exitcode, os.EX_OK)
doc = docs.pop(0)
self.assertTrue(doc.output.iscomplete)
def test_publish_docbooksgml(self):
self.add_docbooksgml_support_to_config()
c = self.config
c.publish = True
self.add_new('Frobnitz-DocBookSGML-HOWTO', example.ex_docbooksgml)
inv = tldp.inventory.Inventory(c.pubdir, c.sourcedir)
self.assertEqual(1, len(inv.all.keys()))
docs = inv.all.values()
exitcode = tldp.driver.publish(c, docs)
self.assertEqual(exitcode, os.EX_OK)
doc = docs.pop(0)
self.assertTrue(doc.output.iscomplete)
def test_publish_docbooksgml_larger(self):
self.add_docbooksgml_support_to_config()
c = self.config
c.publish = True
<|code_end|>
. Use current file imports:
(import os
import example
import tldp.driver
from tldptesttools import TestInventoryBase
from tldp.sources import SourceDocument)
and context including class names, function names, or small code snippets from other files:
# Path: tldp/sources.py
# class SourceDocument(object):
# '''a class providing a container for each set of source documents
# '''
# def __repr__(self):
# return '<%s:%s (%s)>' % \
# (self.__class__.__name__, self.filename, self.doctype)
#
# def __init__(self, filename):
# '''construct a SourceDocument
#
# filename is a required parameter
#
# The filename is the main (and sometimes sole) document representing
# the source of the LDP HOWTO or Guide. It is the document that is
# passed by name to be handled by any document processing toolchains
# (see also tldp.doctypes).
#
# Each instantiation will raise an IOERror if the supplied filename does
# not exist or if the filename isn't a file (symlink is fine, directory
# or fifo is not).
#
# The remainder of the instantiation will set attributes that are useful
# later in the processing phase, for example, stem, status, enclosing
# directory name and file extension.
#
# There are two important attributes. First, the document type guesser
# will try to infer the doctype (from file extension and signature).
# Note that it is not a fatal error if document type cannot be guessed,
# but the document will not be able to be processed. Second, it is
# useful during the decision-making process to know if any of the source
# files are newer than the output files. Thus, the stat() information
# for every file in the source document directory (or just the single
# source document file) will be collected.
# '''
# self.filename = os.path.abspath(filename)
#
# if not os.path.exists(self.filename):
# fn = self.filename
# logger.critical("Missing source document: %s", fn)
# raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
#
# if os.path.isdir(self.filename):
# self.filename = sourcedoc_fromdir(self.filename)
# elif os.path.isfile(self.filename):
# pass
# else:
# # -- we did not receive a useable document file or directory name
# self.filename = None
#
# if self.filename is None:
# fn = filename
# logger.critical("Source document is not a plain file: %s", fn)
# raise ValueError(fn + " not identifiable as a document")
#
# self.doctype = guess(self.filename)
# self.status = 'source'
# self.output = None
# self.working = None
# self.differing = set()
# self.dirname, self.basename = os.path.split(self.filename)
# self.stem, self.ext = stem_and_ext(self.basename)
# parentbase = os.path.basename(self.dirname)
# logger.debug("%s found source %s", self.stem, self.filename)
# if parentbase == self.stem:
# parentdir = os.path.dirname(self.dirname)
# self.md5sums = md5files(self.dirname, relative=parentdir)
# else:
# self.md5sums = md5files(self.filename, relative=self.dirname)
#
# def detail(self, widths, verbose, file=sys.stdout):
# '''produce a small tabular output about the document'''
# template = ' '.join(('{s.status:{w.status}}',
# '{s.doctype.__name__:{w.doctype}}',
# '{s.stem:{w.stem}}'))
# outstr = template.format(s=self, w=widths)
# print(outstr, file=file)
# if verbose:
# print(' doctype {}'.format(self.doctype), file=file)
# if self.output:
# print(' output dir {}'.format(self.output.dirname),
# file=file)
# print(' source file {}'.format(self.filename), file=file)
# for why, f in sorted(self.differing):
# fname = os.path.join(self.dirname, f)
# print(' {:>7} source {}'.format(why, fname), file=file)
# if self.output:
# for f in sorted(self.output.missing):
# print(' missing output {}'.format(f), file=file)
. Output only the next line. | doc = SourceDocument(example.ex_docbooksgml_dir.filename) |
Continue the code snippet: <|code_start|> - 'new': a source document without any matching output stem
- 'published': a pair of source/output documents with matching stems
- 'orphan': an output document without any matching source stem
- 'broken': a published document with missing output files
- 'stale': a published document with new(er) source files
The Inventory object is intended to be used to identify work that needs to
be done on individual source documents to produce up-to-date output
documents.
'''
def __repr__(self):
return '<%s: %d published, %d orphan, %d new, %d stale, %d broken>' % (
self.__class__.__name__,
len(self.published),
len(self.orphan),
len(self.new),
len(self.stale),
len(self.broken),)
def __init__(self, pubdir, sourcedirs):
'''construct an Inventory
pubdir: path to the OutputCollection
sourcedirs: a list of directories which could be passed to the
SourceCollection object; essentially a directory containing
SourceDocuments; for example LDP/LDP/howto/linuxdoc and
LDP/LDP/guide/docbook
'''
self.output = OutputCollection(pubdir)
<|code_end|>
. Use current file imports:
import copy
import logging
from collections import OrderedDict
from tldp.sources import SourceCollection
from tldp.outputs import OutputCollection
and context (classes, functions, or code) from other files:
# Path: tldp/sources.py
# class SourceCollection(LDPDocumentCollection):
# '''a dict-like container for SourceDocument objects
#
# The key in the SourceCollection is the stem name of the document, which
# allows convenient access and guarantees non-collision.
#
# The use of the stem as a key works conveniently with the
# OutputCollection which uses the same strategy on OutputDirectory.
# '''
# def __init__(self, dirnames=None):
# '''construct a SourceCollection
#
# delegates most responsibility to function scansourcedirs
# '''
# if dirnames is None:
# return
# self.update(scansourcedirs(dirnames))
#
# Path: tldp/outputs.py
# class OutputCollection(LDPDocumentCollection):
# '''a dict-like container for OutputDirectory objects
#
# The key of an OutputCollection is the stem name of the document, which
# allows convenient access and guaranteed non-collision.
#
# The use of the stem as a key works conveniently with the
# SourceCollection which uses the same strategy on SourceDocuments.
# '''
# def __init__(self, dirname=None):
# '''construct an OutputCollection
#
# If dirname is not supplied, OutputCollection is basically, a dict().
# If dirname is supplied, then OutputCollection scans the filesystem for
# subdirectories of dirname and creates an OutputDirectory for each
# subdir. Each subdir name is used as the stem (or key) for holding the
# OutputDirectory in the OutputCollection.
#
# For example, consider the following directory tree:
#
# en
# ├── Latvian-HOWTO
# ├── Scanner-HOWTO
# ├── UUCP-HOWTO
# └── Wireless-HOWTO
#
# If called like OutputCollection("en"), the result in memory would be
# a structure resembling this:
#
# OutputCollection("/path/en") = {
# "Latvian-HOWTO": OutputDirectory("/path/en/Latvian-HOWTO")
# "Scanner-HOWTO": OutputDirectory("/path/en/Scanner-HOWTO")
# "UUCP-HOWTO": OutputDirectory("/path/en/UUCP-HOWTO")
# "Wireless-HOWTO": OutputDirectory("/path/en/Wireless-HOWTO")
# }
#
# '''
# if dirname is None:
# return
# elif not os.path.isdir(dirname):
# logger.critical("Output collection dir %s must already exist.",
# dirname)
# raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), dirname)
# for fname in sorted(os.listdir(dirname), key=lambda x: x.lower()):
# name = os.path.join(dirname, fname)
# if not os.path.isdir(name):
# logger.info("Skipping non-directory %s (in %s)", name, dirname)
# continue
# logger.debug("Found directory %s (in %s)", name, dirname)
# o = OutputDirectory(name)
# assert o.stem not in self
# self[o.stem] = o
. Output only the next line. | self.source = SourceCollection(sourcedirs) |
Predict the next line for this snippet: <|code_start|> - 'output': an output document before any status detection
- 'new': a source document without any matching output stem
- 'published': a pair of source/output documents with matching stems
- 'orphan': an output document without any matching source stem
- 'broken': a published document with missing output files
- 'stale': a published document with new(er) source files
The Inventory object is intended to be used to identify work that needs to
be done on individual source documents to produce up-to-date output
documents.
'''
def __repr__(self):
return '<%s: %d published, %d orphan, %d new, %d stale, %d broken>' % (
self.__class__.__name__,
len(self.published),
len(self.orphan),
len(self.new),
len(self.stale),
len(self.broken),)
def __init__(self, pubdir, sourcedirs):
'''construct an Inventory
pubdir: path to the OutputCollection
sourcedirs: a list of directories which could be passed to the
SourceCollection object; essentially a directory containing
SourceDocuments; for example LDP/LDP/howto/linuxdoc and
LDP/LDP/guide/docbook
'''
<|code_end|>
with the help of current file imports:
import copy
import logging
from collections import OrderedDict
from tldp.sources import SourceCollection
from tldp.outputs import OutputCollection
and context from other files:
# Path: tldp/sources.py
# class SourceCollection(LDPDocumentCollection):
# '''a dict-like container for SourceDocument objects
#
# The key in the SourceCollection is the stem name of the document, which
# allows convenient access and guarantees non-collision.
#
# The use of the stem as a key works conveniently with the
# OutputCollection which uses the same strategy on OutputDirectory.
# '''
# def __init__(self, dirnames=None):
# '''construct a SourceCollection
#
# delegates most responsibility to function scansourcedirs
# '''
# if dirnames is None:
# return
# self.update(scansourcedirs(dirnames))
#
# Path: tldp/outputs.py
# class OutputCollection(LDPDocumentCollection):
# '''a dict-like container for OutputDirectory objects
#
# The key of an OutputCollection is the stem name of the document, which
# allows convenient access and guaranteed non-collision.
#
# The use of the stem as a key works conveniently with the
# SourceCollection which uses the same strategy on SourceDocuments.
# '''
# def __init__(self, dirname=None):
# '''construct an OutputCollection
#
# If dirname is not supplied, OutputCollection is basically, a dict().
# If dirname is supplied, then OutputCollection scans the filesystem for
# subdirectories of dirname and creates an OutputDirectory for each
# subdir. Each subdir name is used as the stem (or key) for holding the
# OutputDirectory in the OutputCollection.
#
# For example, consider the following directory tree:
#
# en
# βββ Latvian-HOWTO
# βββ Scanner-HOWTO
# βββ UUCP-HOWTO
# βββ Wireless-HOWTO
#
# If called like OutputCollection("en"), the result in memory would be
# a structure resembling this:
#
# OutputCollection("/path/en") = {
# "Latvian-HOWTO": OutputDirectory("/path/en/Latvian-HOWTO")
# "Scanner-HOWTO": OutputDirectory("/path/en/Scanner-HOWTO")
# "UUCP-HOWTO": OutputDirectory("/path/en/UUCP-HOWTO")
# "Wireless-HOWTO": OutputDirectory("/path/en/Wireless-HOWTO")
# }
#
# '''
# if dirname is None:
# return
# elif not os.path.isdir(dirname):
# logger.critical("Output collection dir %s must already exist.",
# dirname)
# raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), dirname)
# for fname in sorted(os.listdir(dirname), key=lambda x: x.lower()):
# name = os.path.join(dirname, fname)
# if not os.path.isdir(name):
# logger.info("Skipping non-directory %s (in %s)", name, dirname)
# continue
# logger.debug("Found directory %s (in %s)", name, dirname)
# o = OutputDirectory(name)
# assert o.stem not in self
# self[o.stem] = o
, which may contain function names, class names, or code. Output only the next line. | self.output = OutputCollection(pubdir) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf8 -*-
#
# Copyright (c) 2016 Linux Documentation Project
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
# -- SUT
class TestConfigWorks(unittest.TestCase):
def test_basic(self):
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from argparse import Namespace
from tldp.config import collectconfiguration
and context including class names, function names, and sometimes code from other files:
# Path: tldp/config.py
# def collectconfiguration(tag, argv):
# '''main specification of command-line (and config file) shape'''
#
# ap = DefaultFreeArgumentParser()
# ap.add_argument('--sourcedir', '--source-dir', '--source-directory',
# '-s',
# default=[], action=DirectoriesExist,
# help='a directory containing LDP source documents')
#
# ap.add_argument('--pubdir', '--output', '--outputdir', '--outdir',
# '-o',
# default=None, action=DirectoryExists,
# help='a directory containing LDP output documents')
#
# ap.add_argument('--builddir', '--build-dir', '--build-directory',
# '-d',
# default=None, action=DirectoryExists,
# help='a scratch directory used for building')
#
# ap.add_argument('--configfile', '--config-file', '--cfg',
# '-c',
# default=DEFAULT_CONFIGFILE,
# type=arg_isreadablefile,
# help='a configuration file')
#
# ap.add_argument('--loglevel',
# default=logging.ERROR, type=arg_isloglevel,
# help='set the loglevel')
#
# ap.add_argument('--verbose',
# action=StoreTrueOrNargBool, nargs='?', default=False,
# help='more info in --list/--detail [%(default)s]')
#
# ap.add_argument('--skip',
# default=[], action='append', type=str,
# help='skip this stem during processing')
#
# ap.add_argument('--resources',
# default=['images', 'resources'], action='append', type=str,
# help='subdirs to copy during build [%(default)s]')
#
# # -- and the distinct, mutually exclusive actions this script can perform
# #
# g = ap.add_mutually_exclusive_group()
# g.add_argument('--publish',
# '-p',
# action='store_true', default=False,
# help='build and publish LDP documentation [%(default)s]')
#
# g.add_argument('--build',
# '-b',
# action='store_true', default=False,
# help='build LDP documentation [%(default)s]')
#
# g.add_argument('--script',
# '-S',
# action='store_true', default=False,
# help='dump runnable script [%(default)s]')
#
# g.add_argument('--detail', '--list',
# '-l',
# action='store_true', default=False,
# help='list elements of LDP system [%(default)s]')
#
# g.add_argument('--summary',
# '-t',
# action='store_true', default=False,
# help='dump inventory summary report [%(default)s]')
#
# g.add_argument('--doctypes', '--formats', '--format',
# '--list-doctypes', '--list-formats',
# '-T',
# action='store_true', default=False,
# help='show supported doctypes [%(default)s]')
#
# g.add_argument('--statustypes', '--list-statustypes',
# action='store_true', default=False,
# help='show status types and classes [%(default)s]')
#
# # -- collect up the distributed configuration fragments
# #
# for cls in tldp.typeguesser.knowndoctypes:
# argparse_method = getattr(cls, 'argparse', None)
# if argparse_method:
# argparse_method(ap)
#
# cc = CascadingConfig(tag, ap, argv)
# config, args = cc.parse()
# return config, args
. Output only the next line. | config, args = collectconfiguration('tag', []) |
Given the following code snippet before the placeholder: <|code_start|> file = kwargs.get('file', sys.stdout)
s = script.format(output=output, source=source, config=config)
print('', file=file)
print(s, file=file)
return True
@logtimings(logger.debug)
def execute_shellscript(self, script, preamble=preamble,
postamble=postamble, **kwargs):
source = self.source
output = self.output
config = self.config
logdir = output.logdir
prefix = source.doctype.__name__ + '-'
s = script.format(output=output, source=source, config=config)
tf = ntf(dir=logdir, prefix=prefix, suffix='.sh', delete=False)
tf.close()
with codecs.open(tf.name, 'w', encoding='utf-8') as f:
if preamble:
f.write(preamble)
f.write(s)
if postamble:
f.write(postamble)
mode = stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR
os.chmod(tf.name, mode)
cmd = [tf.name]
<|code_end|>
, predict the next line using imports from the current file:
import os
import sys
import stat
import time
import errno
import codecs
import shutil
import logging
import inspect
import networkx as nx
from tempfile import NamedTemporaryFile as ntf
from functools import wraps
from tldp.utils import execute, logtimings, writemd5sums
and context including class names, function names, and sometimes code from other files:
# Path: tldp/utils.py
# def execute(cmd, stdin=None, stdout=None, stderr=None,
# logdir=None, env=os.environ):
# '''(yet another) wrapper around subprocess.Popen()
#
# The processing tools for handling DocBook SGML, DocBook XML and Linuxdoc
# all use different conventions for writing outputs. Some write into the
# working directory. Others write to STDOUT. Others accept the output file
# as a required option.
#
# To allow for automation and flexibility, this wrapper function does what
# most other synchronous subprocess.Popen() wrappers does, but it adds a
# feature to record the STDOUT and STDERR of the executable. This is
# helpful when trying to diagnose build failures of individual documents.
#
# Required:
#
# - cmd: (list form only; the paranoid prefer shell=False)
# this must include the whole command-line
# - logdir: an existing directory in which temporary log files
# will be created
#
# Optional:
#
# - stdin: if not supplied, STDIN (FD 0) will be left as is
# - stdout: if not supplied, STDOUT (FD 1) will be connected
# to a named file in the logdir (and left for later inspection)
# - stderr: if not supplied, STDERR (FD 2) will be connected
# to a named file in the logdir (and left for later inspection)
# - env: if not supplied, just use current environment
#
# Returns: the numeric exit code of the process
#
# Side effects:
#
# * will probably create temporary files in logdir
# * function calls wait(); process execution will intentionally block
# until the child process terminates
#
# Possible exceptions:
#
# * if the first element of list cmd does not contain an executable,
# this function will raise an AssertionError
# * if logdir is not a directory, this function will raise ValueError or
# IOError
# * and, of course, any exceptions passed up from calling subprocess.Popen
#
# '''
# prefix = os.path.basename(cmd[0]) + '.' + str(os.getpid()) + '-'
#
# assert isexecutable(cmd[0])
#
# if logdir is None:
# raise ValueError("logdir must be a directory, cannot be None.")
#
# if not os.path.isdir(logdir):
# raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), logdir)
#
# # -- not remapping STDIN, because that doesn't make sense here
# mytfile = functools.partial(mkstemp, prefix=prefix, dir=logdir)
# if stdout is None:
# stdout, stdoutname = mytfile(suffix='.stdout')
# else:
# stdoutname = None
#
# if stderr is None:
# stderr, stderrname = mytfile(suffix='.stderr')
# else:
# stderrname = None
#
# logger.debug("About to execute: %r", cmd)
# proc = subprocess.Popen(cmd, shell=False, close_fds=True,
# stdin=stdin, stdout=stdout, stderr=stderr,
# env=env, preexec_fn=os.setsid)
# result = proc.wait()
# if result != 0:
# logger.error("Non-zero exit (%s) for process: %r", result, cmd)
# logger.error("Find STDOUT/STDERR in %s/%s*", logdir, prefix)
# if isinstance(stdout, int) and stdoutname:
# os.close(stdout)
# conditionallogging(result, 'STDOUT', stdoutname)
# if isinstance(stderr, int) and stderrname:
# os.close(stderr)
# conditionallogging(result, 'STDERR', stderrname)
# return result
#
# def logtimings(logmethod):
# def anon(f):
# @wraps(f)
# def timing(*args, **kwargs):
# s = time.time()
# result = f(*args, **kwargs)
# e = time.time()
# logmethod('running %s(%r, %r) took %.3f s',
# f.__name__, args, kwargs, e - s)
# return result
# return timing
# return anon
#
# def writemd5sums(fname, md5s, header=None):
# '''write an MD5SUM file from [(filename, MD5), ...]'''
# with codecs.open(fname, 'w', encoding='utf-8') as file:
# if header:
# print(header, file=file)
# for fname, hashval in sorted(md5s.items()):
# print(hashval + ' ' + fname, file=file)
. Output only the next line. | result = execute(cmd, logdir=logdir) |
Using the snippet: <|code_start|> if os.path.isdir(fullpath):
source.append('"' + fullpath + '"')
if not source:
logger.debug("%s no images or resources to copy", self.source.stem)
return True
s = 'rsync --archive --verbose %s ./' % (' '.join(source))
return self.shellscript(s, **kwargs)
def hook_build_success(self):
stem = self.output.stem
logdir = self.output.logdir
dirname = self.output.dirname
logger.info("%s build SUCCESS %s.", stem, dirname)
logger.debug("%s removing logs %s)", stem, logdir)
if os.path.isdir(logdir):
shutil.rmtree(logdir)
return True
def hook_build_failure(self):
pass
def shellscript(self, script, **kwargs):
if self.config.build:
return self.execute_shellscript(script, **kwargs)
elif self.config.script:
return self.dump_shellscript(script, **kwargs)
else:
etext = '%s in shellscript, neither --build nor --script'
raise Exception(etext % (self.source.stem,))
<|code_end|>
, determine the next line of code. You have imports:
import os
import sys
import stat
import time
import errno
import codecs
import shutil
import logging
import inspect
import networkx as nx
from tempfile import NamedTemporaryFile as ntf
from functools import wraps
from tldp.utils import execute, logtimings, writemd5sums
and context (class names, function names, or code) available:
# Path: tldp/utils.py
# def execute(cmd, stdin=None, stdout=None, stderr=None,
# logdir=None, env=os.environ):
# '''(yet another) wrapper around subprocess.Popen()
#
# The processing tools for handling DocBook SGML, DocBook XML and Linuxdoc
# all use different conventions for writing outputs. Some write into the
# working directory. Others write to STDOUT. Others accept the output file
# as a required option.
#
# To allow for automation and flexibility, this wrapper function does what
# most other synchronous subprocess.Popen() wrappers does, but it adds a
# feature to record the STDOUT and STDERR of the executable. This is
# helpful when trying to diagnose build failures of individual documents.
#
# Required:
#
# - cmd: (list form only; the paranoid prefer shell=False)
# this must include the whole command-line
# - logdir: an existing directory in which temporary log files
# will be created
#
# Optional:
#
# - stdin: if not supplied, STDIN (FD 0) will be left as is
# - stdout: if not supplied, STDOUT (FD 1) will be connected
# to a named file in the logdir (and left for later inspection)
# - stderr: if not supplied, STDERR (FD 2) will be connected
# to a named file in the logdir (and left for later inspection)
# - env: if not supplied, just use current environment
#
# Returns: the numeric exit code of the process
#
# Side effects:
#
# * will probably create temporary files in logdir
# * function calls wait(); process execution will intentionally block
# until the child process terminates
#
# Possible exceptions:
#
# * if the first element of list cmd does not contain an executable,
# this function will raise an AssertionError
# * if logdir is not a directory, this function will raise ValueError or
# IOError
# * and, of course, any exceptions passed up from calling subprocess.Popen
#
# '''
# prefix = os.path.basename(cmd[0]) + '.' + str(os.getpid()) + '-'
#
# assert isexecutable(cmd[0])
#
# if logdir is None:
# raise ValueError("logdir must be a directory, cannot be None.")
#
# if not os.path.isdir(logdir):
# raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), logdir)
#
# # -- not remapping STDIN, because that doesn't make sense here
# mytfile = functools.partial(mkstemp, prefix=prefix, dir=logdir)
# if stdout is None:
# stdout, stdoutname = mytfile(suffix='.stdout')
# else:
# stdoutname = None
#
# if stderr is None:
# stderr, stderrname = mytfile(suffix='.stderr')
# else:
# stderrname = None
#
# logger.debug("About to execute: %r", cmd)
# proc = subprocess.Popen(cmd, shell=False, close_fds=True,
# stdin=stdin, stdout=stdout, stderr=stderr,
# env=env, preexec_fn=os.setsid)
# result = proc.wait()
# if result != 0:
# logger.error("Non-zero exit (%s) for process: %r", result, cmd)
# logger.error("Find STDOUT/STDERR in %s/%s*", logdir, prefix)
# if isinstance(stdout, int) and stdoutname:
# os.close(stdout)
# conditionallogging(result, 'STDOUT', stdoutname)
# if isinstance(stderr, int) and stderrname:
# os.close(stderr)
# conditionallogging(result, 'STDERR', stderrname)
# return result
#
# def logtimings(logmethod):
# def anon(f):
# @wraps(f)
# def timing(*args, **kwargs):
# s = time.time()
# result = f(*args, **kwargs)
# e = time.time()
# logmethod('running %s(%r, %r) took %.3f s',
# f.__name__, args, kwargs, e - s)
# return result
# return timing
# return anon
#
# def writemd5sums(fname, md5s, header=None):
# '''write an MD5SUM file from [(filename, MD5), ...]'''
# with codecs.open(fname, 'w', encoding='utf-8') as file:
# if header:
# print(header, file=file)
# for fname, hashval in sorted(md5s.items()):
# print(hashval + ' ' + fname, file=file)
. Output only the next line. | @logtimings(logger.debug) |
Given the code snippet: <|code_start|> s = '''
# - - - - - {source.stem} - - - - - -
cd -- "{output.dirname}"'''
return self.shellscript(s, **kwargs)
os.chdir(self.output.dirname)
return True
def generate_md5sums(self, **kwargs):
logger.debug("%s generating MD5SUMS in %s.",
self.output.stem, self.output.dirname)
timestr = time.strftime('%F-%T', time.gmtime())
md5file = self.output.MD5SUMS
if self.config.script:
l = list()
for fname, hashval in sorted(self.source.md5sums.items()):
l.append('# {} {}'.format(hashval, fname))
md5s = '\n'.join(l)
s = '''# -- MD5SUMS file from source tree at {}
#
# md5sum > {} -- {}
#
{}
#'''
s = s.format(timestr,
md5file,
' '.join(self.source.md5sums.keys()),
md5s)
return self.shellscript(s, **kwargs)
header = '# -- MD5SUMS for {}'.format(self.source.stem)
<|code_end|>
, generate the next line using the imports in this file:
import os
import sys
import stat
import time
import errno
import codecs
import shutil
import logging
import inspect
import networkx as nx
from tempfile import NamedTemporaryFile as ntf
from functools import wraps
from tldp.utils import execute, logtimings, writemd5sums
and context (functions, classes, or occasionally code) from other files:
# Path: tldp/utils.py
# def execute(cmd, stdin=None, stdout=None, stderr=None,
# logdir=None, env=os.environ):
# '''(yet another) wrapper around subprocess.Popen()
#
# The processing tools for handling DocBook SGML, DocBook XML and Linuxdoc
# all use different conventions for writing outputs. Some write into the
# working directory. Others write to STDOUT. Others accept the output file
# as a required option.
#
# To allow for automation and flexibility, this wrapper function does what
# most other synchronous subprocess.Popen() wrappers does, but it adds a
# feature to record the STDOUT and STDERR of the executable. This is
# helpful when trying to diagnose build failures of individual documents.
#
# Required:
#
# - cmd: (list form only; the paranoid prefer shell=False)
# this must include the whole command-line
# - logdir: an existing directory in which temporary log files
# will be created
#
# Optional:
#
# - stdin: if not supplied, STDIN (FD 0) will be left as is
# - stdout: if not supplied, STDOUT (FD 1) will be connected
# to a named file in the logdir (and left for later inspection)
# - stderr: if not supplied, STDERR (FD 2) will be connected
# to a named file in the logdir (and left for later inspection)
# - env: if not supplied, just use current environment
#
# Returns: the numeric exit code of the process
#
# Side effects:
#
# * will probably create temporary files in logdir
# * function calls wait(); process execution will intentionally block
# until the child process terminates
#
# Possible exceptions:
#
# * if the first element of list cmd does not contain an executable,
# this function will raise an AssertionError
# * if logdir is not a directory, this function will raise ValueError or
# IOError
# * and, of course, any exceptions passed up from calling subprocess.Popen
#
# '''
# prefix = os.path.basename(cmd[0]) + '.' + str(os.getpid()) + '-'
#
# assert isexecutable(cmd[0])
#
# if logdir is None:
# raise ValueError("logdir must be a directory, cannot be None.")
#
# if not os.path.isdir(logdir):
# raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), logdir)
#
# # -- not remapping STDIN, because that doesn't make sense here
# mytfile = functools.partial(mkstemp, prefix=prefix, dir=logdir)
# if stdout is None:
# stdout, stdoutname = mytfile(suffix='.stdout')
# else:
# stdoutname = None
#
# if stderr is None:
# stderr, stderrname = mytfile(suffix='.stderr')
# else:
# stderrname = None
#
# logger.debug("About to execute: %r", cmd)
# proc = subprocess.Popen(cmd, shell=False, close_fds=True,
# stdin=stdin, stdout=stdout, stderr=stderr,
# env=env, preexec_fn=os.setsid)
# result = proc.wait()
# if result != 0:
# logger.error("Non-zero exit (%s) for process: %r", result, cmd)
# logger.error("Find STDOUT/STDERR in %s/%s*", logdir, prefix)
# if isinstance(stdout, int) and stdoutname:
# os.close(stdout)
# conditionallogging(result, 'STDOUT', stdoutname)
# if isinstance(stderr, int) and stderrname:
# os.close(stderr)
# conditionallogging(result, 'STDERR', stderrname)
# return result
#
# def logtimings(logmethod):
# def anon(f):
# @wraps(f)
# def timing(*args, **kwargs):
# s = time.time()
# result = f(*args, **kwargs)
# e = time.time()
# logmethod('running %s(%r, %r) took %.3f s',
# f.__name__, args, kwargs, e - s)
# return result
# return timing
# return anon
#
# def writemd5sums(fname, md5s, header=None):
# '''write an MD5SUM file from [(filename, MD5), ...]'''
# with codecs.open(fname, 'w', encoding='utf-8') as file:
# if header:
# print(header, file=file)
# for fname, hashval in sorted(md5s.items()):
# print(hashval + ' ' + fname, file=file)
. Output only the next line. | writemd5sums(md5file, self.source.md5sums, header=header) |
Given the following code snippet before the placeholder: <|code_start|> if os.path.isfile(filename):
if os.path.basename(filename) in IGNORABLE_SOURCE:
return None
return filename
elif os.path.isdir(filename):
return sourcedoc_fromdir(filename)
return None
def sourcedoc_fromdir(name):
candidates = list()
if not os.path.isdir(name):
return None
stem = os.path.basename(name)
for ext in knownextensions:
possible = os.path.join(name, stem + ext)
if os.path.isfile(possible):
candidates.append(possible)
if len(candidates) > 1:
logger.warning("%s multiple document choices in dir %s, bailing....",
stem, name)
raise Exception("multiple document choices in " + name)
elif len(candidates) == 0:
return None
else:
doc = candidates.pop()
logger.debug("%s identified main document %s.", stem, doc)
return doc
<|code_end|>
, predict the next line using imports from the current file:
import os
import sys
import errno
import logging
from tldp.ldpcollection import LDPDocumentCollection
from tldp.utils import md5files, stem_and_ext
from tldp.typeguesser import guess, knownextensions
and context including class names, function names, and sometimes code from other files:
# Path: tldp/ldpcollection.py
# class LDPDocumentCollection(collections.MutableMapping):
# '''a dict-like container for DocumentCollection objects
#
# Intended to be subclassed.
#
# Implements all the usual dictionary stuff, but also provides sorted
# lists of documents in the collection.
# '''
# def __repr__(self):
# return '<%s:(%s docs)>' % (self.__class__.__name__, len(self))
#
# def __delitem__(self, key):
# del self.__dict__[key]
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, value):
# self.__dict__[key] = value
#
# def __iter__(self):
# return iter(self.__dict__)
#
# def __len__(self):
# return len(self.__dict__)
#
# def iterkeys(self):
# return iter(self.keys)
#
# def itervalues(self):
# for key in sorted(self, key=lambda x: x.lower()):
# yield self[key]
#
# def iteritems(self):
# for key in self.keys:
# yield (key, self[key])
#
# def keys(self):
# return sorted(self, key=lambda x: x.lower())
#
# def items(self):
# return [(key, self[key]) for key in self.keys()]
#
# def values(self):
# return [self[key] for key in self.keys()]
#
# Path: tldp/utils.py
# def md5files(name, relative=None):
# '''get all of the MD5s for files from here downtree'''
# return fileinfo(name, relative=relative, func=md5file)
#
# def stem_and_ext(name):
# '''return (stem, ext) for any relative or absolute filename'''
# return os.path.splitext(os.path.basename(os.path.normpath(name)))
#
# Path: tldp/typeguesser.py
# def getDoctypeMembers(membertype):
# def getDoctypeClasses():
# def guess(fname):
. Output only the next line. | class SourceCollection(LDPDocumentCollection): |
Next line prediction: <|code_start|>
if not os.path.exists(self.filename):
fn = self.filename
logger.critical("Missing source document: %s", fn)
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
if os.path.isdir(self.filename):
self.filename = sourcedoc_fromdir(self.filename)
elif os.path.isfile(self.filename):
pass
else:
# -- we did not receive a useable document file or directory name
self.filename = None
if self.filename is None:
fn = filename
logger.critical("Source document is not a plain file: %s", fn)
raise ValueError(fn + " not identifiable as a document")
self.doctype = guess(self.filename)
self.status = 'source'
self.output = None
self.working = None
self.differing = set()
self.dirname, self.basename = os.path.split(self.filename)
self.stem, self.ext = stem_and_ext(self.basename)
parentbase = os.path.basename(self.dirname)
logger.debug("%s found source %s", self.stem, self.filename)
if parentbase == self.stem:
parentdir = os.path.dirname(self.dirname)
<|code_end|>
. Use current file imports:
(import os
import sys
import errno
import logging
from tldp.ldpcollection import LDPDocumentCollection
from tldp.utils import md5files, stem_and_ext
from tldp.typeguesser import guess, knownextensions)
and context including class names, function names, or small code snippets from other files:
# Path: tldp/ldpcollection.py
# class LDPDocumentCollection(collections.MutableMapping):
# '''a dict-like container for DocumentCollection objects
#
# Intended to be subclassed.
#
# Implements all the usual dictionary stuff, but also provides sorted
# lists of documents in the collection.
# '''
# def __repr__(self):
# return '<%s:(%s docs)>' % (self.__class__.__name__, len(self))
#
# def __delitem__(self, key):
# del self.__dict__[key]
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, value):
# self.__dict__[key] = value
#
# def __iter__(self):
# return iter(self.__dict__)
#
# def __len__(self):
# return len(self.__dict__)
#
# def iterkeys(self):
# return iter(self.keys)
#
# def itervalues(self):
# for key in sorted(self, key=lambda x: x.lower()):
# yield self[key]
#
# def iteritems(self):
# for key in self.keys:
# yield (key, self[key])
#
# def keys(self):
# return sorted(self, key=lambda x: x.lower())
#
# def items(self):
# return [(key, self[key]) for key in self.keys()]
#
# def values(self):
# return [self[key] for key in self.keys()]
#
# Path: tldp/utils.py
# def md5files(name, relative=None):
# '''get all of the MD5s for files from here downtree'''
# return fileinfo(name, relative=relative, func=md5file)
#
# def stem_and_ext(name):
# '''return (stem, ext) for any relative or absolute filename'''
# return os.path.splitext(os.path.basename(os.path.normpath(name)))
#
# Path: tldp/typeguesser.py
# def getDoctypeMembers(membertype):
# def getDoctypeClasses():
# def guess(fname):
. Output only the next line. | self.md5sums = md5files(self.dirname, relative=parentdir) |
Continue the code snippet: <|code_start|> files are newer than the output files. Thus, the stat() information
for every file in the source document directory (or just the single
source document file) will be collected.
'''
self.filename = os.path.abspath(filename)
if not os.path.exists(self.filename):
fn = self.filename
logger.critical("Missing source document: %s", fn)
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
if os.path.isdir(self.filename):
self.filename = sourcedoc_fromdir(self.filename)
elif os.path.isfile(self.filename):
pass
else:
# -- we did not receive a useable document file or directory name
self.filename = None
if self.filename is None:
fn = filename
logger.critical("Source document is not a plain file: %s", fn)
raise ValueError(fn + " not identifiable as a document")
self.doctype = guess(self.filename)
self.status = 'source'
self.output = None
self.working = None
self.differing = set()
self.dirname, self.basename = os.path.split(self.filename)
<|code_end|>
. Use current file imports:
import os
import sys
import errno
import logging
from tldp.ldpcollection import LDPDocumentCollection
from tldp.utils import md5files, stem_and_ext
from tldp.typeguesser import guess, knownextensions
and context (classes, functions, or code) from other files:
# Path: tldp/ldpcollection.py
# class LDPDocumentCollection(collections.MutableMapping):
# '''a dict-like container for DocumentCollection objects
#
# Intended to be subclassed.
#
# Implements all the usual dictionary stuff, but also provides sorted
# lists of documents in the collection.
# '''
# def __repr__(self):
# return '<%s:(%s docs)>' % (self.__class__.__name__, len(self))
#
# def __delitem__(self, key):
# del self.__dict__[key]
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, value):
# self.__dict__[key] = value
#
# def __iter__(self):
# return iter(self.__dict__)
#
# def __len__(self):
# return len(self.__dict__)
#
# def iterkeys(self):
# return iter(self.keys)
#
# def itervalues(self):
# for key in sorted(self, key=lambda x: x.lower()):
# yield self[key]
#
# def iteritems(self):
# for key in self.keys:
# yield (key, self[key])
#
# def keys(self):
# return sorted(self, key=lambda x: x.lower())
#
# def items(self):
# return [(key, self[key]) for key in self.keys()]
#
# def values(self):
# return [self[key] for key in self.keys()]
#
# Path: tldp/utils.py
# def md5files(name, relative=None):
# '''get all of the MD5s for files from here downtree'''
# return fileinfo(name, relative=relative, func=md5file)
#
# def stem_and_ext(name):
# '''return (stem, ext) for any relative or absolute filename'''
# return os.path.splitext(os.path.basename(os.path.normpath(name)))
#
# Path: tldp/typeguesser.py
# def getDoctypeMembers(membertype):
# def getDoctypeClasses():
# def guess(fname):
. Output only the next line. | self.stem, self.ext = stem_and_ext(self.basename) |
Using the snippet: <|code_start|>
There are two important attributes. First, the document type guesser
will try to infer the doctype (from file extension and signature).
Note that it is not a fatal error if document type cannot be guessed,
but the document will not be able to be processed. Second, it is
useful during the decision-making process to know if any of the source
files are newer than the output files. Thus, the stat() information
for every file in the source document directory (or just the single
source document file) will be collected.
'''
self.filename = os.path.abspath(filename)
if not os.path.exists(self.filename):
fn = self.filename
logger.critical("Missing source document: %s", fn)
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
if os.path.isdir(self.filename):
self.filename = sourcedoc_fromdir(self.filename)
elif os.path.isfile(self.filename):
pass
else:
# -- we did not receive a useable document file or directory name
self.filename = None
if self.filename is None:
fn = filename
logger.critical("Source document is not a plain file: %s", fn)
raise ValueError(fn + " not identifiable as a document")
<|code_end|>
, determine the next line of code. You have imports:
import os
import sys
import errno
import logging
from tldp.ldpcollection import LDPDocumentCollection
from tldp.utils import md5files, stem_and_ext
from tldp.typeguesser import guess, knownextensions
and context (class names, function names, or code) available:
# Path: tldp/ldpcollection.py
# class LDPDocumentCollection(collections.MutableMapping):
# '''a dict-like container for DocumentCollection objects
#
# Intended to be subclassed.
#
# Implements all the usual dictionary stuff, but also provides sorted
# lists of documents in the collection.
# '''
# def __repr__(self):
# return '<%s:(%s docs)>' % (self.__class__.__name__, len(self))
#
# def __delitem__(self, key):
# del self.__dict__[key]
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, value):
# self.__dict__[key] = value
#
# def __iter__(self):
# return iter(self.__dict__)
#
# def __len__(self):
# return len(self.__dict__)
#
# def iterkeys(self):
# return iter(self.keys)
#
# def itervalues(self):
# for key in sorted(self, key=lambda x: x.lower()):
# yield self[key]
#
# def iteritems(self):
# for key in self.keys:
# yield (key, self[key])
#
# def keys(self):
# return sorted(self, key=lambda x: x.lower())
#
# def items(self):
# return [(key, self[key]) for key in self.keys()]
#
# def values(self):
# return [self[key] for key in self.keys()]
#
# Path: tldp/utils.py
# def md5files(name, relative=None):
# '''get all of the MD5s for files from here downtree'''
# return fileinfo(name, relative=relative, func=md5file)
#
# def stem_and_ext(name):
# '''return (stem, ext) for any relative or absolute filename'''
# return os.path.splitext(os.path.basename(os.path.normpath(name)))
#
# Path: tldp/typeguesser.py
# def getDoctypeMembers(membertype):
# def getDoctypeClasses():
# def guess(fname):
. Output only the next line. | self.doctype = guess(self.filename) |
Given snippet: <|code_start|> else:
logger.warning("Skipping non-document %s", fname)
continue
for candy in candidates:
if candy.stem in found:
dup = found[candy.stem].filename
logger.warning("Ignoring duplicate is %s", candy.filename)
logger.warning("Existing dup-entry is %s", dup)
else:
found[candy.stem] = candy
logger.debug("Discovered %s source documents", len(found))
return found
def arg_issourcedoc(filename):
filename = os.path.abspath(filename)
if os.path.isfile(filename):
if os.path.basename(filename) in IGNORABLE_SOURCE:
return None
return filename
elif os.path.isdir(filename):
return sourcedoc_fromdir(filename)
return None
def sourcedoc_fromdir(name):
candidates = list()
if not os.path.isdir(name):
return None
stem = os.path.basename(name)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import sys
import errno
import logging
from tldp.ldpcollection import LDPDocumentCollection
from tldp.utils import md5files, stem_and_ext
from tldp.typeguesser import guess, knownextensions
and context:
# Path: tldp/ldpcollection.py
# class LDPDocumentCollection(collections.MutableMapping):
# '''a dict-like container for DocumentCollection objects
#
# Intended to be subclassed.
#
# Implements all the usual dictionary stuff, but also provides sorted
# lists of documents in the collection.
# '''
# def __repr__(self):
# return '<%s:(%s docs)>' % (self.__class__.__name__, len(self))
#
# def __delitem__(self, key):
# del self.__dict__[key]
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, value):
# self.__dict__[key] = value
#
# def __iter__(self):
# return iter(self.__dict__)
#
# def __len__(self):
# return len(self.__dict__)
#
# def iterkeys(self):
# return iter(self.keys)
#
# def itervalues(self):
# for key in sorted(self, key=lambda x: x.lower()):
# yield self[key]
#
# def iteritems(self):
# for key in self.keys:
# yield (key, self[key])
#
# def keys(self):
# return sorted(self, key=lambda x: x.lower())
#
# def items(self):
# return [(key, self[key]) for key in self.keys()]
#
# def values(self):
# return [self[key] for key in self.keys()]
#
# Path: tldp/utils.py
# def md5files(name, relative=None):
# '''get all of the MD5s for files from here downtree'''
# return fileinfo(name, relative=relative, func=md5file)
#
# def stem_and_ext(name):
# '''return (stem, ext) for any relative or absolute filename'''
# return os.path.splitext(os.path.basename(os.path.normpath(name)))
#
# Path: tldp/typeguesser.py
# def getDoctypeMembers(membertype):
# def getDoctypeClasses():
# def guess(fname):
which might include code, classes, or functions. Output only the next line. | for ext in knownextensions: |
Given snippet: <|code_start|> shutil.copy(filename, newname)
else:
with open(newname, 'w'):
pass
relname = os.path.relpath(newname, self.tempdir)
return relname, newname
class CCTestTools(unittest.TestCase):
def setUp(self):
self.makeTempdir()
def tearDown(self):
self.removeTempdir()
def makeTempdir(self):
self.tempdir = mkdtemp(prefix='tldp-test-')
def removeTempdir(self):
shutil.rmtree(self.tempdir)
def writeconfig(self, case):
tf = ntf(prefix=case.tag, suffix='.cfg', dir=self.tempdir, delete=False)
tf.close()
with codecs.open(tf.name, 'w', encoding='utf-8') as f:
f.write(case.cfg)
case.configfile = tf.name
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import codecs
import random
import shutil
import unittest
import tldp.config
from tempfile import mkdtemp
from tempfile import NamedTemporaryFile as ntf
from tldp.outputs import OutputNamingConvention
from tldp.utils import writemd5sums, md5file
and context:
# Path: tldp/outputs.py
# class OutputNamingConvention(object):
# '''A base class inherited by OutputDirectory to ensure consistent
# naming of files across the output collection of documents,
# regardless of the source document type and processing toolchain
# choice.
#
# Sets a list of names for documents that are expected to be present
# in order to report that the directory iscomplete.
# '''
# expected = ['name_txt', 'name_pdf', 'name_htmls', 'name_html',
# 'name_indexhtml']
#
# def __init__(self, dirname, stem):
# self.dirname = dirname
# self.stem = stem
#
# @property
# def MD5SUMS(self):
# return os.path.join(self.dirname, '.LDP-source-MD5SUMS')
#
# @property
# def name_txt(self):
# return os.path.join(self.dirname, self.stem + '.txt')
#
# @property
# def name_fo(self):
# return os.path.join(self.dirname, self.stem + '.fo')
#
# @property
# def name_pdf(self):
# return os.path.join(self.dirname, self.stem + '.pdf')
#
# @property
# def name_html(self):
# return os.path.join(self.dirname, self.stem + '.html')
#
# @property
# def name_htmls(self):
# return os.path.join(self.dirname, self.stem + '-single.html')
#
# @property
# def name_epub(self):
# return os.path.join(self.dirname, self.stem + '.epub')
#
# @property
# def name_indexhtml(self):
# return os.path.join(self.dirname, 'index.html')
#
# @property
# def validsource(self):
# return os.path.join(self.dirname, self.stem + '.xml') # -- burp
#
# @property
# def iscomplete(self):
# '''True if the output directory contains all expected documents'''
# present = list()
# for prop in self.expected:
# name = getattr(self, prop, None)
# assert name is not None
# present.append(os.path.exists(name))
# return all(present)
#
# @property
# def missing(self):
# '''returns a set of missing files'''
# missing = set()
# for prop in self.expected:
# name = getattr(self, prop, None)
# assert name is not None
# if not os.path.isfile(name):
# missing.add(name)
# return missing
#
# @property
# def md5sums(self):
# d = dict()
# try:
# with codecs.open(self.MD5SUMS, encoding='utf-8') as f:
# for line in f:
# if line.startswith('#'):
# continue
# hashval, fname = line.strip().split()
# d[fname] = hashval
# except IOError as e:
# if e.errno != errno.ENOENT:
# raise
# return d
#
# Path: tldp/utils.py
# def writemd5sums(fname, md5s, header=None):
# '''write an MD5SUM file from [(filename, MD5), ...]'''
# with codecs.open(fname, 'w', encoding='utf-8') as file:
# if header:
# print(header, file=file)
# for fname, hashval in sorted(md5s.items()):
# print(hashval + ' ' + fname, file=file)
#
# def md5file(name):
# '''return MD5 hash for a single file name'''
# with open(name, 'rb') as f:
# bs = f.read()
# md5 = hashlib.md5(bs).hexdigest()
# try:
# md5 = unicode(md5)
# except NameError:
# pass # -- python3
# return md5
which might include code, classes, or functions. Output only the next line. | class TestOutputDirSkeleton(OutputNamingConvention): |
Given the following code snippet before the placeholder: <|code_start|>
class CCTestTools(unittest.TestCase):
def setUp(self):
self.makeTempdir()
def tearDown(self):
self.removeTempdir()
def makeTempdir(self):
self.tempdir = mkdtemp(prefix='tldp-test-')
def removeTempdir(self):
shutil.rmtree(self.tempdir)
def writeconfig(self, case):
tf = ntf(prefix=case.tag, suffix='.cfg', dir=self.tempdir, delete=False)
tf.close()
with codecs.open(tf.name, 'w', encoding='utf-8') as f:
f.write(case.cfg)
case.configfile = tf.name
class TestOutputDirSkeleton(OutputNamingConvention):
def mkdir(self):
if not os.path.isdir(self.dirname):
os.mkdir(self.dirname)
def create_md5sum_file(self, md5s):
<|code_end|>
, predict the next line using imports from the current file:
import os
import codecs
import random
import shutil
import unittest
import tldp.config
from tempfile import mkdtemp
from tempfile import NamedTemporaryFile as ntf
from tldp.outputs import OutputNamingConvention
from tldp.utils import writemd5sums, md5file
and context including class names, function names, and sometimes code from other files:
# Path: tldp/outputs.py
# class OutputNamingConvention(object):
# '''A base class inherited by OutputDirectory to ensure consistent
# naming of files across the output collection of documents,
# regardless of the source document type and processing toolchain
# choice.
#
# Sets a list of names for documents that are expected to be present
# in order to report that the directory iscomplete.
# '''
# expected = ['name_txt', 'name_pdf', 'name_htmls', 'name_html',
# 'name_indexhtml']
#
# def __init__(self, dirname, stem):
# self.dirname = dirname
# self.stem = stem
#
# @property
# def MD5SUMS(self):
# return os.path.join(self.dirname, '.LDP-source-MD5SUMS')
#
# @property
# def name_txt(self):
# return os.path.join(self.dirname, self.stem + '.txt')
#
# @property
# def name_fo(self):
# return os.path.join(self.dirname, self.stem + '.fo')
#
# @property
# def name_pdf(self):
# return os.path.join(self.dirname, self.stem + '.pdf')
#
# @property
# def name_html(self):
# return os.path.join(self.dirname, self.stem + '.html')
#
# @property
# def name_htmls(self):
# return os.path.join(self.dirname, self.stem + '-single.html')
#
# @property
# def name_epub(self):
# return os.path.join(self.dirname, self.stem + '.epub')
#
# @property
# def name_indexhtml(self):
# return os.path.join(self.dirname, 'index.html')
#
# @property
# def validsource(self):
# return os.path.join(self.dirname, self.stem + '.xml') # -- burp
#
# @property
# def iscomplete(self):
# '''True if the output directory contains all expected documents'''
# present = list()
# for prop in self.expected:
# name = getattr(self, prop, None)
# assert name is not None
# present.append(os.path.exists(name))
# return all(present)
#
# @property
# def missing(self):
# '''returns a set of missing files'''
# missing = set()
# for prop in self.expected:
# name = getattr(self, prop, None)
# assert name is not None
# if not os.path.isfile(name):
# missing.add(name)
# return missing
#
# @property
# def md5sums(self):
# d = dict()
# try:
# with codecs.open(self.MD5SUMS, encoding='utf-8') as f:
# for line in f:
# if line.startswith('#'):
# continue
# hashval, fname = line.strip().split()
# d[fname] = hashval
# except IOError as e:
# if e.errno != errno.ENOENT:
# raise
# return d
#
# Path: tldp/utils.py
# def writemd5sums(fname, md5s, header=None):
# '''write an MD5SUM file from [(filename, MD5), ...]'''
# with codecs.open(fname, 'w', encoding='utf-8') as file:
# if header:
# print(header, file=file)
# for fname, hashval in sorted(md5s.items()):
# print(hashval + ' ' + fname, file=file)
#
# def md5file(name):
# '''return MD5 hash for a single file name'''
# with open(name, 'rb') as f:
# bs = f.read()
# md5 = hashlib.md5(bs).hexdigest()
# try:
# md5 = unicode(md5)
# except NameError:
# pass # -- python3
# return md5
. Output only the next line. | writemd5sums(self.MD5SUMS, md5s) |
Given the code snippet: <|code_start|> if not os.path.abspath(dirname):
raise Exception("Please use absolute path in unit tests....")
self.dirname = dirname
if not os.path.isdir(self.dirname):
os.mkdir(self.dirname)
self.md5s = dict()
def copytree(self, source):
dst = opj(self.dirname, opb(source))
shutil.copytree(source, dst)
def create_stale(self, fname):
l = list(self.md5s[fname])
random.shuffle(l)
if l == self.md5s[fname]:
self.invalidate_checksum(fname)
self.md5s[fname] = ''.join(l)
@property
def md5sums(self):
return self.md5s
def addsourcefile(self, filename, content):
fname = os.path.join(self.dirname, filename)
if os.path.isfile(content):
shutil.copy(content, fname)
else:
with codecs.open(fname, 'w', encoding='utf-8') as f:
f.write(content)
relpath = os.path.relpath(fname, start=self.dirname)
<|code_end|>
, generate the next line using the imports in this file:
import os
import codecs
import random
import shutil
import unittest
import tldp.config
from tempfile import mkdtemp
from tempfile import NamedTemporaryFile as ntf
from tldp.outputs import OutputNamingConvention
from tldp.utils import writemd5sums, md5file
and context (functions, classes, or occasionally code) from other files:
# Path: tldp/outputs.py
# class OutputNamingConvention(object):
# '''A base class inherited by OutputDirectory to ensure consistent
# naming of files across the output collection of documents,
# regardless of the source document type and processing toolchain
# choice.
#
# Sets a list of names for documents that are expected to be present
# in order to report that the directory iscomplete.
# '''
# expected = ['name_txt', 'name_pdf', 'name_htmls', 'name_html',
# 'name_indexhtml']
#
# def __init__(self, dirname, stem):
# self.dirname = dirname
# self.stem = stem
#
# @property
# def MD5SUMS(self):
# return os.path.join(self.dirname, '.LDP-source-MD5SUMS')
#
# @property
# def name_txt(self):
# return os.path.join(self.dirname, self.stem + '.txt')
#
# @property
# def name_fo(self):
# return os.path.join(self.dirname, self.stem + '.fo')
#
# @property
# def name_pdf(self):
# return os.path.join(self.dirname, self.stem + '.pdf')
#
# @property
# def name_html(self):
# return os.path.join(self.dirname, self.stem + '.html')
#
# @property
# def name_htmls(self):
# return os.path.join(self.dirname, self.stem + '-single.html')
#
# @property
# def name_epub(self):
# return os.path.join(self.dirname, self.stem + '.epub')
#
# @property
# def name_indexhtml(self):
# return os.path.join(self.dirname, 'index.html')
#
# @property
# def validsource(self):
# return os.path.join(self.dirname, self.stem + '.xml') # -- burp
#
# @property
# def iscomplete(self):
# '''True if the output directory contains all expected documents'''
# present = list()
# for prop in self.expected:
# name = getattr(self, prop, None)
# assert name is not None
# present.append(os.path.exists(name))
# return all(present)
#
# @property
# def missing(self):
# '''returns a set of missing files'''
# missing = set()
# for prop in self.expected:
# name = getattr(self, prop, None)
# assert name is not None
# if not os.path.isfile(name):
# missing.add(name)
# return missing
#
# @property
# def md5sums(self):
# d = dict()
# try:
# with codecs.open(self.MD5SUMS, encoding='utf-8') as f:
# for line in f:
# if line.startswith('#'):
# continue
# hashval, fname = line.strip().split()
# d[fname] = hashval
# except IOError as e:
# if e.errno != errno.ENOENT:
# raise
# return d
#
# Path: tldp/utils.py
# def writemd5sums(fname, md5s, header=None):
# '''write an MD5SUM file from [(filename, MD5), ...]'''
# with codecs.open(fname, 'w', encoding='utf-8') as file:
# if header:
# print(header, file=file)
# for fname, hashval in sorted(md5s.items()):
# print(hashval + ' ' + fname, file=file)
#
# def md5file(name):
# '''return MD5 hash for a single file name'''
# with open(name, 'rb') as f:
# bs = f.read()
# md5 = hashlib.md5(bs).hexdigest()
# try:
# md5 = unicode(md5)
# except NameError:
# pass # -- python3
# return md5
. Output only the next line. | self.md5s[relpath] = md5file(fname) |
Here is a snippet: <|code_start|># -*- coding: utf8 -*-
#
# Copyright (c) 2016 Linux Documentation Project
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
# -- Test Data
# -- SUT
class TestInventoryUsage(TestInventoryBase):
def test_inventory_repr(self):
c = self.config
ex = random.choice(example.sources)
self.add_published('Frobnitz-HOWTO', ex)
<|code_end|>
. Write the next line using the current file imports:
import random
import example
from tldptesttools import TestInventoryBase
from tldp.inventory import Inventory
and context from other files:
# Path: tldp/inventory.py
# class Inventory(object):
# '''a container for classifying documents by their status
#
# Every SourceDocument has no more than one matching OutputDirectory.
#
# The Inventory class encodes the logic for identifying the following
# different status possibilities for an arbitrary set of SourceDocuments and
# OutputDirectorys.
#
# The following are possible values for status:
# - 'source': a source document before any status detection
# - 'output': an output document before any status detection
# - 'new': a source document without any matching output stem
# - 'published': a pair of source/output documents with matching stems
# - 'orphan': an output document without any matching source stem
# - 'broken': a published document with missing output files
# - 'stale': a published document with new(er) source files
#
# The Inventory object is intended to be used to identify work that needs to
# be done on individual source documents to produce up-to-date output
# documents.
# '''
# def __repr__(self):
# return '<%s: %d published, %d orphan, %d new, %d stale, %d broken>' % (
# self.__class__.__name__,
# len(self.published),
# len(self.orphan),
# len(self.new),
# len(self.stale),
# len(self.broken),)
#
# def __init__(self, pubdir, sourcedirs):
# '''construct an Inventory
#
# pubdir: path to the OutputCollection
#
# sourcedirs: a list of directories which could be passed to the
# SourceCollection object; essentially a directory containing
# SourceDocuments; for example LDP/LDP/howto/linuxdoc and
# LDP/LDP/guide/docbook
# '''
# self.output = OutputCollection(pubdir)
# self.source = SourceCollection(sourcedirs)
# s = copy.deepcopy(self.source)
# o = copy.deepcopy(self.output)
# sset = set(s.keys())
# oset = set(o.keys())
#
# # -- orphan identification
# #
# self.orphan = OutputCollection()
# for doc in oset.difference(sset):
# self.orphan[doc] = o[doc]
# del o[doc]
# self.orphan[doc].status = 'orphan'
# logger.debug("Identified %d orphan documents: %r.", len(self.orphan),
# self.orphan.keys())
#
# # -- unpublished ('new') identification
# #
# self.new = SourceCollection()
# for doc in sset.difference(oset):
# self.new[doc] = s[doc]
# del s[doc]
# self.new[doc].status = 'new'
# logger.debug("Identified %d new documents: %r.", len(self.new),
# self.new.keys())
#
# # -- published identification; source and output should be same size
# assert len(s) == len(o)
# for stem, odoc in o.items():
# sdoc = s[stem]
# sdoc.output = odoc
# odoc.source = sdoc
# sdoc.status = sdoc.output.status = 'published'
# self.published = s
# logger.debug("Identified %d published documents.", len(self.published))
#
# # -- broken identification
# #
# self.broken = SourceCollection()
# for stem, sdoc in s.items():
# if not sdoc.output.iscomplete:
# self.broken[stem] = sdoc
# sdoc.status = sdoc.output.status = 'broken'
# logger.debug("Identified %d broken documents: %r.", len(self.broken),
# self.broken.keys())
#
# # -- stale identification
# #
# self.stale = SourceCollection()
# for stem, sdoc in s.items():
# odoc = sdoc.output
# omd5, smd5 = odoc.md5sums, sdoc.md5sums
# if omd5 != smd5:
# logger.debug("%s differing MD5 sets %r %r", stem, smd5, omd5)
# changed = set()
# for gone in set(omd5.keys()).difference(smd5.keys()):
# logger.debug("%s gone %s", stem, gone)
# changed.add(('gone', gone))
# for new in set(smd5.keys()).difference(omd5.keys()):
# changed.add(('new', new))
# for sfn in set(smd5.keys()).intersection(omd5.keys()):
# if smd5[sfn] != omd5[sfn]:
# changed.add(('changed', sfn))
# for why, sfn in changed:
# logger.debug("%s differing source %s (%s)", stem, sfn, why)
# odoc.status = sdoc.status = 'stale'
# sdoc.differing = changed
# self.stale[stem] = sdoc
# logger.debug("Identified %d stale documents: %r.", len(self.stale),
# self.stale.keys())
#
# def getByStatusClass(self, status_class):
# desired = status_classes.get(status_class, None)
# assert isinstance(desired, list)
# collection = SourceCollection()
# for status_type in desired:
# collection.update(getattr(self, status_type))
# return collection
#
# @property
# def outputs(self):
# return self.getByStatusClass('outputs')
#
# @property
# def sources(self):
# return self.getByStatusClass('sources')
#
# @property
# def problems(self):
# return self.getByStatusClass('problems')
#
# @property
# def work(self):
# return self.getByStatusClass('work')
#
# @property
# def orphans(self):
# return self.getByStatusClass('orphans')
#
# @property
# def orphaned(self):
# return self.getByStatusClass('orphaned')
#
# @property
# def all(self):
# return self.getByStatusClass('all')
, which may include functions, classes, or code. Output only the next line. | i = Inventory(c.pubdir, c.sourcedir) |
Given the following code snippet before the placeholder: <|code_start|> '''constructor
:param dirname: directory name for all output documents
This directory name is expected to end with the document stem name,
for example '/path/to/the/collection/Unicode-HOWTO'. The parent
directory (e.g. '/path/to/the/collection' must exist already. The
output directory itself will be created, or emptied and cleared if
the document needs to be rebuilt.
'''
self.dirname = os.path.abspath(dirname)
self.stem = os.path.basename(self.dirname)
super(OutputDirectory, self).__init__(self.dirname, self.stem)
parent = os.path.dirname(self.dirname)
if not os.path.isdir(parent):
logger.critical("Missing output collection directory %s.", parent)
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), parent)
self.status = 'output'
self.source = source
self.logdir = os.path.join(self.dirname, logdir)
def detail(self, widths, verbose, file=sys.stdout):
template = ' '.join(('{s.status:{w.status}}',
'{u:{w.doctype}}',
'{s.stem:{w.stem}}'))
outstr = template.format(s=self, w=widths, u="<unknown>")
print(outstr, file=file)
if verbose:
print(' missing source', file=file)
<|code_end|>
, predict the next line using imports from the current file:
import os
import sys
import errno
import codecs
import logging
from tldp.ldpcollection import LDPDocumentCollection
from tldp.utils import logdir
and context including class names, function names, and sometimes code from other files:
# Path: tldp/ldpcollection.py
# class LDPDocumentCollection(collections.MutableMapping):
# '''a dict-like container for DocumentCollection objects
#
# Intended to be subclassed.
#
# Implements all the usual dictionary stuff, but also provides sorted
# lists of documents in the collection.
# '''
# def __repr__(self):
# return '<%s:(%s docs)>' % (self.__class__.__name__, len(self))
#
# def __delitem__(self, key):
# del self.__dict__[key]
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, value):
# self.__dict__[key] = value
#
# def __iter__(self):
# return iter(self.__dict__)
#
# def __len__(self):
# return len(self.__dict__)
#
# def iterkeys(self):
# return iter(self.keys)
#
# def itervalues(self):
# for key in sorted(self, key=lambda x: x.lower()):
# yield self[key]
#
# def iteritems(self):
# for key in self.keys:
# yield (key, self[key])
#
# def keys(self):
# return sorted(self, key=lambda x: x.lower())
#
# def items(self):
# return [(key, self[key]) for key in self.keys()]
#
# def values(self):
# return [self[key] for key in self.keys()]
#
# Path: tldp/utils.py
# def logtimings(logmethod):
# def anon(f):
# def timing(*args, **kwargs):
# def firstfoundfile(locations):
# def arg_isloglevel(l, defaultlevel=logging.ERROR):
# def arg_isstr(s):
# def arg_isreadablefile(f):
# def arg_isdirectory(d):
# def arg_isexecutable(f):
# def sameFilesystem(d0, d1):
# def stem_and_ext(name):
# def swapdirs(a, b):
# def logfilecontents(logmethod, prefix, fname):
# def conditionallogging(result, prefix, fname):
# def execute(cmd, stdin=None, stdout=None, stderr=None,
# logdir=None, env=os.environ):
# def isexecutable(f):
# def isreadablefile(f):
# def isstr(s):
# def which(program):
# def writemd5sums(fname, md5s, header=None):
# def md5file(name):
# def statfile(name):
# def md5files(name, relative=None):
# def statfiles(name, relative=None):
# def fileinfo(name, relative=None, func=statfile):
. Output only the next line. | class OutputCollection(LDPDocumentCollection): |
Given snippet: <|code_start|> An important element of the OutputDirectory is the stem, determined
from the directory name when __init__() is called.
'''
def __repr__(self):
return '<%s:%s>' % (self.__class__.__name__, self.dirname)
@classmethod
def fromsource(cls, dirname, source):
newname = os.path.join(dirname, source.stem)
return cls(newname, source=source)
def __init__(self, dirname, source=None):
'''constructor
:param dirname: directory name for all output documents
This directory name is expected to end with the document stem name,
for example '/path/to/the/collection/Unicode-HOWTO'. The parent
directory (e.g. '/path/to/the/collection' must exist already. The
output directory itself will be created, or emptied and cleared if
the document needs to be rebuilt.
'''
self.dirname = os.path.abspath(dirname)
self.stem = os.path.basename(self.dirname)
super(OutputDirectory, self).__init__(self.dirname, self.stem)
parent = os.path.dirname(self.dirname)
if not os.path.isdir(parent):
logger.critical("Missing output collection directory %s.", parent)
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), parent)
self.status = 'output'
self.source = source
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import sys
import errno
import codecs
import logging
from tldp.ldpcollection import LDPDocumentCollection
from tldp.utils import logdir
and context:
# Path: tldp/ldpcollection.py
# class LDPDocumentCollection(collections.MutableMapping):
# '''a dict-like container for DocumentCollection objects
#
# Intended to be subclassed.
#
# Implements all the usual dictionary stuff, but also provides sorted
# lists of documents in the collection.
# '''
# def __repr__(self):
# return '<%s:(%s docs)>' % (self.__class__.__name__, len(self))
#
# def __delitem__(self, key):
# del self.__dict__[key]
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, value):
# self.__dict__[key] = value
#
# def __iter__(self):
# return iter(self.__dict__)
#
# def __len__(self):
# return len(self.__dict__)
#
# def iterkeys(self):
# return iter(self.keys)
#
# def itervalues(self):
# for key in sorted(self, key=lambda x: x.lower()):
# yield self[key]
#
# def iteritems(self):
# for key in self.keys:
# yield (key, self[key])
#
# def keys(self):
# return sorted(self, key=lambda x: x.lower())
#
# def items(self):
# return [(key, self[key]) for key in self.keys()]
#
# def values(self):
# return [self[key] for key in self.keys()]
#
# Path: tldp/utils.py
# def logtimings(logmethod):
# def anon(f):
# def timing(*args, **kwargs):
# def firstfoundfile(locations):
# def arg_isloglevel(l, defaultlevel=logging.ERROR):
# def arg_isstr(s):
# def arg_isreadablefile(f):
# def arg_isdirectory(d):
# def arg_isexecutable(f):
# def sameFilesystem(d0, d1):
# def stem_and_ext(name):
# def swapdirs(a, b):
# def logfilecontents(logmethod, prefix, fname):
# def conditionallogging(result, prefix, fname):
# def execute(cmd, stdin=None, stdout=None, stderr=None,
# logdir=None, env=os.environ):
# def isexecutable(f):
# def isreadablefile(f):
# def isstr(s):
# def which(program):
# def writemd5sums(fname, md5s, header=None):
# def md5file(name):
# def statfile(name):
# def md5files(name, relative=None):
# def statfiles(name, relative=None):
# def fileinfo(name, relative=None, func=statfile):
which might include code, classes, or functions. Output only the next line. | self.logdir = os.path.join(self.dirname, logdir) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf8 -*-
#
# Copyright (c) 2016 Linux Documentation Project
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
# -- Test Data
# -- SUT
# -- shorthand
opj = os.path.join
opd = os.path.dirname
opa = os.path.abspath
sampledocs = opj(opd(__file__), 'sample-documents')
def genericGuessTest(content, ext):
tf = ntf(prefix='tldp-guesser-test-', suffix=ext, delete=False)
tf.close()
with codecs.open(tf.name, 'w', encoding='utf-8') as f:
f.write(content)
<|code_end|>
, predict the next line using imports from the current file:
import os
import codecs
import unittest
import example
from tempfile import NamedTemporaryFile as ntf
from tldp.typeguesser import guess
from tldp.doctypes.common import SignatureChecker
and context including class names, function names, and sometimes code from other files:
# Path: tldp/typeguesser.py
# def guess(fname):
# '''return a tldp.doctype class which is a best guess for document type
#
# :parama fname: A filename.
#
# The guess function will try to guess the document type (doctype) from the
# file extension. If extension matching produces multiple possible doctype
# matches (e.g. .xml or .sgml), the guess function will then use signature
# matching to find the earliest match in the file for a signature.
#
# If there are multiple signature matches, it will choose the signature
# matching at the earliest position in the file.
#
# Bugs/shortcomings:
#
# * This is only a guesser.
# * When signature matching, it reports first signature it discovers in
# any input file.
# * It could/should read more than 1024 bytes (cf. SignatureChecker)
# especially if it cannot return any result.
# * It could/should use heuristics or something richer than signatures.
# '''
# try:
# stem, ext = os.path.splitext(fname)
# except (AttributeError, TypeError):
# return None
#
# if not ext:
# logger.debug("%s no file extension, skipping %s.", stem, ext)
# return None
#
# possible = [t for t in knowndoctypes if ext in t.extensions]
# logger.debug("Possible: %r", possible)
# if not possible:
# logger.debug("%s unknown extension %s.", stem, ext)
# return None
#
# if len(possible) == 1:
# doctype = possible.pop()
# return doctype
#
# # -- for this extension, multiple document types, probably SGML, XML
# #
# logger.debug("%s multiple possible doctypes for extension %s on file %s.",
# stem, ext, fname)
# for doctype in possible:
# logger.debug("%s extension %s could be %s.", stem, ext, doctype)
#
# try:
# with codecs.open(fname, encoding='utf-8') as f:
# buf = f.read(1024)
# except UnicodeDecodeError:
# # -- a wee bit ugly, but many SGML docs used iso-8859-1, so fall back
# with codecs.open(fname, encoding='iso-8859-1') as f:
# buf = f.read(1024)
#
# guesses = list()
# for doctype in possible:
# sindex = doctype.signatureLocation(buf, fname)
# if sindex is not None:
# guesses.append((sindex, doctype))
#
# if not guesses:
# logger.warning("%s no matching signature found for %s.",
# stem, fname)
# return None
# if len(guesses) == 1:
# _, doctype = guesses.pop()
# return doctype
#
# # -- OK, this is unusual; we still found multiple document type
# # signatures. Seems rare but unlikely, so we should choose the
# # first signature in the file as the more likely document type.
# #
# guesses.sort()
# logger.info("%s multiple doctype guesses for file %s", stem, fname)
# for sindex, doctype in guesses:
# logger.info("%s could be %s (sig at pos %s)", stem, doctype, sindex)
# logger.info("%s going to guess %s for %s", stem, doctype, fname)
# _, doctype = guesses.pop(0)
# return doctype
#
# Path: tldp/doctypes/common.py
# class SignatureChecker(object):
#
# @classmethod
# def signatureLocation(cls, buf, fname):
# for sig in cls.signatures:
# try:
# sindex = buf.index(sig)
# logger.debug("YES FOUND signature %r in %s at %s; doctype %s.",
# sig, fname, sindex, cls)
# return sindex
# except ValueError:
# logger.debug("not found signature %r in %s for type %s",
# sig, fname, cls.__name__)
# return None
. Output only the next line. | dt = guess(tf.name) |
Continue the code snippet: <|code_start|># -- Test Data
# -- SUT
# -- shorthand
opj = os.path.join
opd = os.path.dirname
opa = os.path.abspath
sampledocs = opj(opd(__file__), 'sample-documents')
def genericGuessTest(content, ext):
tf = ntf(prefix='tldp-guesser-test-', suffix=ext, delete=False)
tf.close()
with codecs.open(tf.name, 'w', encoding='utf-8') as f:
f.write(content)
dt = guess(tf.name)
os.unlink(tf.name)
return dt
class TestDoctypes(unittest.TestCase):
def testISO_8859_1(self):
dt = guess(opj(sampledocs, 'ISO-8859-1.sgml'))
self.assertIsNotNone(dt)
def testDetectionBySignature(self):
for ex in example.sources:
<|code_end|>
. Use current file imports:
import os
import codecs
import unittest
import example
from tempfile import NamedTemporaryFile as ntf
from tldp.typeguesser import guess
from tldp.doctypes.common import SignatureChecker
and context (classes, functions, or code) from other files:
# Path: tldp/typeguesser.py
# def guess(fname):
# '''return a tldp.doctype class which is a best guess for document type
#
# :parama fname: A filename.
#
# The guess function will try to guess the document type (doctype) from the
# file extension. If extension matching produces multiple possible doctype
# matches (e.g. .xml or .sgml), the guess function will then use signature
# matching to find the earliest match in the file for a signature.
#
# If there are multiple signature matches, it will choose the signature
# matching at the earliest position in the file.
#
# Bugs/shortcomings:
#
# * This is only a guesser.
# * When signature matching, it reports first signature it discovers in
# any input file.
# * It could/should read more than 1024 bytes (cf. SignatureChecker)
# especially if it cannot return any result.
# * It could/should use heuristics or something richer than signatures.
# '''
# try:
# stem, ext = os.path.splitext(fname)
# except (AttributeError, TypeError):
# return None
#
# if not ext:
# logger.debug("%s no file extension, skipping %s.", stem, ext)
# return None
#
# possible = [t for t in knowndoctypes if ext in t.extensions]
# logger.debug("Possible: %r", possible)
# if not possible:
# logger.debug("%s unknown extension %s.", stem, ext)
# return None
#
# if len(possible) == 1:
# doctype = possible.pop()
# return doctype
#
# # -- for this extension, multiple document types, probably SGML, XML
# #
# logger.debug("%s multiple possible doctypes for extension %s on file %s.",
# stem, ext, fname)
# for doctype in possible:
# logger.debug("%s extension %s could be %s.", stem, ext, doctype)
#
# try:
# with codecs.open(fname, encoding='utf-8') as f:
# buf = f.read(1024)
# except UnicodeDecodeError:
# # -- a wee bit ugly, but many SGML docs used iso-8859-1, so fall back
# with codecs.open(fname, encoding='iso-8859-1') as f:
# buf = f.read(1024)
#
# guesses = list()
# for doctype in possible:
# sindex = doctype.signatureLocation(buf, fname)
# if sindex is not None:
# guesses.append((sindex, doctype))
#
# if not guesses:
# logger.warning("%s no matching signature found for %s.",
# stem, fname)
# return None
# if len(guesses) == 1:
# _, doctype = guesses.pop()
# return doctype
#
# # -- OK, this is unusual; we still found multiple document type
# # signatures. Seems rare but unlikely, so we should choose the
# # first signature in the file as the more likely document type.
# #
# guesses.sort()
# logger.info("%s multiple doctype guesses for file %s", stem, fname)
# for sindex, doctype in guesses:
# logger.info("%s could be %s (sig at pos %s)", stem, doctype, sindex)
# logger.info("%s going to guess %s for %s", stem, doctype, fname)
# _, doctype = guesses.pop(0)
# return doctype
#
# Path: tldp/doctypes/common.py
# class SignatureChecker(object):
#
# @classmethod
# def signatureLocation(cls, buf, fname):
# for sig in cls.signatures:
# try:
# sindex = buf.index(sig)
# logger.debug("YES FOUND signature %r in %s at %s; doctype %s.",
# sig, fname, sindex, cls)
# return sindex
# except ValueError:
# logger.debug("not found signature %r in %s for type %s",
# sig, fname, cls.__name__)
# return None
. Output only the next line. | if isinstance(ex.doctype, SignatureChecker): |
Continue the code snippet: <|code_start|> else:
err = get_response_error(response)
raise err
def delete_cors(self):
""" Delete the bucket CORS settings.
"""
response = self.connection.make_request("DELETE", self.name)
if response.status == 204:
return True
else:
err = get_response_error(response)
raise err
def initiate_multipart_upload(self, key_name, content_type=None):
"""Initiate a multipart upload.
Returns: An instance of MultiPartUpload
Keyword arguments:
key_name - The object name
content_type - The content type of the object
"""
params = {"uploads": None}
headers = {
"Content-Type": content_type or self.DefaultContentType
}
response = self.connection.make_request(
"POST", self.name, key_name, headers=headers, params=params)
if response.status == 200:
resp = load_data(response.read())
<|code_end|>
. Use current file imports:
import hashlib
import json
from base64 import b64encode
from .key import Key
from .acl import ACL
from .multipart import MultiPartUpload
from .exception import get_response_error
from .util import load_data
and context (classes, functions, or code) from other files:
# Path: qingcloud/qingstor/multipart.py
# class MultiPartUpload(object):
#
# def __init__(self, bucket, key_name, upload_id):
# """
# @param bucket - The name of the bucket
# @param key_name - The name of the object
# @param upload_id - ID for the initiated multipart upload
# """
# self.bucket = bucket
# self.key_name = key_name
# self.upload_id = upload_id
#
# def upload_part_from_file(self, fp, part_number):
# """ Upload multipart from a file
#
# Keyword arguments:
# fp - a file-like object
# part_number - The number of the multipart
# """
# params = {
# "upload_id": self.upload_id,
# "part_number": str(part_number),
# }
# response = self.bucket.connection.make_request(
# "PUT", self.bucket.name, self.key_name, data=fp, params=params)
# if response.status == 201:
# part = Part(self.bucket.name, self.key_name, part_number)
# return part
# else:
# err = get_response_error(response)
# raise err
#
# def get_all_parts(self):
# """ Retrieve all multiparts of an object that uploaded.
# """
# params = {
# "upload_id": self.upload_id,
# }
# response = self.bucket.connection.make_request(
# "GET", self.bucket.name, self.key_name, params=params)
# if response.status == 200:
# parts = []
# resp = load_data(response.read())
# for item in resp["object_parts"]:
# part = Part(self.bucket.name, self.key_name,
# item["part_number"])
# part.size = item["size"]
# part.created = item["created"]
# parts.append(part)
# return parts
# else:
# err = get_response_error(response)
# raise err
#
# def cancel_upload(self):
# """Abort the multipart upload.
# """
# return self.bucket.cancel_multipart_upload(
# self.key_name, self.upload_id
# )
#
# def complete_upload(self, parts):
# """Complete the multipart upload.
# """
# return self.bucket.complete_multipart_upload(
# self.key_name, self.upload_id, parts
# )
#
# Path: qingcloud/qingstor/exception.py
# def get_response_error(response, body=None):
# if not body:
# body = response.read()
# args = {
# "status": response.status,
# "body": body,
# "request_id": response.getheader("request_id")
# }
# if body:
# try:
# resp = load_data(body)
# args["code"] = resp["code"]
# args["message"] = resp["message"]
# args["url"] = resp["url"]
# except ValueError:
# pass
# return QSResponseError(**args)
#
# Path: qingcloud/qingstor/util.py
# def load_data(data):
# """ Wrapper to load json data, to be compatible with Python3.
# Returns: JSON data
#
# Keyword arguments:
# data: might be bytes or str
# """
# if type(data) == bytes:
# return json.loads(data.decode("utf-8"))
# else:
# return json.loads(data)
. Output only the next line. | handler = MultiPartUpload(self, key_name, resp["upload_id"]) |
Using the snippet: <|code_start|> return not (self.get_key(key_name) is None)
def __getitem__(self, key_name):
key = self.get_key(key_name)
if key:
return key
else:
raise KeyError
def __len__(self):
pass
def get_key(self, key_name, validate=True):
""" Retrieves an object by name.
Returns: An instance of a Key object or None
Keyword arguments:
key_name - The name of the bucket
validate - If True, the function will try to verify the object exists
on the service-side (Default: True)
"""
if not validate:
return Key(self, key_name)
response = self.connection.make_request("HEAD", self.name, key_name)
if response.status == 200:
return Key(self, key_name)
elif response.status == 401:
<|code_end|>
, determine the next line of code. You have imports:
import hashlib
import json
from base64 import b64encode
from .key import Key
from .acl import ACL
from .multipart import MultiPartUpload
from .exception import get_response_error
from .util import load_data
and context (class names, function names, or code) available:
# Path: qingcloud/qingstor/multipart.py
# class MultiPartUpload(object):
#
# def __init__(self, bucket, key_name, upload_id):
# """
# @param bucket - The name of the bucket
# @param key_name - The name of the object
# @param upload_id - ID for the initiated multipart upload
# """
# self.bucket = bucket
# self.key_name = key_name
# self.upload_id = upload_id
#
# def upload_part_from_file(self, fp, part_number):
# """ Upload multipart from a file
#
# Keyword arguments:
# fp - a file-like object
# part_number - The number of the multipart
# """
# params = {
# "upload_id": self.upload_id,
# "part_number": str(part_number),
# }
# response = self.bucket.connection.make_request(
# "PUT", self.bucket.name, self.key_name, data=fp, params=params)
# if response.status == 201:
# part = Part(self.bucket.name, self.key_name, part_number)
# return part
# else:
# err = get_response_error(response)
# raise err
#
# def get_all_parts(self):
# """ Retrieve all multiparts of an object that uploaded.
# """
# params = {
# "upload_id": self.upload_id,
# }
# response = self.bucket.connection.make_request(
# "GET", self.bucket.name, self.key_name, params=params)
# if response.status == 200:
# parts = []
# resp = load_data(response.read())
# for item in resp["object_parts"]:
# part = Part(self.bucket.name, self.key_name,
# item["part_number"])
# part.size = item["size"]
# part.created = item["created"]
# parts.append(part)
# return parts
# else:
# err = get_response_error(response)
# raise err
#
# def cancel_upload(self):
# """Abort the multipart upload.
# """
# return self.bucket.cancel_multipart_upload(
# self.key_name, self.upload_id
# )
#
# def complete_upload(self, parts):
# """Complete the multipart upload.
# """
# return self.bucket.complete_multipart_upload(
# self.key_name, self.upload_id, parts
# )
#
# Path: qingcloud/qingstor/exception.py
# def get_response_error(response, body=None):
# if not body:
# body = response.read()
# args = {
# "status": response.status,
# "body": body,
# "request_id": response.getheader("request_id")
# }
# if body:
# try:
# resp = load_data(body)
# args["code"] = resp["code"]
# args["message"] = resp["message"]
# args["url"] = resp["url"]
# except ValueError:
# pass
# return QSResponseError(**args)
#
# Path: qingcloud/qingstor/util.py
# def load_data(data):
# """ Wrapper to load json data, to be compatible with Python3.
# Returns: JSON data
#
# Keyword arguments:
# data: might be bytes or str
# """
# if type(data) == bytes:
# return json.loads(data.decode("utf-8"))
# else:
# return json.loads(data)
. Output only the next line. | err = get_response_error(response) |
Given the following code snippet before the placeholder: <|code_start|> key_name - The name of the object
"""
response = self.connection.make_request("DELETE", self.name, key_name)
if response.status == 204:
return True
else:
err = get_response_error(response)
raise err
def delete_keys(self, keys, quiet=False):
""" Delete a list of object by keys
Keyword arguments:
keys - A list of keys to delete
quiet - Whether or not to return the list of objects successfully
deleted, deleted objects list won't be returned when True.
"""
req_data = json.dumps({
"objects": [{"key": k} for k in keys],
"quiet": quiet
})
content_md5 = b64encode(hashlib.md5(req_data).digest())
resp = self.connection.make_request(
"POST",
self.name,
params="delete",
headers={"Content-MD5": content_md5},
data=req_data
)
if resp.status == 200:
<|code_end|>
, predict the next line using imports from the current file:
import hashlib
import json
from base64 import b64encode
from .key import Key
from .acl import ACL
from .multipart import MultiPartUpload
from .exception import get_response_error
from .util import load_data
and context including class names, function names, and sometimes code from other files:
# Path: qingcloud/qingstor/multipart.py
# class MultiPartUpload(object):
#
# def __init__(self, bucket, key_name, upload_id):
# """
# @param bucket - The name of the bucket
# @param key_name - The name of the object
# @param upload_id - ID for the initiated multipart upload
# """
# self.bucket = bucket
# self.key_name = key_name
# self.upload_id = upload_id
#
# def upload_part_from_file(self, fp, part_number):
# """ Upload multipart from a file
#
# Keyword arguments:
# fp - a file-like object
# part_number - The number of the multipart
# """
# params = {
# "upload_id": self.upload_id,
# "part_number": str(part_number),
# }
# response = self.bucket.connection.make_request(
# "PUT", self.bucket.name, self.key_name, data=fp, params=params)
# if response.status == 201:
# part = Part(self.bucket.name, self.key_name, part_number)
# return part
# else:
# err = get_response_error(response)
# raise err
#
# def get_all_parts(self):
# """ Retrieve all multiparts of an object that uploaded.
# """
# params = {
# "upload_id": self.upload_id,
# }
# response = self.bucket.connection.make_request(
# "GET", self.bucket.name, self.key_name, params=params)
# if response.status == 200:
# parts = []
# resp = load_data(response.read())
# for item in resp["object_parts"]:
# part = Part(self.bucket.name, self.key_name,
# item["part_number"])
# part.size = item["size"]
# part.created = item["created"]
# parts.append(part)
# return parts
# else:
# err = get_response_error(response)
# raise err
#
# def cancel_upload(self):
# """Abort the multipart upload.
# """
# return self.bucket.cancel_multipart_upload(
# self.key_name, self.upload_id
# )
#
# def complete_upload(self, parts):
# """Complete the multipart upload.
# """
# return self.bucket.complete_multipart_upload(
# self.key_name, self.upload_id, parts
# )
#
# Path: qingcloud/qingstor/exception.py
# def get_response_error(response, body=None):
# if not body:
# body = response.read()
# args = {
# "status": response.status,
# "body": body,
# "request_id": response.getheader("request_id")
# }
# if body:
# try:
# resp = load_data(body)
# args["code"] = resp["code"]
# args["message"] = resp["message"]
# args["url"] = resp["url"]
# except ValueError:
# pass
# return QSResponseError(**args)
#
# Path: qingcloud/qingstor/util.py
# def load_data(data):
# """ Wrapper to load json data, to be compatible with Python3.
# Returns: JSON data
#
# Keyword arguments:
# data: might be bytes or str
# """
# if type(data) == bytes:
# return json.loads(data.decode("utf-8"))
# else:
# return json.loads(data)
. Output only the next line. | return load_data(resp.read()) |
Next line prediction: <|code_start|> def digest(self, string_to_digest):
if self._hmac_256:
_hmac = self._hmac_256.copy()
else:
_hmac = self._hmac.copy()
if is_python3:
string_to_digest = string_to_digest.encode()
_hmac.update(string_to_digest)
return _hmac.digest()
def sign_string(self, string_to_sign):
to_sign = self.digest(string_to_sign)
return base64.b64encode(to_sign).strip()
class QuerySignatureAuthHandler(HmacKeys):
""" Provides Query Signature Authentication.
"""
SignatureVersion = 1
APIVersion = 1
def _calc_signature(self, params, verb, path):
""" calc signature for request
"""
string_to_sign = '%s\n%s\n' % (verb, path)
params['signature_method'] = self.algorithm()
keys = sorted(params.keys())
pairs = []
for key in keys:
<|code_end|>
. Use current file imports:
(import sys
import hmac
import base64
import datetime
import urllib.parse as urllib
import urllib
from hashlib import sha1, sha256
from past.builtins import basestring
from qingcloud.misc.json_tool import json_dump, json_load
from qingcloud.misc.utils import get_utf8_value, get_ts, base64_url_decode,\
base64_url_encode)
and context including class names, function names, or small code snippets from other files:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
. Output only the next line. | val = get_utf8_value(params[key]) |
Next line prediction: <|code_start|> APIVersion = 1
def _calc_signature(self, params, verb, path):
""" calc signature for request
"""
string_to_sign = '%s\n%s\n' % (verb, path)
params['signature_method'] = self.algorithm()
keys = sorted(params.keys())
pairs = []
for key in keys:
val = get_utf8_value(params[key])
if is_python3:
key = key.encode()
pairs.append(urllib.quote(key, safe='') + '=' +
urllib.quote(val, safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
# print "string to sign:[%s]" % string_to_sign
b64 = self.sign_string(string_to_sign)
return (qs, b64)
def add_auth(self, req, **kwargs):
""" add authorize information for request
"""
req.params['access_key_id'] = self.qy_access_key_id if 'access_key' not in kwargs else kwargs.get('access_key')
if 'token' in kwargs:
req.params['token'] = kwargs.get('token')
req.params['signature_version'] = self.SignatureVersion if 'signature_version' not in kwargs \
else kwargs.get('signature_version')
req.params['version'] = self.APIVersion
<|code_end|>
. Use current file imports:
(import sys
import hmac
import base64
import datetime
import urllib.parse as urllib
import urllib
from hashlib import sha1, sha256
from past.builtins import basestring
from qingcloud.misc.json_tool import json_dump, json_load
from qingcloud.misc.utils import get_utf8_value, get_ts, base64_url_decode,\
base64_url_encode)
and context including class names, function names, or small code snippets from other files:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
. Output only the next line. | time_stamp = get_ts() |
Using the snippet: <|code_start|> req.body = ''
# if this is a retried req, the qs from the previous try will
# already be there, we need to get rid of that and rebuild it
req.params["signature"] = signature
req.path = req.path.split('?')[0]
req.path = (req.path + '?' + qs +
'&signature=' + urllib.quote_plus(signature))
class AppSignatureAuthHandler(QuerySignatureAuthHandler):
""" Provides App Signature Authentication.
"""
def __init__(self, app_id, secret_app_key, access_token=None):
HmacKeys.__init__(self, "", app_id, secret_app_key)
self.app_id = app_id
self.access_token = access_token
def sign_string(self, string_to_sign):
to_sign = self.digest(string_to_sign)
return base64_url_encode(to_sign)
def extract_payload(self, payload, signature):
expected_sig = self.sign_string(payload)
if signature != expected_sig:
return None
<|code_end|>
, determine the next line of code. You have imports:
import sys
import hmac
import base64
import datetime
import urllib.parse as urllib
import urllib
from hashlib import sha1, sha256
from past.builtins import basestring
from qingcloud.misc.json_tool import json_dump, json_load
from qingcloud.misc.utils import get_utf8_value, get_ts, base64_url_decode,\
base64_url_encode
and context (class names, function names, or code) available:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
. Output only the next line. | return json_load(base64_url_decode(payload)) |
Given the following code snippet before the placeholder: <|code_start|> req.body = urllib.urlencode(params)
req.header = {
'Content-Length': str(len(req.body)),
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain',
'Connection': 'Keep-Alive'
}
else:
req.body = ''
# if this is a retried req, the qs from the previous try will
# already be there, we need to get rid of that and rebuild it
req.params["signature"] = signature
req.path = req.path.split('?')[0]
req.path = (req.path + '?' + qs +
'&signature=' + urllib.quote_plus(signature))
class AppSignatureAuthHandler(QuerySignatureAuthHandler):
""" Provides App Signature Authentication.
"""
def __init__(self, app_id, secret_app_key, access_token=None):
HmacKeys.__init__(self, "", app_id, secret_app_key)
self.app_id = app_id
self.access_token = access_token
def sign_string(self, string_to_sign):
to_sign = self.digest(string_to_sign)
<|code_end|>
, predict the next line using imports from the current file:
import sys
import hmac
import base64
import datetime
import urllib.parse as urllib
import urllib
from hashlib import sha1, sha256
from past.builtins import basestring
from qingcloud.misc.json_tool import json_dump, json_load
from qingcloud.misc.utils import get_utf8_value, get_ts, base64_url_decode,\
base64_url_encode
and context including class names, function names, and sometimes code from other files:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
. Output only the next line. | return base64_url_encode(to_sign) |
Here is a snippet: <|code_start|>class QSResponseError(Exception):
def __init__(self, status, body=None, request_id=None,
code=None, message=None, url=None):
self.status = status
self.body = body or ''
self.request_id = request_id
self.code = code
self.message = message
self.url = url
def __repr__(self):
return "%s: %s %s\n%s" % (self.__class__.__name__,
self.status, self.code, self.body)
def __str__(self):
return "%s: %s %s\n%s" % (self.__class__.__name__,
self.status, self.code, self.body)
def get_response_error(response, body=None):
if not body:
body = response.read()
args = {
"status": response.status,
"body": body,
"request_id": response.getheader("request_id")
}
if body:
try:
<|code_end|>
. Write the next line using the current file imports:
from .util import load_data
and context from other files:
# Path: qingcloud/qingstor/util.py
# def load_data(data):
# """ Wrapper to load json data, to be compatible with Python3.
# Returns: JSON data
#
# Keyword arguments:
# data: might be bytes or str
# """
# if type(data) == bytes:
# return json.loads(data.decode("utf-8"))
# else:
# return json.loads(data)
, which may include functions, classes, or code. Output only the next line. | resp = load_data(body) |
Given snippet: <|code_start|># -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
class UtilsTestCase(unittest.TestCase):
def test_get_utf8_value(self):
self.assertEqual(get_utf8_value('utf-8'), 'utf-8')
self.assertEqual(get_utf8_value(u'unicode'), 'unicode')
self.assertEqual(get_utf8_value([1, 2]), '[1, 2]')
if sys.version < "3":
self.assertEqual(get_utf8_value(u'δ½ ε₯½'), '\xe4\xbd\xa0\xe5\xa5\xbd')
else:
self.assertEqual(get_utf8_value(u'δ½ ε₯½'), u'δ½ ε₯½')
def test_filter_out_none(self):
data = {'a': 1, 'b': 2, 'c': None}
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import time
import unittest
import sys
from mock import Mock
from qingcloud.misc.utils import (get_utf8_value, filter_out_none, get_ts,
parse_ts, local_ts, base64_url_encode, base64_url_decode,
wait_job)
and context:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def filter_out_none(dictionary, keys=None):
# """ Filter out items whose value is None.
# If `keys` specified, only return non-None items with matched key.
# """
# ret = {}
# if keys is None:
# keys = []
# for key, value in dictionary.items():
# if value is None or key not in keys:
# continue
# ret[key] = value
# return ret
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def parse_ts(ts):
# """ Return as timestamp
# """
# ts = ts.strip()
# try:
# ts_s = time.strptime(ts, ISO8601)
# return time.mktime(ts_s)
# except ValueError:
# try:
# ts_s = time.strptime(ts, ISO8601_MS)
# return time.mktime(ts_s)
# except ValueError:
# return 0
#
# def local_ts(utc_ts):
# ts = parse_ts(utc_ts)
# if ts:
# return ts - time.timezone
# else:
# return 0
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def wait_job(conn, job_id, timeout=60):
# """ waiting for job complete (success or fail) until timeout
# """
# def describe_job(job_id):
# ret = conn.describe_jobs([job_id])
# if not ret or not ret.get('job_set'):
# return None
# return ret['job_set'][0]
#
# deadline = time.time() + timeout
# while time.time() <= deadline:
# time.sleep(2)
# job = describe_job(job_id)
# if not job:
# continue
# if job['status'] not in ('pending', 'working'):
# if conn.debug:
# print('job is %s: %s' % (job['status'], job_id))
# sys.stdout.flush()
# return True
#
# if conn.debug:
# print('timeout for job: %s' % job_id)
# sys.stdout.flush()
# return False
which might include code, classes, or functions. Output only the next line. | self.assertEqual(filter_out_none(data), {}) |
Here is a snippet: <|code_start|># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
class UtilsTestCase(unittest.TestCase):
def test_get_utf8_value(self):
self.assertEqual(get_utf8_value('utf-8'), 'utf-8')
self.assertEqual(get_utf8_value(u'unicode'), 'unicode')
self.assertEqual(get_utf8_value([1, 2]), '[1, 2]')
if sys.version < "3":
self.assertEqual(get_utf8_value(u'δ½ ε₯½'), '\xe4\xbd\xa0\xe5\xa5\xbd')
else:
self.assertEqual(get_utf8_value(u'δ½ ε₯½'), u'δ½ ε₯½')
def test_filter_out_none(self):
data = {'a': 1, 'b': 2, 'c': None}
self.assertEqual(filter_out_none(data), {})
data = {'a': 1, 'b': 2, 'c': None}
self.assertEqual(filter_out_none(data, keys=['a', 'c']), {'a': 1})
def test_get_ts(self):
ts = 1391832000
ts = time.localtime(ts + time.timezone + 60 * 60 * 8)
expected = '2014-02-08T12:00:00Z'
<|code_end|>
. Write the next line using the current file imports:
import time
import unittest
import sys
from mock import Mock
from qingcloud.misc.utils import (get_utf8_value, filter_out_none, get_ts,
parse_ts, local_ts, base64_url_encode, base64_url_decode,
wait_job)
and context from other files:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def filter_out_none(dictionary, keys=None):
# """ Filter out items whose value is None.
# If `keys` specified, only return non-None items with matched key.
# """
# ret = {}
# if keys is None:
# keys = []
# for key, value in dictionary.items():
# if value is None or key not in keys:
# continue
# ret[key] = value
# return ret
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def parse_ts(ts):
# """ Return as timestamp
# """
# ts = ts.strip()
# try:
# ts_s = time.strptime(ts, ISO8601)
# return time.mktime(ts_s)
# except ValueError:
# try:
# ts_s = time.strptime(ts, ISO8601_MS)
# return time.mktime(ts_s)
# except ValueError:
# return 0
#
# def local_ts(utc_ts):
# ts = parse_ts(utc_ts)
# if ts:
# return ts - time.timezone
# else:
# return 0
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def wait_job(conn, job_id, timeout=60):
# """ waiting for job complete (success or fail) until timeout
# """
# def describe_job(job_id):
# ret = conn.describe_jobs([job_id])
# if not ret or not ret.get('job_set'):
# return None
# return ret['job_set'][0]
#
# deadline = time.time() + timeout
# while time.time() <= deadline:
# time.sleep(2)
# job = describe_job(job_id)
# if not job:
# continue
# if job['status'] not in ('pending', 'working'):
# if conn.debug:
# print('job is %s: %s' % (job['status'], job_id))
# sys.stdout.flush()
# return True
#
# if conn.debug:
# print('timeout for job: %s' % job_id)
# sys.stdout.flush()
# return False
, which may include functions, classes, or code. Output only the next line. | self.assertEqual(get_ts(ts), expected) |
Given the following code snippet before the placeholder: <|code_start|>
class UtilsTestCase(unittest.TestCase):
def test_get_utf8_value(self):
self.assertEqual(get_utf8_value('utf-8'), 'utf-8')
self.assertEqual(get_utf8_value(u'unicode'), 'unicode')
self.assertEqual(get_utf8_value([1, 2]), '[1, 2]')
if sys.version < "3":
self.assertEqual(get_utf8_value(u'δ½ ε₯½'), '\xe4\xbd\xa0\xe5\xa5\xbd')
else:
self.assertEqual(get_utf8_value(u'δ½ ε₯½'), u'δ½ ε₯½')
def test_filter_out_none(self):
data = {'a': 1, 'b': 2, 'c': None}
self.assertEqual(filter_out_none(data), {})
data = {'a': 1, 'b': 2, 'c': None}
self.assertEqual(filter_out_none(data, keys=['a', 'c']), {'a': 1})
def test_get_ts(self):
ts = 1391832000
ts = time.localtime(ts + time.timezone + 60 * 60 * 8)
expected = '2014-02-08T12:00:00Z'
self.assertEqual(get_ts(ts), expected)
self.assertTrue(isinstance(get_ts(), str))
def test_parse_ts(self):
ts = '2014-02-08T12:00:00Z'
expected = 1391832000.0 + time.timezone + 60 * 60 * 8
<|code_end|>
, predict the next line using imports from the current file:
import time
import unittest
import sys
from mock import Mock
from qingcloud.misc.utils import (get_utf8_value, filter_out_none, get_ts,
parse_ts, local_ts, base64_url_encode, base64_url_decode,
wait_job)
and context including class names, function names, and sometimes code from other files:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def filter_out_none(dictionary, keys=None):
# """ Filter out items whose value is None.
# If `keys` specified, only return non-None items with matched key.
# """
# ret = {}
# if keys is None:
# keys = []
# for key, value in dictionary.items():
# if value is None or key not in keys:
# continue
# ret[key] = value
# return ret
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def parse_ts(ts):
# """ Return as timestamp
# """
# ts = ts.strip()
# try:
# ts_s = time.strptime(ts, ISO8601)
# return time.mktime(ts_s)
# except ValueError:
# try:
# ts_s = time.strptime(ts, ISO8601_MS)
# return time.mktime(ts_s)
# except ValueError:
# return 0
#
# def local_ts(utc_ts):
# ts = parse_ts(utc_ts)
# if ts:
# return ts - time.timezone
# else:
# return 0
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def wait_job(conn, job_id, timeout=60):
# """ waiting for job complete (success or fail) until timeout
# """
# def describe_job(job_id):
# ret = conn.describe_jobs([job_id])
# if not ret or not ret.get('job_set'):
# return None
# return ret['job_set'][0]
#
# deadline = time.time() + timeout
# while time.time() <= deadline:
# time.sleep(2)
# job = describe_job(job_id)
# if not job:
# continue
# if job['status'] not in ('pending', 'working'):
# if conn.debug:
# print('job is %s: %s' % (job['status'], job_id))
# sys.stdout.flush()
# return True
#
# if conn.debug:
# print('timeout for job: %s' % job_id)
# sys.stdout.flush()
# return False
. Output only the next line. | self.assertEqual(parse_ts(ts), expected) |
Here is a snippet: <|code_start|> self.assertEqual(get_utf8_value(u'δ½ ε₯½'), '\xe4\xbd\xa0\xe5\xa5\xbd')
else:
self.assertEqual(get_utf8_value(u'δ½ ε₯½'), u'δ½ ε₯½')
def test_filter_out_none(self):
data = {'a': 1, 'b': 2, 'c': None}
self.assertEqual(filter_out_none(data), {})
data = {'a': 1, 'b': 2, 'c': None}
self.assertEqual(filter_out_none(data, keys=['a', 'c']), {'a': 1})
def test_get_ts(self):
ts = 1391832000
ts = time.localtime(ts + time.timezone + 60 * 60 * 8)
expected = '2014-02-08T12:00:00Z'
self.assertEqual(get_ts(ts), expected)
self.assertTrue(isinstance(get_ts(), str))
def test_parse_ts(self):
ts = '2014-02-08T12:00:00Z'
expected = 1391832000.0 + time.timezone + 60 * 60 * 8
self.assertEqual(parse_ts(ts), expected)
ts = '2014-02-08T12:00:00.000Z'
expected = 1391832000.0 + time.timezone + 60 * 60 * 8
self.assertEqual(parse_ts(ts), expected)
def test_local_ts(self):
ts = '2014-02-08T12:00:00Z'
expected = 1391860800.0
<|code_end|>
. Write the next line using the current file imports:
import time
import unittest
import sys
from mock import Mock
from qingcloud.misc.utils import (get_utf8_value, filter_out_none, get_ts,
parse_ts, local_ts, base64_url_encode, base64_url_decode,
wait_job)
and context from other files:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def filter_out_none(dictionary, keys=None):
# """ Filter out items whose value is None.
# If `keys` specified, only return non-None items with matched key.
# """
# ret = {}
# if keys is None:
# keys = []
# for key, value in dictionary.items():
# if value is None or key not in keys:
# continue
# ret[key] = value
# return ret
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def parse_ts(ts):
# """ Return as timestamp
# """
# ts = ts.strip()
# try:
# ts_s = time.strptime(ts, ISO8601)
# return time.mktime(ts_s)
# except ValueError:
# try:
# ts_s = time.strptime(ts, ISO8601_MS)
# return time.mktime(ts_s)
# except ValueError:
# return 0
#
# def local_ts(utc_ts):
# ts = parse_ts(utc_ts)
# if ts:
# return ts - time.timezone
# else:
# return 0
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def wait_job(conn, job_id, timeout=60):
# """ waiting for job complete (success or fail) until timeout
# """
# def describe_job(job_id):
# ret = conn.describe_jobs([job_id])
# if not ret or not ret.get('job_set'):
# return None
# return ret['job_set'][0]
#
# deadline = time.time() + timeout
# while time.time() <= deadline:
# time.sleep(2)
# job = describe_job(job_id)
# if not job:
# continue
# if job['status'] not in ('pending', 'working'):
# if conn.debug:
# print('job is %s: %s' % (job['status'], job_id))
# sys.stdout.flush()
# return True
#
# if conn.debug:
# print('timeout for job: %s' % job_id)
# sys.stdout.flush()
# return False
, which may include functions, classes, or code. Output only the next line. | self.assertEqual(local_ts(ts), expected) |
Given snippet: <|code_start|> data = {'a': 1, 'b': 2, 'c': None}
self.assertEqual(filter_out_none(data, keys=['a', 'c']), {'a': 1})
def test_get_ts(self):
ts = 1391832000
ts = time.localtime(ts + time.timezone + 60 * 60 * 8)
expected = '2014-02-08T12:00:00Z'
self.assertEqual(get_ts(ts), expected)
self.assertTrue(isinstance(get_ts(), str))
def test_parse_ts(self):
ts = '2014-02-08T12:00:00Z'
expected = 1391832000.0 + time.timezone + 60 * 60 * 8
self.assertEqual(parse_ts(ts), expected)
ts = '2014-02-08T12:00:00.000Z'
expected = 1391832000.0 + time.timezone + 60 * 60 * 8
self.assertEqual(parse_ts(ts), expected)
def test_local_ts(self):
ts = '2014-02-08T12:00:00Z'
expected = 1391860800.0
self.assertEqual(local_ts(ts), expected)
ts = '2014-02-08T12:00:00.000Z'
expected = 1391860800.0
self.assertEqual(local_ts(ts), expected)
def test_base64_url_encode(self):
self.assertEqual("c29tZSBzdHJpbmcgdG8gZW5jb2RlIA",
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import time
import unittest
import sys
from mock import Mock
from qingcloud.misc.utils import (get_utf8_value, filter_out_none, get_ts,
parse_ts, local_ts, base64_url_encode, base64_url_decode,
wait_job)
and context:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def filter_out_none(dictionary, keys=None):
# """ Filter out items whose value is None.
# If `keys` specified, only return non-None items with matched key.
# """
# ret = {}
# if keys is None:
# keys = []
# for key, value in dictionary.items():
# if value is None or key not in keys:
# continue
# ret[key] = value
# return ret
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def parse_ts(ts):
# """ Return as timestamp
# """
# ts = ts.strip()
# try:
# ts_s = time.strptime(ts, ISO8601)
# return time.mktime(ts_s)
# except ValueError:
# try:
# ts_s = time.strptime(ts, ISO8601_MS)
# return time.mktime(ts_s)
# except ValueError:
# return 0
#
# def local_ts(utc_ts):
# ts = parse_ts(utc_ts)
# if ts:
# return ts - time.timezone
# else:
# return 0
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def wait_job(conn, job_id, timeout=60):
# """ waiting for job complete (success or fail) until timeout
# """
# def describe_job(job_id):
# ret = conn.describe_jobs([job_id])
# if not ret or not ret.get('job_set'):
# return None
# return ret['job_set'][0]
#
# deadline = time.time() + timeout
# while time.time() <= deadline:
# time.sleep(2)
# job = describe_job(job_id)
# if not job:
# continue
# if job['status'] not in ('pending', 'working'):
# if conn.debug:
# print('job is %s: %s' % (job['status'], job_id))
# sys.stdout.flush()
# return True
#
# if conn.debug:
# print('timeout for job: %s' % job_id)
# sys.stdout.flush()
# return False
which might include code, classes, or functions. Output only the next line. | base64_url_encode("some string to encode ")) |
Continue the code snippet: <|code_start|> def test_get_ts(self):
ts = 1391832000
ts = time.localtime(ts + time.timezone + 60 * 60 * 8)
expected = '2014-02-08T12:00:00Z'
self.assertEqual(get_ts(ts), expected)
self.assertTrue(isinstance(get_ts(), str))
def test_parse_ts(self):
ts = '2014-02-08T12:00:00Z'
expected = 1391832000.0 + time.timezone + 60 * 60 * 8
self.assertEqual(parse_ts(ts), expected)
ts = '2014-02-08T12:00:00.000Z'
expected = 1391832000.0 + time.timezone + 60 * 60 * 8
self.assertEqual(parse_ts(ts), expected)
def test_local_ts(self):
ts = '2014-02-08T12:00:00Z'
expected = 1391860800.0
self.assertEqual(local_ts(ts), expected)
ts = '2014-02-08T12:00:00.000Z'
expected = 1391860800.0
self.assertEqual(local_ts(ts), expected)
def test_base64_url_encode(self):
self.assertEqual("c29tZSBzdHJpbmcgdG8gZW5jb2RlIA",
base64_url_encode("some string to encode "))
def test_base64_url_decode(self):
<|code_end|>
. Use current file imports:
import time
import unittest
import sys
from mock import Mock
from qingcloud.misc.utils import (get_utf8_value, filter_out_none, get_ts,
parse_ts, local_ts, base64_url_encode, base64_url_decode,
wait_job)
and context (classes, functions, or code) from other files:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def filter_out_none(dictionary, keys=None):
# """ Filter out items whose value is None.
# If `keys` specified, only return non-None items with matched key.
# """
# ret = {}
# if keys is None:
# keys = []
# for key, value in dictionary.items():
# if value is None or key not in keys:
# continue
# ret[key] = value
# return ret
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def parse_ts(ts):
# """ Return as timestamp
# """
# ts = ts.strip()
# try:
# ts_s = time.strptime(ts, ISO8601)
# return time.mktime(ts_s)
# except ValueError:
# try:
# ts_s = time.strptime(ts, ISO8601_MS)
# return time.mktime(ts_s)
# except ValueError:
# return 0
#
# def local_ts(utc_ts):
# ts = parse_ts(utc_ts)
# if ts:
# return ts - time.timezone
# else:
# return 0
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def wait_job(conn, job_id, timeout=60):
# """ waiting for job complete (success or fail) until timeout
# """
# def describe_job(job_id):
# ret = conn.describe_jobs([job_id])
# if not ret or not ret.get('job_set'):
# return None
# return ret['job_set'][0]
#
# deadline = time.time() + timeout
# while time.time() <= deadline:
# time.sleep(2)
# job = describe_job(job_id)
# if not job:
# continue
# if job['status'] not in ('pending', 'working'):
# if conn.debug:
# print('job is %s: %s' % (job['status'], job_id))
# sys.stdout.flush()
# return True
#
# if conn.debug:
# print('timeout for job: %s' % job_id)
# sys.stdout.flush()
# return False
. Output only the next line. | self.assertEqual("some string to encode ", base64_url_decode( |
Given the following code snippet before the placeholder: <|code_start|> ts = '2014-02-08T12:00:00Z'
expected = 1391832000.0 + time.timezone + 60 * 60 * 8
self.assertEqual(parse_ts(ts), expected)
ts = '2014-02-08T12:00:00.000Z'
expected = 1391832000.0 + time.timezone + 60 * 60 * 8
self.assertEqual(parse_ts(ts), expected)
def test_local_ts(self):
ts = '2014-02-08T12:00:00Z'
expected = 1391860800.0
self.assertEqual(local_ts(ts), expected)
ts = '2014-02-08T12:00:00.000Z'
expected = 1391860800.0
self.assertEqual(local_ts(ts), expected)
def test_base64_url_encode(self):
self.assertEqual("c29tZSBzdHJpbmcgdG8gZW5jb2RlIA",
base64_url_encode("some string to encode "))
def test_base64_url_decode(self):
self.assertEqual("some string to encode ", base64_url_decode(
"c29tZSBzdHJpbmcgdG8gZW5jb2RlIA"))
def test_wait_job(self):
job_id = 'job-id'
conn = Mock()
# timeout
conn.describe_jobs.return_value = {'job_set': [{'status': 'working'}]}
<|code_end|>
, predict the next line using imports from the current file:
import time
import unittest
import sys
from mock import Mock
from qingcloud.misc.utils import (get_utf8_value, filter_out_none, get_ts,
parse_ts, local_ts, base64_url_encode, base64_url_decode,
wait_job)
and context including class names, function names, and sometimes code from other files:
# Path: qingcloud/misc/utils.py
# def get_utf8_value(value):
# if sys.version < "3":
# if isinstance(value, unicode):
# return value.encode('utf-8')
# if not isinstance(value, str):
# value = str(value)
# return value
# else:
# return str(value)
#
# def filter_out_none(dictionary, keys=None):
# """ Filter out items whose value is None.
# If `keys` specified, only return non-None items with matched key.
# """
# ret = {}
# if keys is None:
# keys = []
# for key, value in dictionary.items():
# if value is None or key not in keys:
# continue
# ret[key] = value
# return ret
#
# def get_ts(ts=None):
# """ Get formatted time
# """
# if not ts:
# ts = time.gmtime()
# return time.strftime(ISO8601, ts)
#
# def parse_ts(ts):
# """ Return as timestamp
# """
# ts = ts.strip()
# try:
# ts_s = time.strptime(ts, ISO8601)
# return time.mktime(ts_s)
# except ValueError:
# try:
# ts_s = time.strptime(ts, ISO8601_MS)
# return time.mktime(ts_s)
# except ValueError:
# return 0
#
# def local_ts(utc_ts):
# ts = parse_ts(utc_ts)
# if ts:
# return ts - time.timezone
# else:
# return 0
#
# def base64_url_encode(inp):
# if sys.version > "3":
# if isinstance(inp, str):
# inp = inp.encode()
# return bytes.decode(base64.urlsafe_b64encode(inp).rstrip(b'='))
# else:
# return base64.urlsafe_b64encode(str(inp)).rstrip(b'=')
#
# def base64_url_decode(inp):
# if sys.version > "3":
# if isinstance(inp, bytes):
# inp = inp.decode()
# return base64.urlsafe_b64decode(inp + '=' * (4 - len(inp) % 4)).decode()
# else:
# return base64.urlsafe_b64decode(str(inp + '=' * (4 - len(inp) % 4)))
#
# def wait_job(conn, job_id, timeout=60):
# """ waiting for job complete (success or fail) until timeout
# """
# def describe_job(job_id):
# ret = conn.describe_jobs([job_id])
# if not ret or not ret.get('job_set'):
# return None
# return ret['job_set'][0]
#
# deadline = time.time() + timeout
# while time.time() <= deadline:
# time.sleep(2)
# job = describe_job(job_id)
# if not job:
# continue
# if job['status'] not in ('pending', 'working'):
# if conn.debug:
# print('job is %s: %s' % (job['status'], job_id))
# sys.stdout.flush()
# return True
#
# if conn.debug:
# print('timeout for job: %s' % job_id)
# sys.stdout.flush()
# return False
. Output only the next line. | self.assertFalse(wait_job(conn, job_id, 4)) |
Given snippet: <|code_start|>
class MultiPartUpload(object):
def __init__(self, bucket, key_name, upload_id):
"""
@param bucket - The name of the bucket
@param key_name - The name of the object
@param upload_id - ID for the initiated multipart upload
"""
self.bucket = bucket
self.key_name = key_name
self.upload_id = upload_id
def upload_part_from_file(self, fp, part_number):
""" Upload multipart from a file
Keyword arguments:
fp - a file-like object
part_number - The number of the multipart
"""
params = {
"upload_id": self.upload_id,
"part_number": str(part_number),
}
response = self.bucket.connection.make_request(
"PUT", self.bucket.name, self.key_name, data=fp, params=params)
if response.status == 201:
part = Part(self.bucket.name, self.key_name, part_number)
return part
else:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .exception import get_response_error
from .util import load_data
and context:
# Path: qingcloud/qingstor/exception.py
# def get_response_error(response, body=None):
# if not body:
# body = response.read()
# args = {
# "status": response.status,
# "body": body,
# "request_id": response.getheader("request_id")
# }
# if body:
# try:
# resp = load_data(body)
# args["code"] = resp["code"]
# args["message"] = resp["message"]
# args["url"] = resp["url"]
# except ValueError:
# pass
# return QSResponseError(**args)
#
# Path: qingcloud/qingstor/util.py
# def load_data(data):
# """ Wrapper to load json data, to be compatible with Python3.
# Returns: JSON data
#
# Keyword arguments:
# data: might be bytes or str
# """
# if type(data) == bytes:
# return json.loads(data.decode("utf-8"))
# else:
# return json.loads(data)
which might include code, classes, or functions. Output only the next line. | err = get_response_error(response) |
Predict the next line after this snippet: <|code_start|> def upload_part_from_file(self, fp, part_number):
""" Upload multipart from a file
Keyword arguments:
fp - a file-like object
part_number - The number of the multipart
"""
params = {
"upload_id": self.upload_id,
"part_number": str(part_number),
}
response = self.bucket.connection.make_request(
"PUT", self.bucket.name, self.key_name, data=fp, params=params)
if response.status == 201:
part = Part(self.bucket.name, self.key_name, part_number)
return part
else:
err = get_response_error(response)
raise err
def get_all_parts(self):
""" Retrieve all multiparts of an object that uploaded.
"""
params = {
"upload_id": self.upload_id,
}
response = self.bucket.connection.make_request(
"GET", self.bucket.name, self.key_name, params=params)
if response.status == 200:
parts = []
<|code_end|>
using the current file's imports:
from .exception import get_response_error
from .util import load_data
and any relevant context from other files:
# Path: qingcloud/qingstor/exception.py
# def get_response_error(response, body=None):
# if not body:
# body = response.read()
# args = {
# "status": response.status,
# "body": body,
# "request_id": response.getheader("request_id")
# }
# if body:
# try:
# resp = load_data(body)
# args["code"] = resp["code"]
# args["message"] = resp["message"]
# args["url"] = resp["url"]
# except ValueError:
# pass
# return QSResponseError(**args)
#
# Path: qingcloud/qingstor/util.py
# def load_data(data):
# """ Wrapper to load json data, to be compatible with Python3.
# Returns: JSON data
#
# Keyword arguments:
# data: might be bytes or str
# """
# if type(data) == bytes:
# return json.loads(data.decode("utf-8"))
# else:
# return json.loads(data)
. Output only the next line. | resp = load_data(response.read()) |
Continue the code snippet: <|code_start|>
# Send the request
conn.request(method, request_path, request.body, request.header)
# Receive the response
response = conn.getresponse()
# Reuse the connection
if response.status < 500:
self._set_conn(conn)
return response
def _check_token(self):
if not self._token or not self._token_exp or time.time() >= self._token_exp:
try:
conn = httplib.HTTPConnection(self.credential_proxy_host, self.credential_proxy_port, timeout=1)
conn.request("GET", "/latest/meta-data/security-credentials", headers={"Accept": "application/json"})
response = conn.getresponse()
# Reuse the connection
if response.status == 200:
r = response.read()
if r:
# first reverse escape, then json_load
r = json_load(eval(r))
self._token = r.get('id_token')
self._token_exp = r.get('expiration')
self.iam_access_key = r.get('access_key')
self.iam_secret_key = r.get('secret_key')
<|code_end|>
. Use current file imports:
import time
import threading
import httplib
import http.client as httplib
from qingcloud.misc.json_tool import json_load
from qingcloud.conn.auth import QuerySignatureAuthHandler
and context (classes, functions, or code) from other files:
# Path: qingcloud/conn/auth.py
# class QuerySignatureAuthHandler(HmacKeys):
# """ Provides Query Signature Authentication.
# """
#
# SignatureVersion = 1
# APIVersion = 1
#
# def _calc_signature(self, params, verb, path):
# """ calc signature for request
# """
# string_to_sign = '%s\n%s\n' % (verb, path)
# params['signature_method'] = self.algorithm()
# keys = sorted(params.keys())
# pairs = []
# for key in keys:
# val = get_utf8_value(params[key])
# if is_python3:
# key = key.encode()
# pairs.append(urllib.quote(key, safe='') + '=' +
# urllib.quote(val, safe='-_~'))
# qs = '&'.join(pairs)
# string_to_sign += qs
# # print "string to sign:[%s]" % string_to_sign
# b64 = self.sign_string(string_to_sign)
# return (qs, b64)
#
# def add_auth(self, req, **kwargs):
# """ add authorize information for request
# """
# req.params['access_key_id'] = self.qy_access_key_id if 'access_key' not in kwargs else kwargs.get('access_key')
# if 'token' in kwargs:
# req.params['token'] = kwargs.get('token')
# req.params['signature_version'] = self.SignatureVersion if 'signature_version' not in kwargs \
# else kwargs.get('signature_version')
# req.params['version'] = self.APIVersion
# time_stamp = get_ts()
# req.params['time_stamp'] = time_stamp
# qs, signature = self._calc_signature(req.params, req.method,
# req.auth_path)
# # print 'query_string: %s Signature: %s' % (qs, signature)
# if req.method == 'POST':
# # req and retried req should not have signature
# params = req.params.copy()
# params["signature"] = signature
# req.body = urllib.urlencode(params)
# req.header = {
# 'Content-Length': str(len(req.body)),
# 'Content-Type': 'application/x-www-form-urlencoded',
# 'Accept': 'text/plain',
# 'Connection': 'Keep-Alive'
# }
# else:
# req.body = ''
# # if this is a retried req, the qs from the previous try will
# # already be there, we need to get rid of that and rebuild it
# req.params["signature"] = signature
# req.path = req.path.split('?')[0]
# req.path = (req.path + '?' + qs +
# '&signature=' + urllib.quote_plus(signature))
. Output only the next line. | self._auth_handler = QuerySignatureAuthHandler(self.host, |
Predict the next line after this snippet: <|code_start|># =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
class TestInstanceGroupsAction(unittest.TestCase):
max_retry_times = 2
@classmethod
def setUpClass(cls):
""" Initialization of mock test. """
<|code_end|>
using the current file's imports:
import mock
import random
import unittest
from qingcloud.iaas.actions.instance_groups import InstanceGroupsAction
and any relevant context from other files:
# Path: qingcloud/iaas/actions/instance_groups.py
# class InstanceGroupsAction(object):
#
# def __init__(self, conn):
# self.conn = conn
#
# def create_instance_groups(self, relation,
# instance_group_name=None,
# description=None,
# **ignore):
# """ Create an instance group.
# @param relation: Define the relation between instances in the same group.
# "repel" means these instances prefer distributing on the different physical units.
# "attract" means these instances prefer converging on the same physical unit.
# @param instance_group_name: The name of this group.
# @param description: The description of this group.
# """
# action = const.ACTION_CREATE_INSTANCE_GROUPS
# valid_keys = ['relation', 'instance_group_name', 'description']
# body = filter_out_none(locals(), valid_keys)
# if not self.conn.req_checker.check_params(body,
# required_params=['relation'],
# ):
# return None
#
# return self.conn.send_request(action, body)
#
# def delete_instance_groups(self, instance_groups,
# **ignore):
# """ Delete the specific instance group.
# @param instance_groups: An id list contains the group(s) id which will be deleted.
# """
# action = const.ACTION_DELETE_INSTANCE_GROUPS
# valid_keys = ['instance_groups']
# body = filter_out_none(locals(), valid_keys)
# if not self.conn.req_checker.check_params(body,
# required_params=['instance_groups'],
# list_params=['instance_groups']
# ):
# return None
#
# return self.conn.send_request(action, body)
#
# def join_instance_group(self, instances,
# instance_group,
# **ignore):
# """ Add the instance(s) to the instance group.
# @param instances: An id list contains the instances(s) that will be added in the specific group.
# @param instance_group: The group id.
# """
# action = const.ACTION_JOIN_INSTANCE_GROUP
# valid_keys = ['instances', 'instance_group']
# body = filter_out_none(locals(), valid_keys)
# if not self.conn.req_checker.check_params(body,
# required_params=['instances', 'instance_group'],
# list_params=['instances']
# ):
# return None
#
# return self.conn.send_request(action, body)
#
# def leave_instance_group(self, instances,
# instance_group,
# **ignore):
# """ Delete the specific instance(s) from the group.
# @param instances: An id list contains the instance(s) who want to leave the instance group.
# @param instance_group: The instance group id.
# """
# action = const.ACTION_LEAVE_INSTANCE_GROUP
# valid_keys = ['instances', 'instance_group']
# body = filter_out_none(locals(), valid_keys)
# if not self.conn.req_checker.check_params(body,
# required_params=['instances', 'instance_group'],
# list_params=['instances']
# ):
# return None
#
# return self.conn.send_request(action, body)
#
# def describe_instance_groups(self, instance_groups=[],
# relation=None,
# tags=None,
# owner=None,
# verbose=0,
# offset=0,
# limit=20,
# **ignore):
# """ Describe the instance groups filtered by conditions.
# @param instance_groups: If this param was given, only return the group(s) info in this given list.
# @param relation: Filter by the relation type.
# @param tags: Filter by the tag id.
# @param owner: Filter by the owner id.
# @param verbose: Whether return the verbose information.
# @param offset: The offset of the item cursor and its default value is 0.
# @param limit: The number of items that will be displayed. Default is 20, maximum is 100.
# """
# action = const.ACTION_DESCRIBE_INSTANCE_GROUPS
# valid_keys = ['instance_groups', 'relation', 'tags', 'owner',
# 'verbose', 'offset', 'limit']
# body = filter_out_none(locals(), valid_keys)
# if not self.conn.req_checker.check_params(body,
# list_params=['instance_groups', 'tags'],
# integer_params=['limit', 'verbose', 'offset']
# ):
# return None
#
# return self.conn.send_request(action, body)
. Output only the next line. | cls.ig_action_object = InstanceGroupsAction(mock.Mock()) |
Predict the next line for this snippet: <|code_start|> user = ForeignKey(UserProfile)
repository = ForeignKey(Repository)
order = PositiveIntegerField()
repository_category = ForeignKey(RepositoryCategory)
link_type = ForeignKey(LinkType)
last_modified = DateTimeField(auto_now=True)
class Meta:
ordering = ['repository_category__name','order']
unique_together = ("user", "repository", 'link_type')
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
def expire_cache(sender, instance, **kwargs):
try:
github_username = None
bitbucket_username = None
try:
github_username = instance.user.social_auth.get(provider='github').extra_data['username']
github_prefix = github_username
except:
github_prefix = ''
try:
bitbucket_username = instance.user.social_auth.get(provider='bitbucket').extra_data['username']
bitbucket_prefix=bitbucket_username
except:
bitbucket_prefix = ''
custom_prefix = '.'.join((hashlib.md5(github_prefix).hexdigest(),hashlib.md5(bitbucket_prefix).hexdigest()))
<|code_end|>
with the help of current file imports:
from django.contrib.auth.models import User
from django.db import models
from django.db.models.fields import EmailField, URLField, CharField, TextField, \
PositiveIntegerField, SlugField, DateTimeField, BooleanField
from django.db.models.fields.related import ForeignKey
from django.db.models.signals import post_save, pre_save
from repowatcher.main.utils import expire_view_cache
from social_auth.fields import JSONField
import hashlib
import logging
and context from other files:
# Path: repowatcher/main/utils.py
# def expire_view_cache(view_name, args = [], kwargs={}, namespace=None, key_prefix=None):
# """
# This function allows you to invalidate any view-level cache.
# view_name: view function you wish to invalidate or it's named url pattern
# args: any arguments passed to the view function
# namepace: optioal, if an application namespace is needed
# key prefix: for the @cache_page decorator for the function (if any)
# """
#
# # create a fake request object
# request = HttpRequest()
# # Loookup the request path:
# if namespace:
# view_name = namespace + ":" + view_name
# request.path = reverse(view_name, args = args, kwargs=kwargs)
# # get cache key, expire if the cached item exists:
# key = cache.make_key(get_cache_key(request, key_prefix=key_prefix))
# if key:
# if cache.get(key):
# cache.delete(key)
# return True
# return False
, which may contain function names, class names, or code. Output only the next line. | expire_view_cache('authed_owned', key_prefix=custom_prefix) |
Predict the next line after this snippet: <|code_start|>logger = logging.getLogger(__name__)
class GithubProvider(ProviderBase):
base_url = "https://api.github.com/"
def __init__(self, user):
self.user = user
self.host = 'github'
self.access_token = None
if user.is_authenticated():
try:
self.access_token=user.social_auth.get(provider=self.host).extra_data['access_token']
self.client = Github(token=self.access_token)
except ObjectDoesNotExist:
self.client = Github(client_id=settings.GITHUB_APP_ID, client_secret=settings.GITHUB_API_SECRET)
else:
self.client = Github(client_id=settings.GITHUB_APP_ID, client_secret=settings.GITHUB_API_SECRET)
def get_user_details(self, username):
try:
github_user = self.client.users.get(username)
return vars(github_user)
except:
raise Http404
def create_or_update_user_details(self, user_dict, repository_user = None):
if repository_user is None:
<|code_end|>
using the current file's imports:
from ProviderBase import ProviderBase
from django.conf import settings
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from operator import itemgetter
from pygithub3 import Github
from pygithub3.exceptions import NotFound
from repowatcher.main.models import RepositoryUser, Repository, LinkType
from repowatcher.main.tasks import get_events
from django.http import Http404
import json
import logging
import requests
and any relevant context from other files:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
#
# Path: repowatcher/main/tasks.py
# @task
# def get_events(url):
# try:
# body = urllib2.urlopen(url).read()
# events = json.loads(body)
# except Exception:
# events = None
# return events
. Output only the next line. | repository_user = RepositoryUser() |
Given snippet: <|code_start|> setattr(repository_user, key, value)
else:
if isinstance(value, datetime):
extra_data[key] = value.__str__()
else:
extra_data[key] = value
repository_user.extra_data = json.dumps(extra_data)
repository_user.host = self.host
return repository_user
def retrieve_user_details(self, username):
return RepositoryUser.objects.get(slug=self.host+'/'+username.lower())
def get_user_events(self, username):
try:
r = requests.get(GithubProvider.base_url + 'users/'+ username + '/events', params = {"client_id": settings.GITHUB_APP_ID, "client_secret": settings.GITHUB_API_SECRET})
user_events = json.loads(r.text)
except Exception:
user_events = []
return user_events
def get_repository_details(self, owner, repository):
try:
repo = self.client.repos.get(user=owner,repo=repository)
return vars(repo)
except NotFound:
raise Http404
def create_or_update_repository_details(self, repository_dict, repository = None):
if repository is None:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from ProviderBase import ProviderBase
from django.conf import settings
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from operator import itemgetter
from pygithub3 import Github
from pygithub3.exceptions import NotFound
from repowatcher.main.models import RepositoryUser, Repository, LinkType
from repowatcher.main.tasks import get_events
from django.http import Http404
import json
import logging
import requests
and context:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
#
# Path: repowatcher/main/tasks.py
# @task
# def get_events(url):
# try:
# body = urllib2.urlopen(url).read()
# events = json.loads(body)
# except Exception:
# events = None
# return events
which might include code, classes, or functions. Output only the next line. | repository = Repository() |
Predict the next line for this snippet: <|code_start|> extra_data[key] = value
repository.extra_data = json.dumps(extra_data)
if repository.language == "" or repository.language == None:
repository.language = "other"
repository.scm = 'git'
repository.host =self.host
repository.language = repository.language.lower()
return repository
def retrieve_repository_details(self, owner, repository):
host_slug = ('/'.join((self.host, owner, repository))).lower()
return Repository.objects.get(host_slug=host_slug)
def get_repository_events(self, owner, repository):
slug = (owner + '/' + repository).lower()
try:
r = requests.get('https://api.github.com/repos/'+ slug + '/events', params = {"client_id": settings.GITHUB_APP_ID, "client_secret": settings.GITHUB_API_SECRET})
repo_events = json.loads(r.text)
except Exception:
repo_events = []
return repo_events
def get_repositories_events(self, repository_list):
repo_events = []
request_urls = []
url_requests = []
for repository in repository_list:
slug = '/'.join((repository.owner, repository.name))
request_urls.append(GithubProvider.base_url + 'repos/' + slug + '/events' + '?client_id=' + settings.GITHUB_APP_ID + '&client_secret=' + settings.GITHUB_API_SECRET)
for url in request_urls:
<|code_end|>
with the help of current file imports:
from ProviderBase import ProviderBase
from django.conf import settings
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from operator import itemgetter
from pygithub3 import Github
from pygithub3.exceptions import NotFound
from repowatcher.main.models import RepositoryUser, Repository, LinkType
from repowatcher.main.tasks import get_events
from django.http import Http404
import json
import logging
import requests
and context from other files:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
#
# Path: repowatcher/main/tasks.py
# @task
# def get_events(url):
# try:
# body = urllib2.urlopen(url).read()
# events = json.loads(body)
# except Exception:
# events = None
# return events
, which may contain function names, class names, or code. Output only the next line. | url_requests.append(get_events.delay(url)) |
Here is a snippet: <|code_start|>
class BitbucketProvider(ProviderBase):
def __init__(self, user):
self.user = user
self.tokens = None
self.host = 'bitbucket'
if user.is_authenticated():
try:
self.tokens = user.social_auth.get(provider=self.host).tokens
oauth_hook = OAuthHook(self.tokens['oauth_token'], self.tokens['oauth_token_secret'], header_auth=False)
logger.error(self.tokens['oauth_token']+"-"+self.tokens['oauth_token_secret'])
self.client = requests.session(hooks={'pre_request': oauth_hook})
except ObjectDoesNotExist:
self.client = requests.session()
else:
self.client = requests.session()
self.slumber = slumber.API("https://api.bitbucket.org/1.0/", session=self.client)
def get_user_details(self, username):
try:
user_dict = self.slumber.users(username).get()['user']
except Exception:
user_dict = {}
return user_dict
def create_or_update_user_details(self, user_dict, repository_user = None):
extra_data = {}
if repository_user is None:
<|code_end|>
. Write the next line using the current file imports:
from ProviderBase import ProviderBase
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from oauth_hook.hook import OAuthHook
from operator import itemgetter
from repowatcher.main.models import RepositoryUser, Repository, LinkType
from repowatcher.main.tasks import get_events
import json
import requests
import slumber
import logging
and context from other files:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
#
# Path: repowatcher/main/tasks.py
# @task
# def get_events(url):
# try:
# body = urllib2.urlopen(url).read()
# events = json.loads(body)
# except Exception:
# events = None
# return events
, which may include functions, classes, or code. Output only the next line. | repository_user = RepositoryUser() |
Given the code snippet: <|code_start|> extra_data[key] = value.__str__()
else:
extra_data[key] = value
repository_user.extra_data = json.dumps(extra_data)
repository_user.host = self.host
return repository_user
def retrieve_user_details(self, username):
return RepositoryUser.objects.get(slug=self.host+'/'+username.lower())
def get_user_events(self, username):
try:
user_events = self.slumber.users(username.lower()).events.get()['events']
logger.debug("I did not throw an exception")
except Exception:
user_events = []
return user_events
def get_repository_details(self, owner, repository):
slug = '/'.join((owner.lower(), repository.lower()))
try:
repository_dict = self.slumber.repositories(slug).get()
except Exception:
raise Http404
return repository_dict
def create_or_update_repository_details(self, repository_dict, repository = None):
extra_data = {}
if repository is None:
<|code_end|>
, generate the next line using the imports in this file:
from ProviderBase import ProviderBase
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from oauth_hook.hook import OAuthHook
from operator import itemgetter
from repowatcher.main.models import RepositoryUser, Repository, LinkType
from repowatcher.main.tasks import get_events
import json
import requests
import slumber
import logging
and context (functions, classes, or occasionally code) from other files:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
#
# Path: repowatcher/main/tasks.py
# @task
# def get_events(url):
# try:
# body = urllib2.urlopen(url).read()
# events = json.loads(body)
# except Exception:
# events = None
# return events
. Output only the next line. | repository = Repository() |
Using the snippet: <|code_start|> else:
extra_data[key] = value
repository.extra_data = json.dumps(extra_data)
if repository.language == "" or repository.language == None:
repository.language = "other"
repository.language = repository.language.lower()
repository.host =self.host
return repository
def retrieve_repository_details(self, owner, repository):
host_slug = ('/'.join((self.host,owner,repository))).lower()
repo = Repository.objects.get(host_slug= host_slug)
return repo
def get_repository_events(self, owner, repository):
slug = '/'.join((owner.lower(), repository.lower()))
try:
repo_events = self.slumber.repositories(slug).events.get()['events']
except Exception:
repo_events =[]
return repo_events
def get_repositories_events(self, repository_list):
repo_events = []
request_urls = []
url_requests = []
for repo in repository_list:
request_urls.append('https://api.bitbucket.org/1.0/repositories/%s/%s/events/'%(repo.owner.lower(),repo.name.lower()))
for url in request_urls:
<|code_end|>
, determine the next line of code. You have imports:
from ProviderBase import ProviderBase
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from oauth_hook.hook import OAuthHook
from operator import itemgetter
from repowatcher.main.models import RepositoryUser, Repository, LinkType
from repowatcher.main.tasks import get_events
import json
import requests
import slumber
import logging
and context (class names, function names, or code) available:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
#
# Path: repowatcher/main/tasks.py
# @task
# def get_events(url):
# try:
# body = urllib2.urlopen(url).read()
# events = json.loads(body)
# except Exception:
# events = None
# return events
. Output only the next line. | url_requests.append(get_events.delay(url)) |
Next line prediction: <|code_start|> repository = Repository()
if update or (datetime.now() - repository.last_modified) > timedelta(days = 1):
repository = github_provider.create_or_update_repository_details(repo, repository)
if not repository.private:
repository.save()
repositories_by_language[repository.language].append(repository)
for repo in bitbucket_repositories:
update = False
repo['owner'] = repo['owner'].lower().replace("/", "")
repo['name'] = repo['name'].lower().replace("/", "")
try:
repository = Repository.objects.get(slug= repo['owner'] + '/' + repo['name'], host='bitbucket')
except ObjectDoesNotExist:
update = True
repository = Repository()
if update or (datetime.now() - repository.last_modified) > timedelta(days = 1):
extra_data = {}
key_map={'owner':'owner','name':'name', 'website':'homepage','language':'language','description':'description','created_on':'created_at','last_updated':'pushed_at','scm':'scm','is_private':'private'}
for key,value in repo.iteritems():
if key in ['owner','name', 'website','language','description','created_on','last_updated','scm','is_private']:
setattr(repository,key_map[key],value)
else:
extra_data[key] = value
repository.extra_data = json.dumps(extra_data)
if repository.language == "" or repository.language == None:
repository.language = "other"
repository.host ='bitbucket'
if not repository.private:
repository.save()
for category in repositories_by_language.keys():
<|code_end|>
. Use current file imports:
(from GithubProvider import GithubProvider
from collections import defaultdict
from datetime import timedelta, datetime
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.views.decorators.cache import never_cache, cache_control
from oauth_hook import OAuthHook
from repowatcher.main.models import RepositoryCategory, Repository
from urllib import urlencode, quote
import json
import logging
import requests
import urllib)
and context including class names, function names, or small code snippets from other files:
# Path: repowatcher/main/models.py
# class RepositoryCategory(models.Model):
# name = CharField(max_length=100)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
. Output only the next line. | RepositoryCategory.objects.get_or_create(name = category) |
Next line prediction: <|code_start|>logger = logging.getLogger(__name__)
OAuthHook.consumer_key = settings.BITBUCKET_CONSUMER_KEY
OAuthHook.consumer_secret = settings.BITBUCKET_CONSUMER_SECRET
@never_cache
def index(request):
"""Home view, displays login mechanism"""
if request.user.is_authenticated():
return HttpResponseRedirect(reverse('repowatcher.main.views.authed'))
else:
return render_to_response('index.html', {}, RequestContext(request))
def about(request):
<|code_end|>
. Use current file imports:
(from GithubProvider import GithubProvider
from collections import defaultdict
from datetime import timedelta, datetime
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.views.decorators.cache import never_cache, cache_control
from oauth_hook import OAuthHook
from repowatcher.main.models import RepositoryCategory, Repository
from urllib import urlencode, quote
import json
import logging
import requests
import urllib)
and context including class names, function names, or small code snippets from other files:
# Path: repowatcher/main/models.py
# class RepositoryCategory(models.Model):
# name = CharField(max_length=100)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
. Output only the next line. | used = [Repository(owner='django', name='django', host='github'), |
Predict the next line for this snippet: <|code_start|> user_repository_link.order = order
user_repository_link.save()
def save_watched_repositories(self, repositories_dict):
self.save_repositories(repositories_dict = repositories_dict, link_type = "watched")
def save_owned_repositories(self, repositories_dict):
self.save_repositories(repositories_dict = repositories_dict, link_type = "owned")
def save_starred_repositories(self, repositories_dict):
self.save_repositories(repositories_dict = repositories_dict, link_type = "starred")
def retrieve_repositories_list(self, username, link_type):
repositories = []
api_only = True
repo_link_type, _ = LinkType.objects.get_or_create(name = link_type)
if self.user.is_authenticated():
try:
if username == self.user.social_auth.get(provider=self.host).extra_data['username']:
profile = self.user.get_profile()
links = profile.userrepositorylink_set.filter(link_type = repo_link_type).filter(repository__host=self.host).select_related('repository').select_related('repository','repository_category')
if len(links) != 0:
for link in links:
repositories.append(link.repository)
api_only = False
else:
api_only = True
except ObjectDoesNotExist:
pass
try:
<|code_end|>
with the help of current file imports:
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.aggregates import Max, Min
from repowatcher.main.models import RepositoryUser, RepositoryCategory, \
Repository, UserRepositoryLink, RepositoryUserRepositoryLink, LinkType
import logging
import urllib
and context from other files:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class RepositoryCategory(models.Model):
# name = CharField(max_length=100)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class UserRepositoryLink(models.Model):
# user = ForeignKey(UserProfile)
# repository = ForeignKey(Repository)
# order = PositiveIntegerField()
# repository_category = ForeignKey(RepositoryCategory)
# link_type = ForeignKey(LinkType)
# last_modified = DateTimeField(auto_now=True)
#
# class Meta:
# ordering = ['repository_category__name','order']
# unique_together = ("user", "repository", 'link_type')
#
# class RepositoryUserRepositoryLink(models.Model):
# user = ForeignKey(RepositoryUser)
# repository = ForeignKey(Repository)
# last_modified = DateTimeField(auto_now=True)
# link_type = ForeignKey(LinkType)
#
# class Meta:
# ordering = ['repository__language', '-repository__watchers']
# unique_together = ("user", "repository", "link_type")
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
, which may contain function names, class names, or code. Output only the next line. | repository_user = RepositoryUser.objects.get(slug=self.host+'/'+username.lower()) |
Continue the code snippet: <|code_start|> def retrieve_repository_details(self, owner, repository):
return
@abstractmethod
def get_repository_events(self, owner, repository):
return
@abstractmethod
def get_repositories_events(self, repository_list):
return
@abstractmethod
def get_repositories(self, username, owned, link_type):
return
def get_watched_repositories(self, username):
return self.get_repositories(username = username, link_type = "watched")
def get_owned_repositories(self, username):
return self.get_repositories(username = username, link_type = "owned")
def get_starred_repositories(self, username):
return self.get_repositories(username = username, link_type = "starred")
def save_repositories(self, repositories_dict, link_type):
profile = self.user.get_profile()
for category_name,value in repositories_dict.iteritems():
update = False
if category_name == '':
category_name = 'other'
<|code_end|>
. Use current file imports:
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.aggregates import Max, Min
from repowatcher.main.models import RepositoryUser, RepositoryCategory, \
Repository, UserRepositoryLink, RepositoryUserRepositoryLink, LinkType
import logging
import urllib
and context (classes, functions, or code) from other files:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class RepositoryCategory(models.Model):
# name = CharField(max_length=100)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class UserRepositoryLink(models.Model):
# user = ForeignKey(UserProfile)
# repository = ForeignKey(Repository)
# order = PositiveIntegerField()
# repository_category = ForeignKey(RepositoryCategory)
# link_type = ForeignKey(LinkType)
# last_modified = DateTimeField(auto_now=True)
#
# class Meta:
# ordering = ['repository_category__name','order']
# unique_together = ("user", "repository", 'link_type')
#
# class RepositoryUserRepositoryLink(models.Model):
# user = ForeignKey(RepositoryUser)
# repository = ForeignKey(Repository)
# last_modified = DateTimeField(auto_now=True)
# link_type = ForeignKey(LinkType)
#
# class Meta:
# ordering = ['repository__language', '-repository__watchers']
# unique_together = ("user", "repository", "link_type")
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
. Output only the next line. | repository_category,_ = RepositoryCategory.objects.get_or_create(name = category_name) |
Next line prediction: <|code_start|> def get_repository_events(self, owner, repository):
return
@abstractmethod
def get_repositories_events(self, repository_list):
return
@abstractmethod
def get_repositories(self, username, owned, link_type):
return
def get_watched_repositories(self, username):
return self.get_repositories(username = username, link_type = "watched")
def get_owned_repositories(self, username):
return self.get_repositories(username = username, link_type = "owned")
def get_starred_repositories(self, username):
return self.get_repositories(username = username, link_type = "starred")
def save_repositories(self, repositories_dict, link_type):
profile = self.user.get_profile()
for category_name,value in repositories_dict.iteritems():
update = False
if category_name == '':
category_name = 'other'
repository_category,_ = RepositoryCategory.objects.get_or_create(name = category_name)
for order, item in enumerate(value,start=1):
if item !='':
try:
<|code_end|>
. Use current file imports:
(from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.aggregates import Max, Min
from repowatcher.main.models import RepositoryUser, RepositoryCategory, \
Repository, UserRepositoryLink, RepositoryUserRepositoryLink, LinkType
import logging
import urllib)
and context including class names, function names, or small code snippets from other files:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class RepositoryCategory(models.Model):
# name = CharField(max_length=100)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class UserRepositoryLink(models.Model):
# user = ForeignKey(UserProfile)
# repository = ForeignKey(Repository)
# order = PositiveIntegerField()
# repository_category = ForeignKey(RepositoryCategory)
# link_type = ForeignKey(LinkType)
# last_modified = DateTimeField(auto_now=True)
#
# class Meta:
# ordering = ['repository_category__name','order']
# unique_together = ("user", "repository", 'link_type')
#
# class RepositoryUserRepositoryLink(models.Model):
# user = ForeignKey(RepositoryUser)
# repository = ForeignKey(Repository)
# last_modified = DateTimeField(auto_now=True)
# link_type = ForeignKey(LinkType)
#
# class Meta:
# ordering = ['repository__language', '-repository__watchers']
# unique_together = ("user", "repository", "link_type")
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
. Output only the next line. | repository = Repository.objects.get(host_slug=self.host+'/'+item.lower()) |
Continue the code snippet: <|code_start|> return self.get_repositories(username = username, link_type = "watched")
def get_owned_repositories(self, username):
return self.get_repositories(username = username, link_type = "owned")
def get_starred_repositories(self, username):
return self.get_repositories(username = username, link_type = "starred")
def save_repositories(self, repositories_dict, link_type):
profile = self.user.get_profile()
for category_name,value in repositories_dict.iteritems():
update = False
if category_name == '':
category_name = 'other'
repository_category,_ = RepositoryCategory.objects.get_or_create(name = category_name)
for order, item in enumerate(value,start=1):
if item !='':
try:
repository = Repository.objects.get(host_slug=self.host+'/'+item.lower())
except ObjectDoesNotExist:
repository = Repository()
update = True
if update or (datetime.now() - repository.last_modified) > timedelta(days = 1):
owner, repository_name = item.split('/')
repository_dict = self.get_repository_details(owner, repository_name)
repository = self.create_or_update_repository_details(repository_dict, repository)
if not repository.private:
repository.save()
repo_link_type, _ = LinkType.objects.get_or_create(name = link_type)
try:
<|code_end|>
. Use current file imports:
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.aggregates import Max, Min
from repowatcher.main.models import RepositoryUser, RepositoryCategory, \
Repository, UserRepositoryLink, RepositoryUserRepositoryLink, LinkType
import logging
import urllib
and context (classes, functions, or code) from other files:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class RepositoryCategory(models.Model):
# name = CharField(max_length=100)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class UserRepositoryLink(models.Model):
# user = ForeignKey(UserProfile)
# repository = ForeignKey(Repository)
# order = PositiveIntegerField()
# repository_category = ForeignKey(RepositoryCategory)
# link_type = ForeignKey(LinkType)
# last_modified = DateTimeField(auto_now=True)
#
# class Meta:
# ordering = ['repository_category__name','order']
# unique_together = ("user", "repository", 'link_type')
#
# class RepositoryUserRepositoryLink(models.Model):
# user = ForeignKey(RepositoryUser)
# repository = ForeignKey(Repository)
# last_modified = DateTimeField(auto_now=True)
# link_type = ForeignKey(LinkType)
#
# class Meta:
# ordering = ['repository__language', '-repository__watchers']
# unique_together = ("user", "repository", "link_type")
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
. Output only the next line. | user_repository_link = UserRepositoryLink.objects.get(user = profile,repository = repository, link_type = repo_link_type) |
Using the snippet: <|code_start|>
def get_watched_repositories(self, username):
return self.get_repositories(username = username, link_type = "watched")
def get_owned_repositories(self, username):
return self.get_repositories(username = username, link_type = "owned")
def get_starred_repositories(self, username):
return self.get_repositories(username = username, link_type = "starred")
def save_repositories(self, repositories_dict, link_type):
profile = self.user.get_profile()
for category_name,value in repositories_dict.iteritems():
update = False
if category_name == '':
category_name = 'other'
repository_category,_ = RepositoryCategory.objects.get_or_create(name = category_name)
for order, item in enumerate(value,start=1):
if item !='':
try:
repository = Repository.objects.get(host_slug=self.host+'/'+item.lower())
except ObjectDoesNotExist:
repository = Repository()
update = True
if update or (datetime.now() - repository.last_modified) > timedelta(days = 1):
owner, repository_name = item.split('/')
repository_dict = self.get_repository_details(owner, repository_name)
repository = self.create_or_update_repository_details(repository_dict, repository)
if not repository.private:
repository.save()
<|code_end|>
, determine the next line of code. You have imports:
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.aggregates import Max, Min
from repowatcher.main.models import RepositoryUser, RepositoryCategory, \
Repository, UserRepositoryLink, RepositoryUserRepositoryLink, LinkType
import logging
import urllib
and context (class names, function names, or code) available:
# Path: repowatcher/main/models.py
# class RepositoryUser(models.Model):
# login = CharField(max_length=100,db_index = True)
# name = CharField(max_length=100, null = True)
# slug = SlugField(max_length=201, unique = True)
# email = EmailField(max_length=254, null = True)
# blog = URLField(null = True)
# followers = PositiveIntegerField(null = True)
# following = PositiveIntegerField(null = True)
# public_repos = PositiveIntegerField(null = True)
# created_at = DateTimeField(null=True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# repositories = models.ManyToManyField(Repository, through='RepositoryUserRepositoryLink')
# starred = PositiveIntegerField(null = True)
# watched = PositiveIntegerField(null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES, db_index = True)
#
# class Meta:
# unique_together = ("login", "host")
#
# def save(self, *args, **kwargs):
# self.slug = self.host + '/' + self.login.lower()
# super(RepositoryUser, self).save(*args, **kwargs)
#
# class RepositoryCategory(models.Model):
# name = CharField(max_length=100)
#
# class Repository(models.Model):
# owner = CharField(max_length=100)
# name = CharField(max_length=100)
# slug = SlugField(max_length=201)
# host_slug = SlugField(max_length=302, unique = True)
# language = CharField(max_length=100, null = True)
# html_url = URLField(null = True, max_length=400)
# homepage = URLField(null = True, max_length=400)
# watchers = PositiveIntegerField(null = True)
# created_at = DateTimeField(null = True)
# pushed_at = DateTimeField(null = True)
# description = TextField(null = True)
# extra_data = JSONField(null = True)
# last_modified = DateTimeField(auto_now=True)
# scm = CharField(max_length=100, choices=SCM_CHOICES, null = True)
# host = CharField(max_length=100, choices=HOST_CHOICES)
# private = BooleanField(default = False)
#
# class Meta:
# unique_together = ("owner", "name", "host")
# ordering = ['-watchers']
#
# def save(self, *args, **kwargs):
# self.slug = self.owner.lower() + '/' + self.name.lower()
# self.host_slug = self.host+'/'+self.slug
# if self.html_url == None or self.html_url =='':
# if self.host =='bitbucket':
# self.html_url = 'https://bitbucket.org/%s/%s' % (self.owner,self.name)
# if self.host =='github':
# self.html_url = 'https://github.com/%s/%s' % (self.owner,self.name)
#
# super(Repository, self).save(*args, **kwargs)
#
# class UserRepositoryLink(models.Model):
# user = ForeignKey(UserProfile)
# repository = ForeignKey(Repository)
# order = PositiveIntegerField()
# repository_category = ForeignKey(RepositoryCategory)
# link_type = ForeignKey(LinkType)
# last_modified = DateTimeField(auto_now=True)
#
# class Meta:
# ordering = ['repository_category__name','order']
# unique_together = ("user", "repository", 'link_type')
#
# class RepositoryUserRepositoryLink(models.Model):
# user = ForeignKey(RepositoryUser)
# repository = ForeignKey(Repository)
# last_modified = DateTimeField(auto_now=True)
# link_type = ForeignKey(LinkType)
#
# class Meta:
# ordering = ['repository__language', '-repository__watchers']
# unique_together = ("user", "repository", "link_type")
#
# class LinkType(models.Model):
# name = CharField(max_length = 100, unique = True, choices = LINK_TYPES)
. Output only the next line. | repo_link_type, _ = LinkType.objects.get_or_create(name = link_type) |
Based on the snippet: <|code_start|> self.balls_per_game = self.user_settings['Standard']['Balls Per Game']
#moonlight setup
self.moonlight_minutes = self.user_settings['Feature']['Moonlight Mins to Midnight']
self.moonlight_flag = False
self.setup_ball_search()
#self.score_display.set_left_players_justify(self.user_settings['Display']['Left side score justify'])
# Note - Game specific item:
# The last parameter should be the name of the game's ball save lamp
self.ball_save = procgame.modes.BallSave(self, self.lamps.shootAgain,'shooterLane' )
trough_switchnames = []
# Note - Game specific item:
# This range should include the number of trough switches for
# the specific game being run. In range(1,x), x = last number + 1.
for i in range(1,4):
trough_switchnames.append('trough' + str(i))
early_save_switchnames = ['rightOutlane', 'leftOutlane']
#setup trough
self.trough = Trough(game=self,drain_callback=self.drain_callback)
# Link ball_save to trough
self.trough.ball_save_callback = self.ball_save.launch_callback
self.trough.num_balls_to_save = self.ball_save.get_num_balls_to_save
self.ball_save.trough_enable_ball_save = self.trough.enable_ball_save
#setup auto launcher
<|code_end|>
, predict the immediate next line with the help of imports:
import sys
import procgame
import pinproc
import string
import time
import datetime
import locale
import math
import copy
import yaml
import os
import logging
import audits
import diagnostics
from utility import boolean_format
from ac_relay import *
from switched_coils import *
from scoredisplay.alphanumeric import *
from scoredisplay.scoredisplay import *
from effects import *
from extra_ball import *
from service import *
from attract import *
from base import *
from moonlight import *
from match import *
from tilt import *
from trough import *
from procgame import *
from player import *
from threading import Thread
from time import strftime
from scoredisplay.desktop import Desktop
from colorlogging import ColorizingStreamHandler
and context (classes, functions, sometimes code) from other files:
# Path: utility.py
# def boolean_format(value):
# if value=='Yes':
# return True
# else:
# return False
. Output only the next line. | self.auto_launch_enabled = boolean_format(self.user_settings['Feature']['Auto Launcher Installed']) |
Based on the snippet: <|code_start|>music_path = game_path +"music/"
class Pops(game.Mode):
def __init__(self, game, priority):
super(Pops, self).__init__(game, priority)
self.log = logging.getLogger('whirlwind.pops')
self.hits_layer = dmd.TextLayer(100, 0, self.game.fonts['num_14x10'], "center", opaque=False)
self.game.sound.register_sound('pop1', sound_path+"pop_1.ogg")
self.game.sound.register_sound('pop2', sound_path+"pop_2.ogg")
self.game.sound.register_sound('pop3', sound_path+"pop_3.ogg")
self.game.sound.register_sound('pop4', sound_path+"pop_4.ogg")
self.game.sound.register_sound('pop5', sound_path+"pop_5.ogg")
self.game.sound.register_sound('super1', sound_path+"super_pop_1.ogg")
self.game.sound.register_sound('super2', sound_path+"super_pop_2.ogg")
self.game.sound.register_sound('super3', sound_path+"super_pop_3.ogg")
self.game.sound.register_sound('super4', sound_path+"super_pop_4.ogg")
self.game.sound.register_sound('super5', sound_path+"super_pop_5.ogg")
self.game.sound.register_sound('pop_lit', sound_path+"pop_lit.ogg")
self.game.sound.register_sound('pop_max', sound_path+"pop_max.ogg")
self.lamps = ['lowerLeftJet','lowerRightJet','lowerTopJet','upperBottomJet','upperLeftJet','upperRightJet']
self.super_pops_default = int(self.game.user_settings['Feature']['Super Jets Start'])
<|code_end|>
, predict the immediate next line with the help of imports:
import procgame
import locale
import random
import logging
import audits
from procgame import *
from utility import boolean_format
and context (classes, functions, sometimes code) from other files:
# Path: utility.py
# def boolean_format(value):
# if value=='Yes':
# return True
# else:
# return False
. Output only the next line. | self.super_pops_enabled = boolean_format(self.game.user_settings['Feature']['Super Jets Enabled']) |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Project(ComponentBase):
def __init__(self, data, registry):
super(Project, self).__init__(data, registry)
self._placeholders = [placeholder for dashboard in self._get_dashboard_names()
<|code_end|>
. Use current file imports:
from grafana_dashboards.components.base import ComponentBase, get_placeholders
from grafana_dashboards.components.dashboards import Dashboard
from grafana_dashboards.context import Context
and context (classes, functions, or code) from other files:
# Path: grafana_dashboards/components/base.py
# class ComponentBase(object):
# def __init__(self, data, registry):
# """
#
# :type registry: ComponentRegistry
# """
# super(ComponentBase, self).__init__()
# self.data = data[get_component_type(type(self))]
# if self.data is None:
# self.data = {}
# self.name = data.get('name')
# self.registry = registry
#
# def get_placeholders(component_name):
# return [v[1] for v in string.Formatter().parse(component_name) if v[1]]
#
# Path: grafana_dashboards/components/dashboards.py
# class Dashboard(JsonGenerator):
#
# # noinspection PySetFunctionToLiteral
# _copy_fields = set(['sharedCrosshair'])
#
# def gen_json_from_data(self, data, context):
# json_data = super(Dashboard, self).gen_json_from_data(data, context)
# nav = {
# 'type': 'timepicker'
# }
# json_data.update({
# 'title': data.get('title', self.name),
# 'nav': [
# nav
# ]
# })
# if 'time' in data:
# json_data['time'] = {
# 'from': data['time']['from'],
# 'to': data['time']['to']
# }
# if 'tags' in data:
# json_data['tags'] = data.get('tags')
# if 'time_options' in data:
# nav['time_options'] = data.get('time_options', [])
# if 'refresh_intervals' in data:
# nav['refresh_intervals'] = data.get('refresh_intervals', [])
# if 'refresh' in data:
# json_data['refresh'] = data.get('refresh')
# if 'folderId' in data:
# json_data['folderId'] = data.get('folderId')
# if get_component_type(Annotations) in data:
# json_data['annotations'] = {'list': self.registry.create_component(Annotations, data).gen_json()}
# if get_component_type(Rows) in data:
# json_data['rows'] = self.registry.create_component(Rows, data).gen_json()
# if get_component_type(Templates) in data:
# json_data['templating'] = {
# 'list': self.registry.create_component(Templates, data).gen_json(),
# 'enable': True
# }
# return json_data
#
# Path: grafana_dashboards/context.py
# class Context(object):
#
# _pattern = re.compile('{.*}')
#
# def __init__(self, context=None):
# super(Context, self).__init__()
# if not context:
# self._context = None
# else:
# self._context = DictDefaultingToPlaceholder(context)
#
# def expand_placeholders(self, to_expand):
# """
#
# :rtype : dict
# """
# if not self._context:
# return to_expand
#
# if isinstance(to_expand, basestring):
# (result, to_expand) = self._expand(to_expand)
# while result != to_expand:
# (result, to_expand) = self._expand(result)
# if isinstance(result, basestring):
# return string.Formatter().vformat(result, (), self._context)
# else:
# return result
# elif isinstance(to_expand, list):
# return [self.expand_placeholders(value) for value in to_expand]
# elif isinstance(to_expand, dict):
# return dict([(key, self.expand_placeholders(value)) for (key, value) in to_expand.items()])
# else:
# return to_expand
#
# def _expand(self, to_expand):
# if not isinstance(to_expand, basestring):
# return to_expand, to_expand
# elif self._pattern.match(to_expand) and to_expand[1:-1] in self._context:
# return self._context[to_expand[1:-1]], to_expand
# escaped = to_expand.replace('{{', '{{{{').replace('}}', '}}}}')
# return string.Formatter().vformat(escaped, (), self._context), to_expand
#
# def __str__(self):
# return str(self._context)
#
# @staticmethod
# def create_context(data, keys_to_expand=None):
# return (Context(Context(context).expand_placeholders(context))
# for context in ContextExpander(keys_to_expand).create_context(None, data))
. Output only the next line. | for placeholder in get_placeholders(dashboard)] |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Project(ComponentBase):
def __init__(self, data, registry):
super(Project, self).__init__(data, registry)
self._placeholders = [placeholder for dashboard in self._get_dashboard_names()
for placeholder in get_placeholders(dashboard)]
def _get_dashboard_names(self):
return self.data.get('dashboards', [])
def get_dashboards(self):
<|code_end|>
. Use current file imports:
(from grafana_dashboards.components.base import ComponentBase, get_placeholders
from grafana_dashboards.components.dashboards import Dashboard
from grafana_dashboards.context import Context)
and context including class names, function names, or small code snippets from other files:
# Path: grafana_dashboards/components/base.py
# class ComponentBase(object):
# def __init__(self, data, registry):
# """
#
# :type registry: ComponentRegistry
# """
# super(ComponentBase, self).__init__()
# self.data = data[get_component_type(type(self))]
# if self.data is None:
# self.data = {}
# self.name = data.get('name')
# self.registry = registry
#
# def get_placeholders(component_name):
# return [v[1] for v in string.Formatter().parse(component_name) if v[1]]
#
# Path: grafana_dashboards/components/dashboards.py
# class Dashboard(JsonGenerator):
#
# # noinspection PySetFunctionToLiteral
# _copy_fields = set(['sharedCrosshair'])
#
# def gen_json_from_data(self, data, context):
# json_data = super(Dashboard, self).gen_json_from_data(data, context)
# nav = {
# 'type': 'timepicker'
# }
# json_data.update({
# 'title': data.get('title', self.name),
# 'nav': [
# nav
# ]
# })
# if 'time' in data:
# json_data['time'] = {
# 'from': data['time']['from'],
# 'to': data['time']['to']
# }
# if 'tags' in data:
# json_data['tags'] = data.get('tags')
# if 'time_options' in data:
# nav['time_options'] = data.get('time_options', [])
# if 'refresh_intervals' in data:
# nav['refresh_intervals'] = data.get('refresh_intervals', [])
# if 'refresh' in data:
# json_data['refresh'] = data.get('refresh')
# if 'folderId' in data:
# json_data['folderId'] = data.get('folderId')
# if get_component_type(Annotations) in data:
# json_data['annotations'] = {'list': self.registry.create_component(Annotations, data).gen_json()}
# if get_component_type(Rows) in data:
# json_data['rows'] = self.registry.create_component(Rows, data).gen_json()
# if get_component_type(Templates) in data:
# json_data['templating'] = {
# 'list': self.registry.create_component(Templates, data).gen_json(),
# 'enable': True
# }
# return json_data
#
# Path: grafana_dashboards/context.py
# class Context(object):
#
# _pattern = re.compile('{.*}')
#
# def __init__(self, context=None):
# super(Context, self).__init__()
# if not context:
# self._context = None
# else:
# self._context = DictDefaultingToPlaceholder(context)
#
# def expand_placeholders(self, to_expand):
# """
#
# :rtype : dict
# """
# if not self._context:
# return to_expand
#
# if isinstance(to_expand, basestring):
# (result, to_expand) = self._expand(to_expand)
# while result != to_expand:
# (result, to_expand) = self._expand(result)
# if isinstance(result, basestring):
# return string.Formatter().vformat(result, (), self._context)
# else:
# return result
# elif isinstance(to_expand, list):
# return [self.expand_placeholders(value) for value in to_expand]
# elif isinstance(to_expand, dict):
# return dict([(key, self.expand_placeholders(value)) for (key, value) in to_expand.items()])
# else:
# return to_expand
#
# def _expand(self, to_expand):
# if not isinstance(to_expand, basestring):
# return to_expand, to_expand
# elif self._pattern.match(to_expand) and to_expand[1:-1] in self._context:
# return self._context[to_expand[1:-1]], to_expand
# escaped = to_expand.replace('{{', '{{{{').replace('}}', '}}}}')
# return string.Formatter().vformat(escaped, (), self._context), to_expand
#
# def __str__(self):
# return str(self._context)
#
# @staticmethod
# def create_context(data, keys_to_expand=None):
# return (Context(Context(context).expand_placeholders(context))
# for context in ContextExpander(keys_to_expand).create_context(None, data))
. Output only the next line. | return [self.registry.get_component(Dashboard, dashboard_name) for dashboard_name in |
Next line prediction: <|code_start|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Project(ComponentBase):
def __init__(self, data, registry):
super(Project, self).__init__(data, registry)
self._placeholders = [placeholder for dashboard in self._get_dashboard_names()
for placeholder in get_placeholders(dashboard)]
def _get_dashboard_names(self):
return self.data.get('dashboards', [])
def get_dashboards(self):
return [self.registry.get_component(Dashboard, dashboard_name) for dashboard_name in
self._get_dashboard_names()]
def get_contexts(self, context=None):
if context is None:
context = {}
data = self.data.copy()
data.update(context)
<|code_end|>
. Use current file imports:
(from grafana_dashboards.components.base import ComponentBase, get_placeholders
from grafana_dashboards.components.dashboards import Dashboard
from grafana_dashboards.context import Context)
and context including class names, function names, or small code snippets from other files:
# Path: grafana_dashboards/components/base.py
# class ComponentBase(object):
# def __init__(self, data, registry):
# """
#
# :type registry: ComponentRegistry
# """
# super(ComponentBase, self).__init__()
# self.data = data[get_component_type(type(self))]
# if self.data is None:
# self.data = {}
# self.name = data.get('name')
# self.registry = registry
#
# def get_placeholders(component_name):
# return [v[1] for v in string.Formatter().parse(component_name) if v[1]]
#
# Path: grafana_dashboards/components/dashboards.py
# class Dashboard(JsonGenerator):
#
# # noinspection PySetFunctionToLiteral
# _copy_fields = set(['sharedCrosshair'])
#
# def gen_json_from_data(self, data, context):
# json_data = super(Dashboard, self).gen_json_from_data(data, context)
# nav = {
# 'type': 'timepicker'
# }
# json_data.update({
# 'title': data.get('title', self.name),
# 'nav': [
# nav
# ]
# })
# if 'time' in data:
# json_data['time'] = {
# 'from': data['time']['from'],
# 'to': data['time']['to']
# }
# if 'tags' in data:
# json_data['tags'] = data.get('tags')
# if 'time_options' in data:
# nav['time_options'] = data.get('time_options', [])
# if 'refresh_intervals' in data:
# nav['refresh_intervals'] = data.get('refresh_intervals', [])
# if 'refresh' in data:
# json_data['refresh'] = data.get('refresh')
# if 'folderId' in data:
# json_data['folderId'] = data.get('folderId')
# if get_component_type(Annotations) in data:
# json_data['annotations'] = {'list': self.registry.create_component(Annotations, data).gen_json()}
# if get_component_type(Rows) in data:
# json_data['rows'] = self.registry.create_component(Rows, data).gen_json()
# if get_component_type(Templates) in data:
# json_data['templating'] = {
# 'list': self.registry.create_component(Templates, data).gen_json(),
# 'enable': True
# }
# return json_data
#
# Path: grafana_dashboards/context.py
# class Context(object):
#
# _pattern = re.compile('{.*}')
#
# def __init__(self, context=None):
# super(Context, self).__init__()
# if not context:
# self._context = None
# else:
# self._context = DictDefaultingToPlaceholder(context)
#
# def expand_placeholders(self, to_expand):
# """
#
# :rtype : dict
# """
# if not self._context:
# return to_expand
#
# if isinstance(to_expand, basestring):
# (result, to_expand) = self._expand(to_expand)
# while result != to_expand:
# (result, to_expand) = self._expand(result)
# if isinstance(result, basestring):
# return string.Formatter().vformat(result, (), self._context)
# else:
# return result
# elif isinstance(to_expand, list):
# return [self.expand_placeholders(value) for value in to_expand]
# elif isinstance(to_expand, dict):
# return dict([(key, self.expand_placeholders(value)) for (key, value) in to_expand.items()])
# else:
# return to_expand
#
# def _expand(self, to_expand):
# if not isinstance(to_expand, basestring):
# return to_expand, to_expand
# elif self._pattern.match(to_expand) and to_expand[1:-1] in self._context:
# return self._context[to_expand[1:-1]], to_expand
# escaped = to_expand.replace('{{', '{{{{').replace('}}', '}}}}')
# return string.Formatter().vformat(escaped, (), self._context), to_expand
#
# def __str__(self):
# return str(self._context)
#
# @staticmethod
# def create_context(data, keys_to_expand=None):
# return (Context(Context(context).expand_placeholders(context))
# for context in ContextExpander(keys_to_expand).create_context(None, data))
. Output only the next line. | return Context.create_context(data, self._placeholders) |
Given the following code snippet before the placeholder: <|code_start|>def pytest_generate_tests(metafunc):
fixtures = []
ids = []
for (component, test, config, output) in load_test_fixtures():
fixtures.append((component, config, output))
ids.append('%s;%s' % (grafana_dashboards.common.get_component_type(component), test))
metafunc.parametrize('component,config,expected', fixtures, ids=ids)
def load_test_fixtures():
for component in base.get_generators(): # NOQA
component_type = grafana_dashboards.common.get_component_type(component)
dirname = os.path.join(os.path.dirname(os.path.abspath(__file__)), component_type)
if not os.path.isdir(dirname):
continue
for f in os.listdir(dirname):
if not f.endswith('.yaml'):
continue
filename = f[:-5]
with open(os.path.join(dirname, '%s.yaml' % filename), 'r') as fp:
config = yaml.load(fp, Loader=yaml.FullLoader)
with open(os.path.join(dirname, '%s.json' % filename), 'r') as fp:
output = json.load(fp)
yield component, filename, config, output
def test_component(component, config, expected):
with mock.patch('grafana_dashboards.components.base.ComponentRegistry') as registry:
def create_component(component_type, data):
gen = mock.Mock()
<|code_end|>
, predict the next line using imports from the current file:
import inspect
import json
import os
import mock as mock
import yaml
import grafana_dashboards.common
from grafana_dashboards.components import * # NOQA
from grafana_dashboards.components.base import JsonListGenerator
from grafana_dashboards.errors import UnregisteredComponentError
and context including class names, function names, and sometimes code from other files:
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# Path: grafana_dashboards/errors.py
# class UnregisteredComponentError(DashboardBuilderException):
# pass
. Output only the next line. | if inspect.isclass(component_type) and issubclass(component_type, JsonListGenerator): |
Continue the code snippet: <|code_start|> for component in base.get_generators(): # NOQA
component_type = grafana_dashboards.common.get_component_type(component)
dirname = os.path.join(os.path.dirname(os.path.abspath(__file__)), component_type)
if not os.path.isdir(dirname):
continue
for f in os.listdir(dirname):
if not f.endswith('.yaml'):
continue
filename = f[:-5]
with open(os.path.join(dirname, '%s.yaml' % filename), 'r') as fp:
config = yaml.load(fp, Loader=yaml.FullLoader)
with open(os.path.join(dirname, '%s.json' % filename), 'r') as fp:
output = json.load(fp)
yield component, filename, config, output
def test_component(component, config, expected):
with mock.patch('grafana_dashboards.components.base.ComponentRegistry') as registry:
def create_component(component_type, data):
gen = mock.Mock()
if inspect.isclass(component_type) and issubclass(component_type, JsonListGenerator):
gen.gen_json = mock.Mock(return_value=['mocked ' + str(component_type)])
else:
gen.gen_json = mock.Mock(return_value='mocked ' + str(component_type))
return gen
registry.create_component = mock.Mock(side_effect=create_component)
def get_component(component_type, name):
if name == 'not-mocked':
<|code_end|>
. Use current file imports:
import inspect
import json
import os
import mock as mock
import yaml
import grafana_dashboards.common
from grafana_dashboards.components import * # NOQA
from grafana_dashboards.components.base import JsonListGenerator
from grafana_dashboards.errors import UnregisteredComponentError
and context (classes, functions, or code) from other files:
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# Path: grafana_dashboards/errors.py
# class UnregisteredComponentError(DashboardBuilderException):
# pass
. Output only the next line. | raise UnregisteredComponentError("No component '%s' with name '%s' found!" % (component_type, name)) |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Annotations(JsonListGenerator):
def __init__(self, data, registry):
super(Annotations, self).__init__(data, registry, AnnotationsItemBase)
<|code_end|>
, predict the immediate next line with the help of imports:
from grafana_dashboards.components.base import JsonListGenerator, JsonGenerator
and context (classes, functions, sometimes code) from other files:
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
. Output only the next line. | class AnnotationsItemBase(JsonGenerator): |
Predict the next line for this snippet: <|code_start|> assert request.get_method() == capture.value.get_method()
assert request.data.encode('utf-8') == capture.value.data
def test_connection_with_token():
connection = BearerAuthConnection('token', 'https://host')
connection._opener = MagicMock()
# noinspection PyProtectedMember
connection._opener.open().read.return_value = '{"hello":"world"}'
assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
request = Request('https://host/uri',
'{"it\'s": "alive"}',
headers={
'Content-type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer token'
})
capture = Capture()
# noinspection PyProtectedMember
connection._opener.open.assert_called_with(capture)
assert request.get_full_url() == capture.value.get_full_url()
assert request.header_items() == capture.value.header_items()
assert request.get_method() == capture.value.get_method()
assert request.data.encode('utf-8') == capture.value.data
@patch('requests.post')
def test_connection_with_kerberos(post):
<|code_end|>
with the help of current file imports:
from urllib2 import Request
from urllib.request import Request
from mock import MagicMock, patch
from requests_kerberos import HTTPKerberosAuth
from grafana_dashboards.client.connection import KerberosConnection, BasicAuthConnection, BearerAuthConnection
and context from other files:
# Path: grafana_dashboards/client/connection.py
# class KerberosConnection(object):
# def __init__(self, host):
# logger.debug('Creating new kerberos connection with host=%s', host)
# self._host = host
#
# def make_request(self, uri, body=None):
# response = requests.post('{0}{1}'.format(self._host, uri), json=body, auth=HTTPKerberosAuth(), verify=False)
# return response.json()
#
# class BasicAuthConnection(BaseConnection):
# def __init__(self, username, password, host, debug=0):
# logger.debug('Creating new connection with username=%s host=%s', username, host)
#
# base64string = base64.encodestring(('%s:%s' % (username, password)).encode('utf-8')).replace(b'\n', b'')
#
# super(BasicAuthConnection, self).__init__(host, b'Basic ' + base64string, debug)
#
# class BearerAuthConnection(BaseConnection):
# def __init__(self, token, host, debug=0):
# logger.debug('Creating new connection with token=%s host=%s', token[:5], host)
#
# super(BearerAuthConnection, self).__init__(host, 'Bearer %s' % token.strip(), debug)
, which may contain function names, class names, or code. Output only the next line. | connection = KerberosConnection('https://host') |
Predict the next line for this snippet: <|code_start|># http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
try:
except ImportError:
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Capture(object):
"""
Class for use in method call verification that captures call argument that can be tested later on.
"""
def __eq__(self, other):
"""
Captures argument and always returns true to make verification successful.
:return: True
"""
self.value = other
return True
def test_connection():
<|code_end|>
with the help of current file imports:
from urllib2 import Request
from urllib.request import Request
from mock import MagicMock, patch
from requests_kerberos import HTTPKerberosAuth
from grafana_dashboards.client.connection import KerberosConnection, BasicAuthConnection, BearerAuthConnection
and context from other files:
# Path: grafana_dashboards/client/connection.py
# class KerberosConnection(object):
# def __init__(self, host):
# logger.debug('Creating new kerberos connection with host=%s', host)
# self._host = host
#
# def make_request(self, uri, body=None):
# response = requests.post('{0}{1}'.format(self._host, uri), json=body, auth=HTTPKerberosAuth(), verify=False)
# return response.json()
#
# class BasicAuthConnection(BaseConnection):
# def __init__(self, username, password, host, debug=0):
# logger.debug('Creating new connection with username=%s host=%s', username, host)
#
# base64string = base64.encodestring(('%s:%s' % (username, password)).encode('utf-8')).replace(b'\n', b'')
#
# super(BasicAuthConnection, self).__init__(host, b'Basic ' + base64string, debug)
#
# class BearerAuthConnection(BaseConnection):
# def __init__(self, token, host, debug=0):
# logger.debug('Creating new connection with token=%s host=%s', token[:5], host)
#
# super(BearerAuthConnection, self).__init__(host, 'Bearer %s' % token.strip(), debug)
, which may contain function names, class names, or code. Output only the next line. | connection = BasicAuthConnection('username', 'password', 'https://host') |
Next line prediction: <|code_start|> """
self.value = other
return True
def test_connection():
connection = BasicAuthConnection('username', 'password', 'https://host')
connection._opener = MagicMock()
# noinspection PyProtectedMember
connection._opener.open().read.return_value = '{"hello":"world"}'
assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
request = Request('https://host/uri',
'{"it\'s": "alive"}',
headers={
'Content-type': 'application/json',
'Accept': 'application/json',
'Authorization': b'Basic dXNlcm5hbWU6cGFzc3dvcmQ='
})
capture = Capture()
# noinspection PyProtectedMember
connection._opener.open.assert_called_with(capture)
assert request.get_full_url() == capture.value.get_full_url()
assert request.header_items() == capture.value.header_items()
assert request.get_method() == capture.value.get_method()
assert request.data.encode('utf-8') == capture.value.data
def test_connection_with_token():
<|code_end|>
. Use current file imports:
( from urllib2 import Request
from urllib.request import Request
from mock import MagicMock, patch
from requests_kerberos import HTTPKerberosAuth
from grafana_dashboards.client.connection import KerberosConnection, BasicAuthConnection, BearerAuthConnection)
and context including class names, function names, or small code snippets from other files:
# Path: grafana_dashboards/client/connection.py
# class KerberosConnection(object):
# def __init__(self, host):
# logger.debug('Creating new kerberos connection with host=%s', host)
# self._host = host
#
# def make_request(self, uri, body=None):
# response = requests.post('{0}{1}'.format(self._host, uri), json=body, auth=HTTPKerberosAuth(), verify=False)
# return response.json()
#
# class BasicAuthConnection(BaseConnection):
# def __init__(self, username, password, host, debug=0):
# logger.debug('Creating new connection with username=%s host=%s', username, host)
#
# base64string = base64.encodestring(('%s:%s' % (username, password)).encode('utf-8')).replace(b'\n', b'')
#
# super(BasicAuthConnection, self).__init__(host, b'Basic ' + base64string, debug)
#
# class BearerAuthConnection(BaseConnection):
# def __init__(self, token, host, debug=0):
# logger.debug('Creating new connection with token=%s host=%s', token[:5], host)
#
# super(BearerAuthConnection, self).__init__(host, 'Bearer %s' % token.strip(), debug)
. Output only the next line. | connection = BearerAuthConnection('token', 'https://host') |
Based on the snippet: <|code_start|># http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Templates(JsonListGenerator):
def __init__(self, data, registry):
super(Templates, self).__init__(data, registry, TemplatesItemBase)
<|code_end|>
, predict the immediate next line with the help of imports:
from grafana_dashboards.components.base import JsonListGenerator, JsonGenerator
and context (classes, functions, sometimes code) from other files:
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
. Output only the next line. | class TemplatesItemBase(JsonGenerator): |
Given the following code snippet before the placeholder: <|code_start|># you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class DummyExporter(object):
def __init__(self, prop, **kwargs):
super(DummyExporter, self).__init__()
self.prop = prop
self.kwargs = kwargs
def test_initialize_exporters():
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.yaml')
config = Config(config_file)
# noinspection PyProtectedMember
<|code_end|>
, predict the next line using imports from the current file:
import os
from mock import patch
from grafana_dashboards import cli
from grafana_dashboards.config import Config
and context including class names, function names, and sometimes code from other files:
# Path: grafana_dashboards/cli.py
# def _initialize_exporters(exporter_names, exporter_types, config):
# def _process_paths(paths):
# def main():
#
# Path: grafana_dashboards/config.py
# class Config(object):
#
# def __init__(self, config=None):
# super(Config, self).__init__()
# if not os.path.exists(config):
# logger.debug("Config file '%s' does not exist", config)
# self._config = {}
# else:
# with open(config) as fp:
# self._config = yaml.load(fp, Loader=yaml.FullLoader)
#
# def get_config(self, section):
# return self._config.setdefault(section, {})
. Output only the next line. | exporters = cli._initialize_exporters('dummy', [DummyExporter], config) |
Given the following code snippet before the placeholder: <|code_start|>#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class DummyExporter(object):
def __init__(self, prop, **kwargs):
super(DummyExporter, self).__init__()
self.prop = prop
self.kwargs = kwargs
def test_initialize_exporters():
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.yaml')
<|code_end|>
, predict the next line using imports from the current file:
import os
from mock import patch
from grafana_dashboards import cli
from grafana_dashboards.config import Config
and context including class names, function names, and sometimes code from other files:
# Path: grafana_dashboards/cli.py
# def _initialize_exporters(exporter_names, exporter_types, config):
# def _process_paths(paths):
# def main():
#
# Path: grafana_dashboards/config.py
# class Config(object):
#
# def __init__(self, config=None):
# super(Config, self).__init__()
# if not os.path.exists(config):
# logger.debug("Config file '%s' does not exist", config)
# self._config = {}
# else:
# with open(config) as fp:
# self._config = yaml.load(fp, Loader=yaml.FullLoader)
#
# def get_config(self, section):
# return self._config.setdefault(section, {})
. Output only the next line. | config = Config(config_file) |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class DefinitionParser(object):
def __init__(self):
super(DefinitionParser, self).__init__()
def load_projects(self, paths):
<|code_end|>
, determine the next line of code. You have imports:
import yaml
from grafana_dashboards.components.base import ComponentRegistry
from grafana_dashboards.components.projects import Project
and context (class names, function names, or code) available:
# Path: grafana_dashboards/components/base.py
# class ComponentRegistry(object):
# def __init__(self):
# super(ComponentRegistry, self).__init__()
# self._types = {}
# self._components = {}
# for clazz in _get_subclasses(ComponentBase):
# logger.info('Loading component type %s', clazz)
# self._types[get_component_type(clazz)] = clazz
# self._components[clazz] = {}
#
# def _class_for_type(self, component_type):
# if isinstance(component_type, basestring):
# component_type = self._types.get(component_type)
# if self._components.get(component_type) is None:
# raise errors.UnregisteredComponentError("No component of type '%s' found!" % component_type)
# return component_type
#
# def add(self, component):
# """
#
# :type component: dict
# """
# if len(component) > 2:
# raise errors.WrongComponentAttributeCountError(
# 'Component must have exactly 2 attributes - name and component type with data.'
# 'This contains %s attributes' % len(component.keys()))
# component_name = component.get('name')
# if component_name is None:
# logger.info("Component '%s' does not have 'name' attribute, skipping", component.keys())
# return
# component_type = None
# for key in component.keys():
# if key == 'name':
# continue
# component_type = key
# break
# try:
# clazz = self._class_for_type(component_type)
# except errors.UnregisteredComponentError:
# logger.info("Missing implementation class for component '%s', skipping", component_type)
# return
# logger.debug("Adding component '%s' with name '%s'", component_type, component_name)
# components = self._get_component(clazz)
# if component_name in components:
# raise errors.DuplicateKeyError(
# "Key '%s' is already defined for component %s" % (component_name, component_type))
# components[component_name] = self.create_component(clazz, component)
#
# def __getitem__(self, item):
# return self._get_component(item).values()
#
# def _get_component(self, item):
# component = self._components.get(item)
# if component is None:
# raise errors.UnregisteredComponentError("No component of type '%s' found!" % item)
# return component
#
# def create_component(self, component_type, data):
# return self._class_for_type(component_type)(data, self)
#
# def get_component(self, component_type, name):
# component = self._get_component(component_type).get(name)
# if component is None:
# raise errors.UnregisteredComponentError("No component '%s' with name '%s' found!" % (component_type, name))
# return component
#
# Path: grafana_dashboards/components/projects.py
# class Project(ComponentBase):
# def __init__(self, data, registry):
# super(Project, self).__init__(data, registry)
# self._placeholders = [placeholder for dashboard in self._get_dashboard_names()
# for placeholder in get_placeholders(dashboard)]
#
# def _get_dashboard_names(self):
# return self.data.get('dashboards', [])
#
# def get_dashboards(self):
# return [self.registry.get_component(Dashboard, dashboard_name) for dashboard_name in
# self._get_dashboard_names()]
#
# def get_contexts(self, context=None):
# if context is None:
# context = {}
# data = self.data.copy()
# data.update(context)
# return Context.create_context(data, self._placeholders)
. Output only the next line. | registry = ComponentRegistry() |
Given the code snippet: <|code_start|># Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class DefinitionParser(object):
def __init__(self):
super(DefinitionParser, self).__init__()
def load_projects(self, paths):
registry = ComponentRegistry()
for path in paths:
with open(path, 'r') as fp:
for component in self._iter_over_all(yaml.load_all(fp, Loader=yaml.FullLoader)):
registry.add(component)
<|code_end|>
, generate the next line using the imports in this file:
import yaml
from grafana_dashboards.components.base import ComponentRegistry
from grafana_dashboards.components.projects import Project
and context (functions, classes, or occasionally code) from other files:
# Path: grafana_dashboards/components/base.py
# class ComponentRegistry(object):
# def __init__(self):
# super(ComponentRegistry, self).__init__()
# self._types = {}
# self._components = {}
# for clazz in _get_subclasses(ComponentBase):
# logger.info('Loading component type %s', clazz)
# self._types[get_component_type(clazz)] = clazz
# self._components[clazz] = {}
#
# def _class_for_type(self, component_type):
# if isinstance(component_type, basestring):
# component_type = self._types.get(component_type)
# if self._components.get(component_type) is None:
# raise errors.UnregisteredComponentError("No component of type '%s' found!" % component_type)
# return component_type
#
# def add(self, component):
# """
#
# :type component: dict
# """
# if len(component) > 2:
# raise errors.WrongComponentAttributeCountError(
# 'Component must have exactly 2 attributes - name and component type with data.'
# 'This contains %s attributes' % len(component.keys()))
# component_name = component.get('name')
# if component_name is None:
# logger.info("Component '%s' does not have 'name' attribute, skipping", component.keys())
# return
# component_type = None
# for key in component.keys():
# if key == 'name':
# continue
# component_type = key
# break
# try:
# clazz = self._class_for_type(component_type)
# except errors.UnregisteredComponentError:
# logger.info("Missing implementation class for component '%s', skipping", component_type)
# return
# logger.debug("Adding component '%s' with name '%s'", component_type, component_name)
# components = self._get_component(clazz)
# if component_name in components:
# raise errors.DuplicateKeyError(
# "Key '%s' is already defined for component %s" % (component_name, component_type))
# components[component_name] = self.create_component(clazz, component)
#
# def __getitem__(self, item):
# return self._get_component(item).values()
#
# def _get_component(self, item):
# component = self._components.get(item)
# if component is None:
# raise errors.UnregisteredComponentError("No component of type '%s' found!" % item)
# return component
#
# def create_component(self, component_type, data):
# return self._class_for_type(component_type)(data, self)
#
# def get_component(self, component_type, name):
# component = self._get_component(component_type).get(name)
# if component is None:
# raise errors.UnregisteredComponentError("No component '%s' with name '%s' found!" % (component_type, name))
# return component
#
# Path: grafana_dashboards/components/projects.py
# class Project(ComponentBase):
# def __init__(self, data, registry):
# super(Project, self).__init__(data, registry)
# self._placeholders = [placeholder for dashboard in self._get_dashboard_names()
# for placeholder in get_placeholders(dashboard)]
#
# def _get_dashboard_names(self):
# return self.data.get('dashboards', [])
#
# def get_dashboards(self):
# return [self.registry.get_component(Dashboard, dashboard_name) for dashboard_name in
# self._get_dashboard_names()]
#
# def get_contexts(self, context=None):
# if context is None:
# context = {}
# data = self.data.copy()
# data.update(context)
# return Context.create_context(data, self._placeholders)
. Output only the next line. | return registry[Project] |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
def test_existent_config_file():
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.yaml')
<|code_end|>
with the help of current file imports:
import os
from grafana_dashboards.config import Config
and context from other files:
# Path: grafana_dashboards/config.py
# class Config(object):
#
# def __init__(self, config=None):
# super(Config, self).__init__()
# if not os.path.exists(config):
# logger.debug("Config file '%s' does not exist", config)
# self._config = {}
# else:
# with open(config) as fp:
# self._config = yaml.load(fp, Loader=yaml.FullLoader)
#
# def get_config(self, section):
# return self._config.setdefault(section, {})
, which may contain function names, class names, or code. Output only the next line. | config = Config(config_file) |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class TestBase(ComponentBase):
pass
def test_registry_unregistered_component():
registry = ComponentRegistry()
registry.add({'name': 'name', 'non-existent': {}})
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from grafana_dashboards import errors
from grafana_dashboards.components.base import ComponentRegistry, ComponentBase
and context (functions, classes, or occasionally code) from other files:
# Path: grafana_dashboards/errors.py
# class DashboardBuilderException(Exception):
# class DuplicateKeyError(DashboardBuilderException):
# class WrongComponentAttributeCountError(DashboardBuilderException):
# class UnregisteredComponentError(DashboardBuilderException):
#
# Path: grafana_dashboards/components/base.py
# class ComponentRegistry(object):
# def __init__(self):
# super(ComponentRegistry, self).__init__()
# self._types = {}
# self._components = {}
# for clazz in _get_subclasses(ComponentBase):
# logger.info('Loading component type %s', clazz)
# self._types[get_component_type(clazz)] = clazz
# self._components[clazz] = {}
#
# def _class_for_type(self, component_type):
# if isinstance(component_type, basestring):
# component_type = self._types.get(component_type)
# if self._components.get(component_type) is None:
# raise errors.UnregisteredComponentError("No component of type '%s' found!" % component_type)
# return component_type
#
# def add(self, component):
# """
#
# :type component: dict
# """
# if len(component) > 2:
# raise errors.WrongComponentAttributeCountError(
# 'Component must have exactly 2 attributes - name and component type with data.'
# 'This contains %s attributes' % len(component.keys()))
# component_name = component.get('name')
# if component_name is None:
# logger.info("Component '%s' does not have 'name' attribute, skipping", component.keys())
# return
# component_type = None
# for key in component.keys():
# if key == 'name':
# continue
# component_type = key
# break
# try:
# clazz = self._class_for_type(component_type)
# except errors.UnregisteredComponentError:
# logger.info("Missing implementation class for component '%s', skipping", component_type)
# return
# logger.debug("Adding component '%s' with name '%s'", component_type, component_name)
# components = self._get_component(clazz)
# if component_name in components:
# raise errors.DuplicateKeyError(
# "Key '%s' is already defined for component %s" % (component_name, component_type))
# components[component_name] = self.create_component(clazz, component)
#
# def __getitem__(self, item):
# return self._get_component(item).values()
#
# def _get_component(self, item):
# component = self._components.get(item)
# if component is None:
# raise errors.UnregisteredComponentError("No component of type '%s' found!" % item)
# return component
#
# def create_component(self, component_type, data):
# return self._class_for_type(component_type)(data, self)
#
# def get_component(self, component_type, name):
# component = self._get_component(component_type).get(name)
# if component is None:
# raise errors.UnregisteredComponentError("No component '%s' with name '%s' found!" % (component_type, name))
# return component
#
# class ComponentBase(object):
# def __init__(self, data, registry):
# """
#
# :type registry: ComponentRegistry
# """
# super(ComponentBase, self).__init__()
# self.data = data[get_component_type(type(self))]
# if self.data is None:
# self.data = {}
# self.name = data.get('name')
# self.registry = registry
. Output only the next line. | with pytest.raises(errors.UnregisteredComponentError): |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class TestBase(ComponentBase):
pass
def test_registry_unregistered_component():
<|code_end|>
with the help of current file imports:
import pytest
from grafana_dashboards import errors
from grafana_dashboards.components.base import ComponentRegistry, ComponentBase
and context from other files:
# Path: grafana_dashboards/errors.py
# class DashboardBuilderException(Exception):
# class DuplicateKeyError(DashboardBuilderException):
# class WrongComponentAttributeCountError(DashboardBuilderException):
# class UnregisteredComponentError(DashboardBuilderException):
#
# Path: grafana_dashboards/components/base.py
# class ComponentRegistry(object):
# def __init__(self):
# super(ComponentRegistry, self).__init__()
# self._types = {}
# self._components = {}
# for clazz in _get_subclasses(ComponentBase):
# logger.info('Loading component type %s', clazz)
# self._types[get_component_type(clazz)] = clazz
# self._components[clazz] = {}
#
# def _class_for_type(self, component_type):
# if isinstance(component_type, basestring):
# component_type = self._types.get(component_type)
# if self._components.get(component_type) is None:
# raise errors.UnregisteredComponentError("No component of type '%s' found!" % component_type)
# return component_type
#
# def add(self, component):
# """
#
# :type component: dict
# """
# if len(component) > 2:
# raise errors.WrongComponentAttributeCountError(
# 'Component must have exactly 2 attributes - name and component type with data.'
# 'This contains %s attributes' % len(component.keys()))
# component_name = component.get('name')
# if component_name is None:
# logger.info("Component '%s' does not have 'name' attribute, skipping", component.keys())
# return
# component_type = None
# for key in component.keys():
# if key == 'name':
# continue
# component_type = key
# break
# try:
# clazz = self._class_for_type(component_type)
# except errors.UnregisteredComponentError:
# logger.info("Missing implementation class for component '%s', skipping", component_type)
# return
# logger.debug("Adding component '%s' with name '%s'", component_type, component_name)
# components = self._get_component(clazz)
# if component_name in components:
# raise errors.DuplicateKeyError(
# "Key '%s' is already defined for component %s" % (component_name, component_type))
# components[component_name] = self.create_component(clazz, component)
#
# def __getitem__(self, item):
# return self._get_component(item).values()
#
# def _get_component(self, item):
# component = self._components.get(item)
# if component is None:
# raise errors.UnregisteredComponentError("No component of type '%s' found!" % item)
# return component
#
# def create_component(self, component_type, data):
# return self._class_for_type(component_type)(data, self)
#
# def get_component(self, component_type, name):
# component = self._get_component(component_type).get(name)
# if component is None:
# raise errors.UnregisteredComponentError("No component '%s' with name '%s' found!" % (component_type, name))
# return component
#
# class ComponentBase(object):
# def __init__(self, data, registry):
# """
#
# :type registry: ComponentRegistry
# """
# super(ComponentBase, self).__init__()
# self.data = data[get_component_type(type(self))]
# if self.data is None:
# self.data = {}
# self.name = data.get('name')
# self.registry = registry
, which may contain function names, class names, or code. Output only the next line. | registry = ComponentRegistry() |
Using the snippet: <|code_start|># You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
logger = logging.getLogger(__name__)
class ElasticSearchExporter(DashboardExporter):
def __init__(self, **kwargs):
super(ElasticSearchExporter, self).__init__()
self._host = os.getenv('ES_HOST', kwargs.get('host'))
password = os.getenv('ES_PASSWORD', kwargs.get('password'))
username = os.getenv('ES_USERNAME', kwargs.get('username'))
use_kerberos = os.getenv('ES_USE_KERBEROS', kwargs.get('use_kerberos'))
if use_kerberos:
self._connection = KerberosConnection(self._host)
else:
<|code_end|>
, determine the next line of code. You have imports:
import json
import logging
import os
from grafana_dashboards.client.connection import BasicAuthConnection, KerberosConnection
from grafana_dashboards.exporter import DashboardExporter
and context (class names, function names, or code) available:
# Path: grafana_dashboards/client/connection.py
# class BasicAuthConnection(BaseConnection):
# def __init__(self, username, password, host, debug=0):
# logger.debug('Creating new connection with username=%s host=%s', username, host)
#
# base64string = base64.encodestring(('%s:%s' % (username, password)).encode('utf-8')).replace(b'\n', b'')
#
# super(BasicAuthConnection, self).__init__(host, b'Basic ' + base64string, debug)
#
# class KerberosConnection(object):
# def __init__(self, host):
# logger.debug('Creating new kerberos connection with host=%s', host)
# self._host = host
#
# def make_request(self, uri, body=None):
# response = requests.post('{0}{1}'.format(self._host, uri), json=body, auth=HTTPKerberosAuth(), verify=False)
# return response.json()
#
# Path: grafana_dashboards/exporter.py
# class DashboardExporter(object):
#
# def process_dashboard(self, project_name, dashboard_name, dashboard_data):
# pass
. Output only the next line. | self._connection = BasicAuthConnection(username, password, self._host) |
Using the snippet: <|code_start|># Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
logger = logging.getLogger(__name__)
class ElasticSearchExporter(DashboardExporter):
def __init__(self, **kwargs):
super(ElasticSearchExporter, self).__init__()
self._host = os.getenv('ES_HOST', kwargs.get('host'))
password = os.getenv('ES_PASSWORD', kwargs.get('password'))
username = os.getenv('ES_USERNAME', kwargs.get('username'))
use_kerberos = os.getenv('ES_USE_KERBEROS', kwargs.get('use_kerberos'))
if use_kerberos:
<|code_end|>
, determine the next line of code. You have imports:
import json
import logging
import os
from grafana_dashboards.client.connection import BasicAuthConnection, KerberosConnection
from grafana_dashboards.exporter import DashboardExporter
and context (class names, function names, or code) available:
# Path: grafana_dashboards/client/connection.py
# class BasicAuthConnection(BaseConnection):
# def __init__(self, username, password, host, debug=0):
# logger.debug('Creating new connection with username=%s host=%s', username, host)
#
# base64string = base64.encodestring(('%s:%s' % (username, password)).encode('utf-8')).replace(b'\n', b'')
#
# super(BasicAuthConnection, self).__init__(host, b'Basic ' + base64string, debug)
#
# class KerberosConnection(object):
# def __init__(self, host):
# logger.debug('Creating new kerberos connection with host=%s', host)
# self._host = host
#
# def make_request(self, uri, body=None):
# response = requests.post('{0}{1}'.format(self._host, uri), json=body, auth=HTTPKerberosAuth(), verify=False)
# return response.json()
#
# Path: grafana_dashboards/exporter.py
# class DashboardExporter(object):
#
# def process_dashboard(self, project_name, dashboard_name, dashboard_data):
# pass
. Output only the next line. | self._connection = KerberosConnection(self._host) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
logger = logging.getLogger(__name__)
<|code_end|>
, predict the next line using imports from the current file:
import json
import logging
import os
from grafana_dashboards.client.connection import BasicAuthConnection, KerberosConnection
from grafana_dashboards.exporter import DashboardExporter
and context including class names, function names, and sometimes code from other files:
# Path: grafana_dashboards/client/connection.py
# class BasicAuthConnection(BaseConnection):
# def __init__(self, username, password, host, debug=0):
# logger.debug('Creating new connection with username=%s host=%s', username, host)
#
# base64string = base64.encodestring(('%s:%s' % (username, password)).encode('utf-8')).replace(b'\n', b'')
#
# super(BasicAuthConnection, self).__init__(host, b'Basic ' + base64string, debug)
#
# class KerberosConnection(object):
# def __init__(self, host):
# logger.debug('Creating new kerberos connection with host=%s', host)
# self._host = host
#
# def make_request(self, uri, body=None):
# response = requests.post('{0}{1}'.format(self._host, uri), json=body, auth=HTTPKerberosAuth(), verify=False)
# return response.json()
#
# Path: grafana_dashboards/exporter.py
# class DashboardExporter(object):
#
# def process_dashboard(self, project_name, dashboard_name, dashboard_data):
# pass
. Output only the next line. | class ElasticSearchExporter(DashboardExporter): |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
try:
basestring
except NameError:
basestring = str
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
<|code_end|>
, determine the next line of code. You have imports:
from grafana_dashboards.components.base import JsonListGenerator, JsonGenerator
and context (class names, function names, or code) available:
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
. Output only the next line. | class Links(JsonListGenerator): |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
try:
basestring
except NameError:
basestring = str
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Links(JsonListGenerator):
def __init__(self, data, registry):
super(Links, self).__init__(data, registry, LinksItemBase)
<|code_end|>
with the help of current file imports:
from grafana_dashboards.components.base import JsonListGenerator, JsonGenerator
and context from other files:
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
, which may contain function names, class names, or code. Output only the next line. | class LinksItemBase(JsonGenerator): |
Next line prediction: <|code_start|>
def _get_subclasses(clazz):
"""
:type clazz: type
"""
direct_subclasses = clazz.__subclasses__()
return [sub for sub in
direct_subclasses + [sub_class for direct in direct_subclasses for sub_class in _get_subclasses(direct)]
if sub not in (ComponentBase, JsonGenerator, JsonListGenerator)]
def get_placeholders(component_name):
return [v[1] for v in string.Formatter().parse(component_name) if v[1]]
class ComponentRegistry(object):
def __init__(self):
super(ComponentRegistry, self).__init__()
self._types = {}
self._components = {}
for clazz in _get_subclasses(ComponentBase):
logger.info('Loading component type %s', clazz)
self._types[get_component_type(clazz)] = clazz
self._components[clazz] = {}
def _class_for_type(self, component_type):
if isinstance(component_type, basestring):
component_type = self._types.get(component_type)
if self._components.get(component_type) is None:
<|code_end|>
. Use current file imports:
(import logging
import string
from grafana_dashboards import errors
from grafana_dashboards.common import get_component_type
from grafana_dashboards.context import Context)
and context including class names, function names, or small code snippets from other files:
# Path: grafana_dashboards/errors.py
# class DashboardBuilderException(Exception):
# class DuplicateKeyError(DashboardBuilderException):
# class WrongComponentAttributeCountError(DashboardBuilderException):
# class UnregisteredComponentError(DashboardBuilderException):
#
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/context.py
# class Context(object):
#
# _pattern = re.compile('{.*}')
#
# def __init__(self, context=None):
# super(Context, self).__init__()
# if not context:
# self._context = None
# else:
# self._context = DictDefaultingToPlaceholder(context)
#
# def expand_placeholders(self, to_expand):
# """
#
# :rtype : dict
# """
# if not self._context:
# return to_expand
#
# if isinstance(to_expand, basestring):
# (result, to_expand) = self._expand(to_expand)
# while result != to_expand:
# (result, to_expand) = self._expand(result)
# if isinstance(result, basestring):
# return string.Formatter().vformat(result, (), self._context)
# else:
# return result
# elif isinstance(to_expand, list):
# return [self.expand_placeholders(value) for value in to_expand]
# elif isinstance(to_expand, dict):
# return dict([(key, self.expand_placeholders(value)) for (key, value) in to_expand.items()])
# else:
# return to_expand
#
# def _expand(self, to_expand):
# if not isinstance(to_expand, basestring):
# return to_expand, to_expand
# elif self._pattern.match(to_expand) and to_expand[1:-1] in self._context:
# return self._context[to_expand[1:-1]], to_expand
# escaped = to_expand.replace('{{', '{{{{').replace('}}', '}}}}')
# return string.Formatter().vformat(escaped, (), self._context), to_expand
#
# def __str__(self):
# return str(self._context)
#
# @staticmethod
# def create_context(data, keys_to_expand=None):
# return (Context(Context(context).expand_placeholders(context))
# for context in ContextExpander(keys_to_expand).create_context(None, data))
. Output only the next line. | raise errors.UnregisteredComponentError("No component of type '%s' found!" % component_type) |
Predict the next line after this snippet: <|code_start|>
logger = logging.getLogger(__name__)
def get_generators():
return _get_subclasses(JsonGenerator)
def _get_subclasses(clazz):
"""
:type clazz: type
"""
direct_subclasses = clazz.__subclasses__()
return [sub for sub in
direct_subclasses + [sub_class for direct in direct_subclasses for sub_class in _get_subclasses(direct)]
if sub not in (ComponentBase, JsonGenerator, JsonListGenerator)]
def get_placeholders(component_name):
return [v[1] for v in string.Formatter().parse(component_name) if v[1]]
class ComponentRegistry(object):
def __init__(self):
super(ComponentRegistry, self).__init__()
self._types = {}
self._components = {}
for clazz in _get_subclasses(ComponentBase):
logger.info('Loading component type %s', clazz)
<|code_end|>
using the current file's imports:
import logging
import string
from grafana_dashboards import errors
from grafana_dashboards.common import get_component_type
from grafana_dashboards.context import Context
and any relevant context from other files:
# Path: grafana_dashboards/errors.py
# class DashboardBuilderException(Exception):
# class DuplicateKeyError(DashboardBuilderException):
# class WrongComponentAttributeCountError(DashboardBuilderException):
# class UnregisteredComponentError(DashboardBuilderException):
#
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/context.py
# class Context(object):
#
# _pattern = re.compile('{.*}')
#
# def __init__(self, context=None):
# super(Context, self).__init__()
# if not context:
# self._context = None
# else:
# self._context = DictDefaultingToPlaceholder(context)
#
# def expand_placeholders(self, to_expand):
# """
#
# :rtype : dict
# """
# if not self._context:
# return to_expand
#
# if isinstance(to_expand, basestring):
# (result, to_expand) = self._expand(to_expand)
# while result != to_expand:
# (result, to_expand) = self._expand(result)
# if isinstance(result, basestring):
# return string.Formatter().vformat(result, (), self._context)
# else:
# return result
# elif isinstance(to_expand, list):
# return [self.expand_placeholders(value) for value in to_expand]
# elif isinstance(to_expand, dict):
# return dict([(key, self.expand_placeholders(value)) for (key, value) in to_expand.items()])
# else:
# return to_expand
#
# def _expand(self, to_expand):
# if not isinstance(to_expand, basestring):
# return to_expand, to_expand
# elif self._pattern.match(to_expand) and to_expand[1:-1] in self._context:
# return self._context[to_expand[1:-1]], to_expand
# escaped = to_expand.replace('{{', '{{{{').replace('}}', '}}}}')
# return string.Formatter().vformat(escaped, (), self._context), to_expand
#
# def __str__(self):
# return str(self._context)
#
# @staticmethod
# def create_context(data, keys_to_expand=None):
# return (Context(Context(context).expand_placeholders(context))
# for context in ContextExpander(keys_to_expand).create_context(None, data))
. Output only the next line. | self._types[get_component_type(clazz)] = clazz |
Given snippet: <|code_start|> return component
def create_component(self, component_type, data):
return self._class_for_type(component_type)(data, self)
def get_component(self, component_type, name):
component = self._get_component(component_type).get(name)
if component is None:
raise errors.UnregisteredComponentError("No component '%s' with name '%s' found!" % (component_type, name))
return component
class ComponentBase(object):
def __init__(self, data, registry):
"""
:type registry: ComponentRegistry
"""
super(ComponentBase, self).__init__()
self.data = data[get_component_type(type(self))]
if self.data is None:
self.data = {}
self.name = data.get('name')
self.registry = registry
class JsonGenerator(ComponentBase):
_copy_fields = set()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import logging
import string
from grafana_dashboards import errors
from grafana_dashboards.common import get_component_type
from grafana_dashboards.context import Context
and context:
# Path: grafana_dashboards/errors.py
# class DashboardBuilderException(Exception):
# class DuplicateKeyError(DashboardBuilderException):
# class WrongComponentAttributeCountError(DashboardBuilderException):
# class UnregisteredComponentError(DashboardBuilderException):
#
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/context.py
# class Context(object):
#
# _pattern = re.compile('{.*}')
#
# def __init__(self, context=None):
# super(Context, self).__init__()
# if not context:
# self._context = None
# else:
# self._context = DictDefaultingToPlaceholder(context)
#
# def expand_placeholders(self, to_expand):
# """
#
# :rtype : dict
# """
# if not self._context:
# return to_expand
#
# if isinstance(to_expand, basestring):
# (result, to_expand) = self._expand(to_expand)
# while result != to_expand:
# (result, to_expand) = self._expand(result)
# if isinstance(result, basestring):
# return string.Formatter().vformat(result, (), self._context)
# else:
# return result
# elif isinstance(to_expand, list):
# return [self.expand_placeholders(value) for value in to_expand]
# elif isinstance(to_expand, dict):
# return dict([(key, self.expand_placeholders(value)) for (key, value) in to_expand.items()])
# else:
# return to_expand
#
# def _expand(self, to_expand):
# if not isinstance(to_expand, basestring):
# return to_expand, to_expand
# elif self._pattern.match(to_expand) and to_expand[1:-1] in self._context:
# return self._context[to_expand[1:-1]], to_expand
# escaped = to_expand.replace('{{', '{{{{').replace('}}', '}}}}')
# return string.Formatter().vformat(escaped, (), self._context), to_expand
#
# def __str__(self):
# return str(self._context)
#
# @staticmethod
# def create_context(data, keys_to_expand=None):
# return (Context(Context(context).expand_placeholders(context))
# for context in ContextExpander(keys_to_expand).create_context(None, data))
which might include code, classes, or functions. Output only the next line. | def gen_json(self, context=Context()): |
Using the snippet: <|code_start|>
# noinspection PySetFunctionToLiteral
_copy_fields = set(['sharedCrosshair'])
def gen_json_from_data(self, data, context):
json_data = super(Dashboard, self).gen_json_from_data(data, context)
nav = {
'type': 'timepicker'
}
json_data.update({
'title': data.get('title', self.name),
'nav': [
nav
]
})
if 'time' in data:
json_data['time'] = {
'from': data['time']['from'],
'to': data['time']['to']
}
if 'tags' in data:
json_data['tags'] = data.get('tags')
if 'time_options' in data:
nav['time_options'] = data.get('time_options', [])
if 'refresh_intervals' in data:
nav['refresh_intervals'] = data.get('refresh_intervals', [])
if 'refresh' in data:
json_data['refresh'] = data.get('refresh')
if 'folderId' in data:
json_data['folderId'] = data.get('folderId')
<|code_end|>
, determine the next line of code. You have imports:
from grafana_dashboards.common import get_component_type
from grafana_dashboards.components.annotations import Annotations
from grafana_dashboards.components.base import JsonGenerator
from grafana_dashboards.components.rows import Rows
from grafana_dashboards.components.templates import Templates
and context (class names, function names, or code) available:
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/components/annotations.py
# class Annotations(JsonListGenerator):
# def __init__(self, data, registry):
# super(Annotations, self).__init__(data, registry, AnnotationsItemBase)
#
# Path: grafana_dashboards/components/base.py
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
#
# Path: grafana_dashboards/components/rows.py
# class Rows(JsonListGenerator):
# def __init__(self, data, registry):
# super(Rows, self).__init__(data, registry, RowsItemBase)
#
# Path: grafana_dashboards/components/templates.py
# class Templates(JsonListGenerator):
# def __init__(self, data, registry):
# super(Templates, self).__init__(data, registry, TemplatesItemBase)
. Output only the next line. | if get_component_type(Annotations) in data: |
Given the code snippet: <|code_start|>
# noinspection PySetFunctionToLiteral
_copy_fields = set(['sharedCrosshair'])
def gen_json_from_data(self, data, context):
json_data = super(Dashboard, self).gen_json_from_data(data, context)
nav = {
'type': 'timepicker'
}
json_data.update({
'title': data.get('title', self.name),
'nav': [
nav
]
})
if 'time' in data:
json_data['time'] = {
'from': data['time']['from'],
'to': data['time']['to']
}
if 'tags' in data:
json_data['tags'] = data.get('tags')
if 'time_options' in data:
nav['time_options'] = data.get('time_options', [])
if 'refresh_intervals' in data:
nav['refresh_intervals'] = data.get('refresh_intervals', [])
if 'refresh' in data:
json_data['refresh'] = data.get('refresh')
if 'folderId' in data:
json_data['folderId'] = data.get('folderId')
<|code_end|>
, generate the next line using the imports in this file:
from grafana_dashboards.common import get_component_type
from grafana_dashboards.components.annotations import Annotations
from grafana_dashboards.components.base import JsonGenerator
from grafana_dashboards.components.rows import Rows
from grafana_dashboards.components.templates import Templates
and context (functions, classes, or occasionally code) from other files:
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/components/annotations.py
# class Annotations(JsonListGenerator):
# def __init__(self, data, registry):
# super(Annotations, self).__init__(data, registry, AnnotationsItemBase)
#
# Path: grafana_dashboards/components/base.py
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
#
# Path: grafana_dashboards/components/rows.py
# class Rows(JsonListGenerator):
# def __init__(self, data, registry):
# super(Rows, self).__init__(data, registry, RowsItemBase)
#
# Path: grafana_dashboards/components/templates.py
# class Templates(JsonListGenerator):
# def __init__(self, data, registry):
# super(Templates, self).__init__(data, registry, TemplatesItemBase)
. Output only the next line. | if get_component_type(Annotations) in data: |
Here is a snippet: <|code_start|> _copy_fields = set(['sharedCrosshair'])
def gen_json_from_data(self, data, context):
json_data = super(Dashboard, self).gen_json_from_data(data, context)
nav = {
'type': 'timepicker'
}
json_data.update({
'title': data.get('title', self.name),
'nav': [
nav
]
})
if 'time' in data:
json_data['time'] = {
'from': data['time']['from'],
'to': data['time']['to']
}
if 'tags' in data:
json_data['tags'] = data.get('tags')
if 'time_options' in data:
nav['time_options'] = data.get('time_options', [])
if 'refresh_intervals' in data:
nav['refresh_intervals'] = data.get('refresh_intervals', [])
if 'refresh' in data:
json_data['refresh'] = data.get('refresh')
if 'folderId' in data:
json_data['folderId'] = data.get('folderId')
if get_component_type(Annotations) in data:
json_data['annotations'] = {'list': self.registry.create_component(Annotations, data).gen_json()}
<|code_end|>
. Write the next line using the current file imports:
from grafana_dashboards.common import get_component_type
from grafana_dashboards.components.annotations import Annotations
from grafana_dashboards.components.base import JsonGenerator
from grafana_dashboards.components.rows import Rows
from grafana_dashboards.components.templates import Templates
and context from other files:
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/components/annotations.py
# class Annotations(JsonListGenerator):
# def __init__(self, data, registry):
# super(Annotations, self).__init__(data, registry, AnnotationsItemBase)
#
# Path: grafana_dashboards/components/base.py
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
#
# Path: grafana_dashboards/components/rows.py
# class Rows(JsonListGenerator):
# def __init__(self, data, registry):
# super(Rows, self).__init__(data, registry, RowsItemBase)
#
# Path: grafana_dashboards/components/templates.py
# class Templates(JsonListGenerator):
# def __init__(self, data, registry):
# super(Templates, self).__init__(data, registry, TemplatesItemBase)
, which may include functions, classes, or code. Output only the next line. | if get_component_type(Rows) in data: |
Here is a snippet: <|code_start|> def gen_json_from_data(self, data, context):
json_data = super(Dashboard, self).gen_json_from_data(data, context)
nav = {
'type': 'timepicker'
}
json_data.update({
'title': data.get('title', self.name),
'nav': [
nav
]
})
if 'time' in data:
json_data['time'] = {
'from': data['time']['from'],
'to': data['time']['to']
}
if 'tags' in data:
json_data['tags'] = data.get('tags')
if 'time_options' in data:
nav['time_options'] = data.get('time_options', [])
if 'refresh_intervals' in data:
nav['refresh_intervals'] = data.get('refresh_intervals', [])
if 'refresh' in data:
json_data['refresh'] = data.get('refresh')
if 'folderId' in data:
json_data['folderId'] = data.get('folderId')
if get_component_type(Annotations) in data:
json_data['annotations'] = {'list': self.registry.create_component(Annotations, data).gen_json()}
if get_component_type(Rows) in data:
json_data['rows'] = self.registry.create_component(Rows, data).gen_json()
<|code_end|>
. Write the next line using the current file imports:
from grafana_dashboards.common import get_component_type
from grafana_dashboards.components.annotations import Annotations
from grafana_dashboards.components.base import JsonGenerator
from grafana_dashboards.components.rows import Rows
from grafana_dashboards.components.templates import Templates
and context from other files:
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/components/annotations.py
# class Annotations(JsonListGenerator):
# def __init__(self, data, registry):
# super(Annotations, self).__init__(data, registry, AnnotationsItemBase)
#
# Path: grafana_dashboards/components/base.py
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
#
# Path: grafana_dashboards/components/rows.py
# class Rows(JsonListGenerator):
# def __init__(self, data, registry):
# super(Rows, self).__init__(data, registry, RowsItemBase)
#
# Path: grafana_dashboards/components/templates.py
# class Templates(JsonListGenerator):
# def __init__(self, data, registry):
# super(Templates, self).__init__(data, registry, TemplatesItemBase)
, which may include functions, classes, or code. Output only the next line. | if get_component_type(Templates) in data: |
Given the following code snippet before the placeholder: <|code_start|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
if __name__ == '__main__':
<|code_end|>
, predict the next line using imports from the current file:
from grafana_dashboards import cli
and context including class names, function names, and sometimes code from other files:
# Path: grafana_dashboards/cli.py
# def _initialize_exporters(exporter_names, exporter_types, config):
# def _process_paths(paths):
# def main():
. Output only the next line. | cli.main() |
Given the following code snippet before the placeholder: <|code_start|># you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
try:
basestring
except NameError:
basestring = str
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Targets(JsonListGenerator):
def __init__(self, data, registry):
super(Targets, self).__init__(data, registry, TargetsItemBase)
def gen_item_json(self, items, result_list):
try:
super(Targets, self).gen_item_json(items, result_list)
except UnregisteredComponentError:
result_list.append(
<|code_end|>
, predict the next line using imports from the current file:
from grafana_dashboards.common import get_component_type
from grafana_dashboards.components.base import JsonListGenerator, JsonGenerator
from grafana_dashboards.errors import UnregisteredComponentError
and context including class names, function names, and sometimes code from other files:
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
#
# Path: grafana_dashboards/errors.py
# class UnregisteredComponentError(DashboardBuilderException):
# pass
. Output only the next line. | self.registry.create_component(GraphiteTarget, {get_component_type(GraphiteTarget): items}).gen_json() |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
try:
basestring
except NameError:
basestring = str
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
<|code_end|>
, predict the immediate next line with the help of imports:
from grafana_dashboards.common import get_component_type
from grafana_dashboards.components.base import JsonListGenerator, JsonGenerator
from grafana_dashboards.errors import UnregisteredComponentError
and context (classes, functions, sometimes code) from other files:
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
#
# Path: grafana_dashboards/errors.py
# class UnregisteredComponentError(DashboardBuilderException):
# pass
. Output only the next line. | class Targets(JsonListGenerator): |
Given the following code snippet before the placeholder: <|code_start|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
try:
basestring
except NameError:
basestring = str
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Targets(JsonListGenerator):
def __init__(self, data, registry):
super(Targets, self).__init__(data, registry, TargetsItemBase)
def gen_item_json(self, items, result_list):
try:
super(Targets, self).gen_item_json(items, result_list)
except UnregisteredComponentError:
result_list.append(
self.registry.create_component(GraphiteTarget, {get_component_type(GraphiteTarget): items}).gen_json()
)
<|code_end|>
, predict the next line using imports from the current file:
from grafana_dashboards.common import get_component_type
from grafana_dashboards.components.base import JsonListGenerator, JsonGenerator
from grafana_dashboards.errors import UnregisteredComponentError
and context including class names, function names, and sometimes code from other files:
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
#
# Path: grafana_dashboards/errors.py
# class UnregisteredComponentError(DashboardBuilderException):
# pass
. Output only the next line. | class TargetsItemBase(JsonGenerator): |
Continue the code snippet: <|code_start|>#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
try:
basestring
except NameError:
basestring = str
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Targets(JsonListGenerator):
def __init__(self, data, registry):
super(Targets, self).__init__(data, registry, TargetsItemBase)
def gen_item_json(self, items, result_list):
try:
super(Targets, self).gen_item_json(items, result_list)
<|code_end|>
. Use current file imports:
from grafana_dashboards.common import get_component_type
from grafana_dashboards.components.base import JsonListGenerator, JsonGenerator
from grafana_dashboards.errors import UnregisteredComponentError
and context (classes, functions, or code) from other files:
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
#
# Path: grafana_dashboards/errors.py
# class UnregisteredComponentError(DashboardBuilderException):
# pass
. Output only the next line. | except UnregisteredComponentError: |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
def test_grafana():
<|code_end|>
. Write the next line using the current file imports:
from mock import MagicMock
from grafana_dashboards.client.grafana import GrafanaExporter
and context from other files:
# Path: grafana_dashboards/client/grafana.py
# class GrafanaExporter(DashboardExporter):
# def __init__(self, **kwargs):
# super(GrafanaExporter, self).__init__()
# self._host = os.getenv('GRAFANA_HOST', kwargs.get('host'))
# password = os.getenv('GRAFANA_PASSWORD', kwargs.get('password'))
# username = os.getenv('GRAFANA_USERNAME', kwargs.get('username'))
# auth_token = os.getenv('GRAFANA_TOKEN', kwargs.get('token'))
# use_kerberos = os.getenv('GRAFANA_USE_KERBEROS', kwargs.get('use_kerberos'))
#
# if use_kerberos:
# self._connection = KerberosConnection(self._host)
# elif auth_token:
# self._connection = BearerAuthConnection(auth_token, self._host)
# else:
# self._connection = BasicAuthConnection(username, password, self._host)
#
# def process_dashboard(self, project_name, dashboard_name, dashboard_data):
# super(GrafanaExporter, self).process_dashboard(project_name, dashboard_name, dashboard_data)
# body = {'overwrite': True, 'dashboard': dashboard_data}
#
# if 'folderId' in dashboard_data:
# body.update({'folderId': dashboard_data['folderId']})
#
# logger.info("Uploading dashboard '%s' to %s", dashboard_name, self._host)
# self._connection.make_request('/api/dashboards/db', body)
, which may include functions, classes, or code. Output only the next line. | exporter = GrafanaExporter(host='host', username='username', password='password') |
Continue the code snippet: <|code_start|># See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Rows(JsonListGenerator):
def __init__(self, data, registry):
super(Rows, self).__init__(data, registry, RowsItemBase)
class RowsItemBase(JsonGenerator):
pass
class Row(RowsItemBase):
_copy_fields = {'repeat'}
def gen_json_from_data(self, data, context):
row_json = super(Row, self).gen_json_from_data(data, context)
row_json.update({
'title': data.get('title', ''),
'height': data.get('height', '250px'),
'showTitle': data.get('showTitle', False),
'collapse': data.get('collapse', False),
'panels': []
})
<|code_end|>
. Use current file imports:
from grafana_dashboards.common import get_component_type
from grafana_dashboards.components.base import JsonListGenerator, JsonGenerator
from grafana_dashboards.components.panels import Panels
and context (classes, functions, or code) from other files:
# Path: grafana_dashboards/common.py
# def get_component_type(clazz):
# """
#
# :type clazz: type
# """
# return _all_cap_re.sub(r'\1-\2', _first_cap_re.sub(r'\1-\2', clazz.__name__)).lower()
#
# Path: grafana_dashboards/components/base.py
# class JsonListGenerator(JsonGenerator):
# def __init__(self, data, registry, item_base_class):
# super(JsonListGenerator, self).__init__(data, registry)
# self.component_item_types = [get_component_type(clazz) for clazz in _get_subclasses(item_base_class)]
#
# def gen_json_from_data(self, data, context):
# super(JsonListGenerator, self).gen_json_from_data(data, context)
# result_list = []
# for items in data:
# self.gen_item_json(items, result_list)
# return result_list
#
# def gen_item_json(self, items, result_list):
# if isinstance(items, basestring):
# # this is component without context
# result_list += self.registry.get_component(type(self), items).gen_json()
# else:
# self._gen_item_json_with_context(items, result_list)
#
# def _gen_item_json_with_context(self, items, result_list):
# # TODO add check for dictionary
# for (item_type, item_data) in items.items():
# if item_type not in self.component_item_types:
# # this is named component with context
# for context in Context.create_context(item_data, get_placeholders(item_type)):
# result_list += self.registry.get_component(type(self), item_type).gen_json(context)
# else:
# # this is inplace defined component
# item = self.registry.create_component(item_type, {item_type: item_data}).gen_json()
# if isinstance(item, list):
# result_list += item
# else:
# result_list.append(item)
#
# class JsonGenerator(ComponentBase):
#
# _copy_fields = set()
#
# def gen_json(self, context=Context()):
# return self.gen_json_from_data(context.expand_placeholders(self.data), context)
#
# def gen_json_from_data(self, data, context):
# component_type = get_component_type(type(self))
# if self.name:
# logger.debug("Processing component '%s' with name '%s' from template '%s'",
# component_type, context.expand_placeholders(self.name), self.name)
# else:
# logger.debug("Processing anonymous component '%s'", component_type)
# json = {}
# for field in self._copy_fields:
# if field in data:
# json[field] = data[field]
# return json
#
# Path: grafana_dashboards/components/panels.py
# class Panels(JsonListGenerator):
# def __init__(self, data, registry):
# super(Panels, self).__init__(data, registry, PanelsItemBase)
. Output only the next line. | if get_component_type(Panels) in data: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.