| Instruction (string, length 362–7.83k) | output_code (string, length 1–945) |
|---|---|
Given snippet: <|code_start|> m = self.counts
score = sum(gammaln(a[k] + m[k]) - gammaln(a[k]) for k in xrange(dim))
score += gammaln(a.sum())
score -= gammaln(a.sum() + m.sum())
return score
def sample_value(self, shared):
sampler = Sampler()
sampler.init(shared, self)
return sampler.eval(shared)
def load(self, raw):
self.counts = numpy.array(raw['counts'], dtype=numpy.int)
def dump(self):
return {'counts': self.counts.tolist()}
def protobuf_load(self, message):
self.counts = numpy.array(message.counts, dtype=numpy.int)
def protobuf_dump(self, message):
message.Clear()
for count in self.counts:
message.counts.append(count)
class Sampler(object):
def init(self, shared, group=None):
if group is None:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy
from distributions.dbg.special import log, gammaln
from distributions.dbg.random import sample_discrete, sample_dirichlet
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
which might include code, classes, or functions. Output only the next line. | self.ps = sample_dirichlet(shared.alphas) |
Based on the snippet: <|code_start|># THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
LOG = logging.getLogger(__name__)
# pacify pyflakes
assert sample_dirichlet and factorial and sample_poisson and sample_gamma
def seed(x):
numpy.random.seed(x)
try:
distributions.cRandom.seed(x)
except ImportError:
pass
def sample_discrete_log(scores):
<|code_end|>
, predict the immediate next line with the help of imports:
from math import log, pi, sqrt, factorial
from numpy.random.mtrand import dirichlet as sample_dirichlet
from numpy import dot, inner
from numpy.linalg import cholesky, det, inv
from numpy.random import multivariate_normal
from numpy.random import beta as sample_beta
from numpy.random import poisson as sample_poisson
from numpy.random import gamma as sample_gamma
from scipy.stats import norm, chi2, bernoulli, nbinom
from scipy.special import gammaln
from distributions.util import scores_to_probs
from distributions.vendor.stats import (
sample_invwishart as _sample_inverse_wishart
)
import numpy as np
import numpy.random
import logging
import distributions.cRandom
and context (classes, functions, sometimes code) from other files:
# Path: distributions/util.py
# def scores_to_probs(scores):
# scores = numpy.array(scores)
# scores -= scores.max()
# probs = numpy.exp(scores, out=scores)
# probs /= probs.sum()
# return probs
. Output only the next line. | probs = scores_to_probs(scores) |
Given the code snippet: <|code_start|>
# create a new partition
large_shape = (1,) + small_shape
prob += large_probs[large_shape] / large_counts[large_shape]
# add to each existing partition
for i in xrange(len(small_shape)):
large_shape = list(small_shape)
large_shape[i] += 1
large_shape.sort()
large_shape = tuple(large_shape)
prob += large_probs[large_shape] / large_counts[large_shape]
small_probs[small_shape] = count * prob
return small_probs
def get_counts(size):
'''
Count partition shapes of a given sample size.
Inputs:
size = sample_size
Returns:
dict : shape -> count
'''
assert 0 <= size
cache_file = '{}/counts.{}.json.bz2'.format(TEMP, size)
if cache_file not in CACHE:
if os.path.exists(cache_file):
<|code_end|>
, generate the next line using the imports in this file:
import os
import numpy
import math
import matplotlib
import parsable
from collections import defaultdict
from numpy import log, exp
from matplotlib import pyplot, font_manager
from distributions.lp.special import fast_log
from distributions.io.stream import json_stream_load, json_stream_dump
and context (functions, classes, or occasionally code) from other files:
# Path: distributions/io/stream.py
# class json_stream_load(object):
# '''
# Read json data that was created by json_stream_dump or json_costream_dump.
#
# Note that this exploits newline formatting in the above dumpers.
# In particular:
# - the first line is '['
# - intermediate lines are of the form '{},'.format(json_parsable_content)
# - the penultimate line is of the form '{}'.format(json_parsable_content)
# - the last line is ']'
# - there is no trailing whitespace
#
# An alternative would be to use ijson to streamingly load arbitrary json
# files, however in practice this is ~40x slower.
# '''
# def __init__(self, filename):
# self.fd = open_compressed(filename, 'rb')
# line = self.fd.readline(2)
# if line != '[\n':
# raise IOError(
# 'Unhandled format for json_stream_load. '
# 'Try recreating json file with the compatible '
# 'json_stream_dump or json_costream_dump.')
#
# def __iter__(self):
# return self
#
# def next(self):
# line = self.fd.readline().rstrip(',\n')
# if line == ']':
# self.close()
# raise StopIteration
# else:
# return simplejson.loads(line)
#
# def close(self):
# self.fd.close()
#
# def json_stream_dump(stream, filename, **kwargs):
# kwargs['separators'] = (',', ':')
# stream = iter(stream)
# with open_compressed(filename, 'w') as f:
# f.write('[')
# try:
# item = next(stream)
# f.write('\n')
# simplejson.dump(item, f, **kwargs)
# for item in stream:
# f.write(',\n')
# simplejson.dump(item, f, **kwargs)
# except StopIteration:
# pass
# f.write('\n]')
. Output only the next line. | flat = json_stream_load(cache_file) |
Continue the code snippet: <|code_start|> large_shape.sort()
large_shape = tuple(large_shape)
prob += large_probs[large_shape] / large_counts[large_shape]
small_probs[small_shape] = count * prob
return small_probs
def get_counts(size):
'''
Count partition shapes of a given sample size.
Inputs:
size = sample_size
Returns:
dict : shape -> count
'''
assert 0 <= size
cache_file = '{}/counts.{}.json.bz2'.format(TEMP, size)
if cache_file not in CACHE:
if os.path.exists(cache_file):
flat = json_stream_load(cache_file)
large = {tuple(key): val for key, val in flat}
else:
if size == 0:
large = {(): 1.0}
else:
small = get_counts(size - 1)
large = get_larger_counts(small)
print 'caching', cache_file
<|code_end|>
. Use current file imports:
import os
import numpy
import math
import matplotlib
import parsable
from collections import defaultdict
from numpy import log, exp
from matplotlib import pyplot, font_manager
from distributions.lp.special import fast_log
from distributions.io.stream import json_stream_load, json_stream_dump
and context (classes, functions, or code) from other files:
# Path: distributions/io/stream.py
# class json_stream_load(object):
# '''
# Read json data that was created by json_stream_dump or json_costream_dump.
#
# Note that this exploits newline formatting in the above dumpers.
# In particular:
# - the first line is '['
# - intermediate lines are of the form '{},'.format(json_parsable_content)
# - the penultimate line is of the form '{}'.format(json_parsable_content)
# - the last line is ']'
# - there is no trailing whitespace
#
# An alternative would be to use ijson to streamingly load arbitrary json
# files, however in practice this is ~40x slower.
# '''
# def __init__(self, filename):
# self.fd = open_compressed(filename, 'rb')
# line = self.fd.readline(2)
# if line != '[\n':
# raise IOError(
# 'Unhandled format for json_stream_load. '
# 'Try recreating json file with the compatible '
# 'json_stream_dump or json_costream_dump.')
#
# def __iter__(self):
# return self
#
# def next(self):
# line = self.fd.readline().rstrip(',\n')
# if line == ']':
# self.close()
# raise StopIteration
# else:
# return simplejson.loads(line)
#
# def close(self):
# self.fd.close()
#
# def json_stream_dump(stream, filename, **kwargs):
# kwargs['separators'] = (',', ':')
# stream = iter(stream)
# with open_compressed(filename, 'w') as f:
# f.write('[')
# try:
# item = next(stream)
# f.write('\n')
# simplejson.dump(item, f, **kwargs)
# for item in stream:
# f.write(',\n')
# simplejson.dump(item, f, **kwargs)
# except StopIteration:
# pass
# f.write('\n]')
. Output only the next line. | json_stream_dump(large.iteritems(), cache_file) |
Given the following code snippet before the placeholder: <|code_start|> def dump(self):
return {
'alpha': self.alpha,
'inv_beta': self.inv_beta,
}
def protobuf_load(self, message):
self.alpha = float(message.alpha)
self.inv_beta = float(message.inv_beta)
def protobuf_dump(self, message):
message.Clear()
message.alpha = self.alpha
message.inv_beta = self.inv_beta
class Group(GroupIoMixin):
def __init__(self):
self.count = None
self.sum = None
self.log_prod = None
def init(self, shared):
self.count = 0
self.sum = 0
self.log_prod = 0.
def add_value(self, shared, value):
self.count += 1
self.sum += int(value)
<|code_end|>
, predict the next line using imports from the current file:
from distributions.dbg.special import log, factorial, gammaln
from distributions.dbg.random import sample_gamma, sample_poisson
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context including class names, function names, and sometimes code from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | self.log_prod += log(factorial(value)) |
Predict the next line after this snippet: <|code_start|> def dump(self):
return {
'alpha': self.alpha,
'inv_beta': self.inv_beta,
}
def protobuf_load(self, message):
self.alpha = float(message.alpha)
self.inv_beta = float(message.inv_beta)
def protobuf_dump(self, message):
message.Clear()
message.alpha = self.alpha
message.inv_beta = self.inv_beta
class Group(GroupIoMixin):
def __init__(self):
self.count = None
self.sum = None
self.log_prod = None
def init(self, shared):
self.count = 0
self.sum = 0
self.log_prod = 0.
def add_value(self, shared, value):
self.count += 1
self.sum += int(value)
<|code_end|>
using the current file's imports:
from distributions.dbg.special import log, factorial, gammaln
from distributions.dbg.random import sample_gamma, sample_poisson
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and any relevant context from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | self.log_prod += log(factorial(value)) |
Given the following code snippet before the placeholder: <|code_start|> self.sum = None
self.log_prod = None
def init(self, shared):
self.count = 0
self.sum = 0
self.log_prod = 0.
def add_value(self, shared, value):
self.count += 1
self.sum += int(value)
self.log_prod += log(factorial(value))
def add_repeated_value(self, shared, value, count):
self.count += count
self.sum += int(count * value)
self.log_prod += count * log(factorial(value))
def remove_value(self, shared, value):
self.count -= 1
self.sum -= int(value)
self.log_prod -= log(factorial(value))
def merge(self, shared, source):
self.count += source.count
self.sum += source.sum
self.log_prod += source.log_prod
def score_value(self, shared, value):
post = shared.plus_group(self)
<|code_end|>
, predict the next line using imports from the current file:
from distributions.dbg.special import log, factorial, gammaln
from distributions.dbg.random import sample_gamma, sample_poisson
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context including class names, function names, and sometimes code from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | return gammaln(post.alpha + value) - gammaln(post.alpha) \ |
Given the code snippet: <|code_start|> sampler = Sampler()
sampler.init(shared, self)
return sampler.eval(shared)
def load(self, raw):
self.count = int(raw['count'])
self.sum = int(raw['sum'])
self.log_prod = float(raw['log_prod'])
def dump(self):
return {
'count': self.count,
'sum': self.sum,
'log_prod': self.log_prod,
}
def protobuf_load(self, message):
self.count = int(message.count)
self.sum = int(message.sum)
self.log_prod = float(message.log_prod)
def protobuf_dump(self, message):
message.count = self.count
message.sum = self.sum
message.log_prod = self.log_prod
class Sampler(object):
def init(self, shared, group=None):
post = shared if group is None else shared.plus_group(group)
<|code_end|>
, generate the next line using the imports in this file:
from distributions.dbg.special import log, factorial, gammaln
from distributions.dbg.random import sample_gamma, sample_poisson
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context (functions, classes, or occasionally code) from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | self.lambda_ = sample_gamma(post.alpha, 1.0 / post.inv_beta) |
Given the code snippet: <|code_start|>
def load(self, raw):
self.count = int(raw['count'])
self.sum = int(raw['sum'])
self.log_prod = float(raw['log_prod'])
def dump(self):
return {
'count': self.count,
'sum': self.sum,
'log_prod': self.log_prod,
}
def protobuf_load(self, message):
self.count = int(message.count)
self.sum = int(message.sum)
self.log_prod = float(message.log_prod)
def protobuf_dump(self, message):
message.count = self.count
message.sum = self.sum
message.log_prod = self.log_prod
class Sampler(object):
def init(self, shared, group=None):
post = shared if group is None else shared.plus_group(group)
self.lambda_ = sample_gamma(post.alpha, 1.0 / post.inv_beta)
def eval(self, shared):
<|code_end|>
, generate the next line using the imports in this file:
from distributions.dbg.special import log, factorial, gammaln
from distributions.dbg.random import sample_gamma, sample_poisson
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context (functions, classes, or occasionally code) from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | return sample_poisson(self.lambda_) |
Given the following code snippet before the placeholder: <|code_start|># - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MODULES = {}
for spec in list_models():
MODULES.setdefault(spec['name'], []).append(import_model(spec))
def test_model():
for name in MODULES:
yield _test_model, name
def _test_model(name):
modules = MODULES[name]
<|code_end|>
, predict the next line using imports from the current file:
from distributions.tests.util import (
assert_all_close,
list_models,
import_model,
)
and context including class names, function names, and sometimes code from other files:
# Path: distributions/tests/util.py
# def assert_all_close(collection, **kwargs):
# for i1, item1 in enumerate(collection[:-1]):
# for item2 in collection[i1 + 1:]:
# assert_close(item1, item2, **kwargs)
#
# def list_models():
# result = set()
# for path in glob.glob(os.path.join(ROOT, '*', 'models', '*.p*')):
# dirname, filename = os.path.split(path)
# flavor = os.path.split(os.path.dirname(dirname))[-1]
# name = os.path.splitext(filename)[0]
# if not name.startswith('__'):
# result.add((name, flavor))
# for name, flavor in sorted(result):
# spec = {'flavor': flavor, 'name': name}
# if name.startswith('_'):
# continue
# try:
# import_model(spec)
# yield spec
# except ImportError:
# module_name = 'distributions.{flavor}.models.{name}'.format(**spec)
# print 'failed to import {}'.format(module_name)
# import traceback
# print traceback.format_exc()
#
# def import_model(spec):
# module_name = 'distributions.{flavor}.models.{name}'.format(**spec)
# return importlib.import_module(module_name)
. Output only the next line. | assert_all_close([m.NAME for m in modules], err_msg='Model.__name__') |
Given snippet: <|code_start|>#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@parsable.command
def flavors_by_model():
'''
List flavors implemented of each model.
'''
models = defaultdict(lambda: [])
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from collections import defaultdict
from distributions.tests.util import list_models, import_model
import parsable
and context:
# Path: distributions/tests/util.py
# def list_models():
# result = set()
# for path in glob.glob(os.path.join(ROOT, '*', 'models', '*.p*')):
# dirname, filename = os.path.split(path)
# flavor = os.path.split(os.path.dirname(dirname))[-1]
# name = os.path.splitext(filename)[0]
# if not name.startswith('__'):
# result.add((name, flavor))
# for name, flavor in sorted(result):
# spec = {'flavor': flavor, 'name': name}
# if name.startswith('_'):
# continue
# try:
# import_model(spec)
# yield spec
# except ImportError:
# module_name = 'distributions.{flavor}.models.{name}'.format(**spec)
# print 'failed to import {}'.format(module_name)
# import traceback
# print traceback.format_exc()
#
# def import_model(spec):
# module_name = 'distributions.{flavor}.models.{name}'.format(**spec)
# return importlib.import_module(module_name)
which might include code, classes, or functions. Output only the next line. | for spec in list_models(): |
Continue the code snippet: <|code_start|>@parsable.command
def flavors_by_model():
'''
List flavors implemented of each model.
'''
models = defaultdict(lambda: [])
for spec in list_models():
models[spec['name']].append(spec['flavor'])
for model in sorted(models):
print 'model {}: {}'.format(model, ' '.join(sorted(models[model])))
@parsable.command
def models_by_flavor():
'''
List models implemented of each flavor.
'''
flavors = defaultdict(lambda: [])
for spec in list_models():
flavors[spec['flavor']].append(spec['name'])
for flavor in sorted(flavors):
print 'flavor {}: {}'.format(flavor, ' '.join(sorted(flavors[flavor])))
@parsable.command
def model_apis():
'''
List api of each model.
'''
for spec in list_models():
<|code_end|>
. Use current file imports:
from collections import defaultdict
from distributions.tests.util import list_models, import_model
import parsable
and context (classes, functions, or code) from other files:
# Path: distributions/tests/util.py
# def list_models():
# result = set()
# for path in glob.glob(os.path.join(ROOT, '*', 'models', '*.p*')):
# dirname, filename = os.path.split(path)
# flavor = os.path.split(os.path.dirname(dirname))[-1]
# name = os.path.splitext(filename)[0]
# if not name.startswith('__'):
# result.add((name, flavor))
# for name, flavor in sorted(result):
# spec = {'flavor': flavor, 'name': name}
# if name.startswith('_'):
# continue
# try:
# import_model(spec)
# yield spec
# except ImportError:
# module_name = 'distributions.{flavor}.models.{name}'.format(**spec)
# print 'failed to import {}'.format(module_name)
# import traceback
# print traceback.format_exc()
#
# def import_model(spec):
# module_name = 'distributions.{flavor}.models.{name}'.format(**spec)
# return importlib.import_module(module_name)
. Output only the next line. | Model = import_model(spec).Model |
Given the code snippet: <|code_start|> assert value in shared.betas, 'unknown value: {}'.format(value)
if count:
self.total += count
try:
count += self.counts[value]
if count:
self.counts[value] = count
else:
del self.counts[value]
except KeyError:
self.counts[value] = count
def add_value(self, shared, value):
self.add_repeated_value(shared, value, 1)
def remove_value(self, shared, value):
self.add_repeated_value(shared, value, -1)
def score_value(self, shared, value):
"""
Adapted from dd.py, which was adapted from:
McCallum, et. al, 'Rethinking LDA: Why Priors Matter' eqn 4
"""
denom = shared.alpha + self.total
if value == OTHER:
numer = shared.beta0 * shared.alpha
else:
count = self.counts.get(value, 0)
assert count >= 0, "cannot score while in debt"
numer = shared.betas[value] * shared.alpha + count
<|code_end|>
, generate the next line using the imports in this file:
from itertools import izip
from distributions.dbg.special import log, gammaln
from distributions.dbg.random import (
sample_discrete,
sample_dirichlet,
sample_beta,
)
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context (functions, classes, or occasionally code) from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | return log(numer / denom) |
Given the following code snippet before the placeholder: <|code_start|>
def add_value(self, shared, value):
self.add_repeated_value(shared, value, 1)
def remove_value(self, shared, value):
self.add_repeated_value(shared, value, -1)
def score_value(self, shared, value):
"""
Adapted from dd.py, which was adapted from:
McCallum, et. al, 'Rethinking LDA: Why Priors Matter' eqn 4
"""
denom = shared.alpha + self.total
if value == OTHER:
numer = shared.beta0 * shared.alpha
else:
count = self.counts.get(value, 0)
assert count >= 0, "cannot score while in debt"
numer = shared.betas[value] * shared.alpha + count
return log(numer / denom)
def score_data(self, shared):
assert len(shared.betas), 'betas is empty'
"""
See doc/dpd.pdf Equation (3)
"""
score = 0.
for i, count in self.counts.iteritems():
assert count >= 0, "cannot score while in debt"
prior_i = shared.betas[i] * shared.alpha
<|code_end|>
, predict the next line using imports from the current file:
from itertools import izip
from distributions.dbg.special import log, gammaln
from distributions.dbg.random import (
sample_discrete,
sample_dirichlet,
sample_beta,
)
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context including class names, function names, and sometimes code from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | score += gammaln(prior_i + count) - gammaln(prior_i) |
Given snippet: <|code_start|> self.counts = {}
self.total = 0
for i, count in izip(message.keys, message.values):
if count:
self.counts[int(i)] = int(count)
self.total += count
def protobuf_dump(self, message):
message.Clear()
for i, count in self.counts.iteritems():
if count:
message.keys.append(i)
message.values.append(count)
class Sampler(object):
def init(self, shared, group=None):
self.values = []
post = []
alpha = shared.alpha
counts = {} if group is None else group.counts
for value, beta in shared.betas.iteritems():
self.values.append(value)
post.append(beta * alpha + counts.get(value, 0))
if shared.beta0 > 0:
self.values.append(OTHER)
post.append(shared.beta0 * alpha)
self.probs = sample_dirichlet(post)
def eval(self, shared):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from itertools import izip
from distributions.dbg.special import log, gammaln
from distributions.dbg.random import (
sample_discrete,
sample_dirichlet,
sample_beta,
)
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
which might include code, classes, or functions. Output only the next line. | index = sample_discrete(self.probs) |
Predict the next line after this snippet: <|code_start|> return {'counts': counts}
def protobuf_load(self, message):
self.counts = {}
self.total = 0
for i, count in izip(message.keys, message.values):
if count:
self.counts[int(i)] = int(count)
self.total += count
def protobuf_dump(self, message):
message.Clear()
for i, count in self.counts.iteritems():
if count:
message.keys.append(i)
message.values.append(count)
class Sampler(object):
def init(self, shared, group=None):
self.values = []
post = []
alpha = shared.alpha
counts = {} if group is None else group.counts
for value, beta in shared.betas.iteritems():
self.values.append(value)
post.append(beta * alpha + counts.get(value, 0))
if shared.beta0 > 0:
self.values.append(OTHER)
post.append(shared.beta0 * alpha)
<|code_end|>
using the current file's imports:
from itertools import izip
from distributions.dbg.special import log, gammaln
from distributions.dbg.random import (
sample_discrete,
sample_dirichlet,
sample_beta,
)
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and any relevant context from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | self.probs = sample_dirichlet(post) |
Using the snippet: <|code_start|>
def protobuf_load(self, message):
assert len(message.betas) == len(message.values), "invalid message"
assert len(message.counts) == len(message.values), "invalid message"
self.gamma = float(message.gamma)
self.alpha = float(message.alpha)
self.betas = {
int(value): float(beta)
for value, beta in izip(message.values, message.betas)
}
self.counts = {
int(value): int(count)
for value, count in izip(message.values, message.counts)
}
self._load_beta0()
def protobuf_dump(self, message):
message.Clear()
message.gamma = self.gamma
message.alpha = self.alpha
for value, beta in self.betas.iteritems():
message.values.append(value)
message.betas.append(beta)
message.counts.append(self.counts[value])
def add_value(self, value):
assert value != OTHER, 'cannot add OTHER'
count = self.counts.get(value, 0) + 1
self.counts[value] = count
if count == 1:
<|code_end|>
, determine the next line of code. You have imports:
from itertools import izip
from distributions.dbg.special import log, gammaln
from distributions.dbg.random import (
sample_discrete,
sample_dirichlet,
sample_beta,
)
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context (class names, function names, or code) available:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | beta = self.beta0 * sample_beta(1.0, self.gamma) |
Given the following code snippet before the placeholder: <|code_start|> self.sum_xxT = np.zeros((shared.dim(), shared.dim()))
def add_value(self, shared, value):
self.count += 1
self.sum_x += value
self.sum_xxT += np.outer(value, value)
def add_repeated_value(self, shared, value, count):
self.count += count
self.sum_x += (count * value)
self.sum_xxT += (count * np.outer(value, value))
def remove_value(self, shared, value):
self.count -= 1
self.sum_x -= value
self.sum_xxT -= np.outer(value, value)
def merge(self, shared, source):
self.count += source.count
self.sum_x += source.sum_x
self.sum_xxT += source.sum_xxT
def score_value(self, shared, value):
"""
\cite{murphy2007conjugate}, Eq. 258
"""
post = shared.plus_group(self)
mu_n, kappa_n, psi_n, nu_n = post.mu, post.kappa, post.psi, post.nu
dof = nu_n - shared.dim() + 1.
sigma_n = psi_n * (kappa_n + 1.) / (kappa_n * dof)
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
import math
from scipy.special import multigammaln
from distributions.dbg.random import (
score_student_t,
sample_normal_inverse_wishart,
)
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context including class names, function names, and sometimes code from other files:
# Path: distributions/dbg/random.py
# def score_student_t(x, nu, mu, sigma):
# """
# multivariate score_student_t
#
# \cite{murphy2007conjugate}, Eq. 313
# """
# p = len(mu)
# z = x - mu
# S = inner(inner(z, inv(sigma)), z)
# score = (
# gammaln(0.5 * (nu + p))
# - gammaln(0.5 * nu)
# - 0.5 * (
# p * log(nu * pi)
# + log(det(sigma))
# + (nu + p) * log(1 + S / nu)
# )
# )
# return score
#
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# D, = mu0.shape
# assert psi0.shape == (D, D)
# assert lambda0 > 0.0
# assert nu0 > D - 1
# cov = sample_inverse_wishart(nu0, psi0)
# mu = np.random.multivariate_normal(mean=mu0, cov=(1. / lambda0) * cov)
# return mu, cov
. Output only the next line. | return score_student_t(value, dof, mu_n, sigma_n) |
Predict the next line after this snippet: <|code_start|> assert self.sum_xxT.shape == (D, D)
def dump(self):
return {
'count': self.count,
'sum_x': self.sum_x.copy(),
'sum_xxT': self.sum_xxT.copy(),
}
def protobuf_load(self, message):
self.count = message.count
self.sum_x = np.array(message.sum_x, dtype=np.float)
self.sum_xxT = np.array(message.sum_xxT, dtype=np.float)
D = self.sum_x.shape[0]
self.sum_xxT = self.sum_xxT.reshape((D, D))
def protobuf_dump(self, message):
message.Clear()
message.count = self.count
for x in self.sum_x:
message.sum_x.append(x)
for x in self.sum_xxT:
for y in x:
message.sum_xxT.append(y)
class Sampler(object):
def init(self, shared, group=None):
if group is not None:
shared = shared.plus_group(group)
<|code_end|>
using the current file's imports:
import numpy as np
import math
from scipy.special import multigammaln
from distributions.dbg.random import (
score_student_t,
sample_normal_inverse_wishart,
)
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and any relevant context from other files:
# Path: distributions/dbg/random.py
# def score_student_t(x, nu, mu, sigma):
# """
# multivariate score_student_t
#
# \cite{murphy2007conjugate}, Eq. 313
# """
# p = len(mu)
# z = x - mu
# S = inner(inner(z, inv(sigma)), z)
# score = (
# gammaln(0.5 * (nu + p))
# - gammaln(0.5 * nu)
# - 0.5 * (
# p * log(nu * pi)
# + log(det(sigma))
# + (nu + p) * log(1 + S / nu)
# )
# )
# return score
#
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# D, = mu0.shape
# assert psi0.shape == (D, D)
# assert lambda0 > 0.0
# assert nu0 > D - 1
# cov = sample_inverse_wishart(nu0, psi0)
# mu = np.random.multivariate_normal(mean=mu0, cov=(1. / lambda0) * cov)
# return mu, cov
. Output only the next line. | self.mu, self.sigma = sample_normal_inverse_wishart( |
Using the snippet: <|code_start|> counts[sample] += 1
probs[sample] += prob
for key, count in counts.iteritems():
probs[key] /= count
total_prob = sum(probs.itervalues())
assert_close(total_prob, 1.0, tol=1e-2, err_msg='total_prob is biased')
return counts, probs
def assert_counts_match_probs(counts, probs, tol=1e-3):
'''
Check goodness of fit of observed counts to predicted probabilities
using Pearson's chi-squared test.
Inputs:
- counts : key -> int
- probs : key -> float
'''
keys = counts.keys()
probs = [probs[key] for key in keys]
counts = [counts[key] for key in keys]
total_count = sum(counts)
print 'EXPECT\tACTUAL\tVALUE'
for prob, count, key in sorted(izip(probs, counts, keys), reverse=True):
expect = prob * total_count
print '{:0.1f}\t{}\t{}'.format(expect, count, key)
<|code_end|>
, determine the next line of code. You have imports:
import os
import glob
import math
import numpy
import importlib
import distributions
import distributions.dbg.random
import distributions.hp.random
import traceback
from collections import defaultdict
from itertools import izip
from numpy.testing import assert_array_almost_equal
from nose import SkipTest
from nose.tools import assert_true, assert_less, assert_equal
from distributions.util import multinomial_goodness_of_fit
and context (class names, function names, or code) available:
# Path: distributions/util.py
# def multinomial_goodness_of_fit(
# probs,
# counts,
# total_count,
# truncated=False,
# plot=False):
# """
# Pearson's chi^2 test, on possibly truncated data.
# http://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
#
# Returns:
# p-value of truncated multinomial sample.
# """
# assert len(probs) == len(counts)
# assert truncated or total_count == sum(counts)
# chi_squared = 0
# dof = 0
# if plot:
# print_histogram(probs, counts)
# for p, c in zip(probs, counts):
# if p == 1:
# return 1 if c == total_count else 0
# assert p < 1, 'bad probability: %g' % p
# if p > 0:
# mean = total_count * p
# variance = total_count * p * (1 - p)
# assert variance > 1,\
# 'WARNING goodness of fit is inaccurate; use more samples'
# chi_squared += (c - mean) ** 2 / variance
# dof += 1
# else:
# print 'WARNING zero probability in goodness-of-fit test'
# if c > 0:
# return float('inf')
#
# if not truncated:
# dof -= 1
#
# survival = scipy.stats.chi2.sf(chi_squared, dof)
# return survival
. Output only the next line. | gof = multinomial_goodness_of_fit(probs, counts, total_count) |
Predict the next line for this snippet: <|code_start|>
def _test_normals(nich, niw):
mu = np.array([30.0])
kappa = 0.3
psi = np.array([[2.]])
nu = 3
# make the NIW case
niw_shared = niw.Shared()
niw_shared.load({'mu': mu, 'kappa': kappa, 'psi': psi, 'nu': nu})
niw_group = niw.Group()
niw_group.init(niw_shared)
# make the NIX case
nix_shared = nich.Shared()
nix_shared.load({
'mu': mu[0],
'kappa': kappa,
'sigmasq': psi[0, 0] / nu,
'nu': nu
})
nix_group = nich.Group()
nix_group.init(nix_shared)
data = np.array([4., 54., 3., -12., 7., 10.])
for d in data:
niw_group.add_value(niw_shared, np.array([d]))
nix_group.add_value(nix_shared, d)
# check marginals
<|code_end|>
with the help of current file imports:
import numpy as np
from nose import SkipTest
from distributions.tests.util import assert_close
from distributions.dbg.models import nich, niw
from distributions.lp.models import nich, niw
and context from other files:
# Path: distributions/tests/util.py
# def assert_close(lhs, rhs, tol=TOL, err_msg=None):
# try:
# if isinstance(lhs, dict):
# assert_true(
# isinstance(rhs, dict),
# 'type mismatch: {} vs {}'.format(type(lhs), type(rhs)))
# assert_equal(set(lhs.keys()), set(rhs.keys()))
# for key, val in lhs.iteritems():
# msg = '{}[{}]'.format(err_msg or '', key)
# assert_close(val, rhs[key], tol, msg)
# elif isinstance(lhs, float) or isinstance(lhs, numpy.float64):
# assert_true(
# isinstance(rhs, float) or isinstance(rhs, numpy.float64),
# 'type mismatch: {} vs {}'.format(type(lhs), type(rhs)))
# diff = abs(lhs - rhs)
# norm = 1 + abs(lhs) + abs(rhs)
# msg = '{} off by {}% = {}'.format(
# err_msg or '',
# 100 * diff / norm,
# diff)
# assert_less(diff, tol * norm, msg)
# elif isinstance(lhs, numpy.ndarray) or isinstance(rhs, numpy.ndarray):
# assert_true(
# (isinstance(lhs, numpy.ndarray) or isinstance(lhs, list)) and
# (isinstance(rhs, numpy.ndarray) or isinstance(rhs, list)),
# 'type mismatch: {} vs {}'.format(type(lhs), type(rhs)))
# decimal = int(round(-math.log10(tol)))
# assert_array_almost_equal(
# lhs,
# rhs,
# decimal=decimal,
# err_msg=(err_msg or ''))
# elif isinstance(lhs, list) or isinstance(lhs, tuple):
# assert_true(
# isinstance(rhs, list) or isinstance(rhs, tuple),
# 'type mismatch: {} vs {}'.format(type(lhs), type(rhs)))
# for pos, (x, y) in enumerate(izip(lhs, rhs)):
# msg = '{}[{}]'.format(err_msg or '', pos)
# assert_close(x, y, tol, msg)
# else:
# assert_equal(lhs, rhs, err_msg)
# except Exception:
# print err_msg or ''
# print 'actual = {}'.format(print_short(lhs))
# print 'expected = {}'.format(print_short(rhs))
# raise
, which may contain function names, class names, or code. Output only the next line. | assert_close(niw_group.score_data(niw_shared), |
Continue the code snippet: <|code_start|>
class Group(GroupIoMixin):
def __init__(self):
self.count = None
self.sum = None
def init(self, shared):
self.count = 0
self.sum = 0
def add_value(self, shared, value):
self.count += 1
self.sum += int(value)
def add_repeated_value(self, shared, value, count):
self.count += count
self.sum += count * int(value)
def remove_value(self, shared, value):
self.count -= 1
self.sum -= int(value)
def merge(self, shared, source):
self.count += source.count
self.sum += source.sum
def score_value(self, shared, value):
post = shared.plus_group(self)
alpha = post.alpha + shared.r
beta = post.beta + value
<|code_end|>
. Use current file imports:
from distributions.dbg.special import gammaln
from distributions.dbg.random import sample_beta, sample_negative_binomial
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context (classes, functions, or code) from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | score = gammaln(post.alpha + post.beta) |
Continue the code snippet: <|code_start|> score += gammaln(post.beta) - gammaln(shared.beta)
return score
def sample_value(self, shared):
sampler = Sampler()
sampler.init(shared, self)
return sampler.eval(shared)
def dump(self):
return {
'count': self.count,
'sum': self.sum,
}
def load(self, raw):
self.count = int(raw['count'])
self.sum = int(raw['sum'])
def protobuf_load(self, message):
self.count = int(message.count)
self.sum = int(message.sum)
def protobuf_dump(self, message):
message.count = self.count
message.sum = self.sum
class Sampler(object):
def init(self, shared, group=None):
post = shared if group is None else shared.plus_group(group)
<|code_end|>
. Use current file imports:
from distributions.dbg.special import gammaln
from distributions.dbg.random import sample_beta, sample_negative_binomial
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and context (classes, functions, or code) from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | self.p = sample_beta(post.alpha, post.beta) |
Predict the next line after this snippet: <|code_start|> def sample_value(self, shared):
sampler = Sampler()
sampler.init(shared, self)
return sampler.eval(shared)
def dump(self):
return {
'count': self.count,
'sum': self.sum,
}
def load(self, raw):
self.count = int(raw['count'])
self.sum = int(raw['sum'])
def protobuf_load(self, message):
self.count = int(message.count)
self.sum = int(message.sum)
def protobuf_dump(self, message):
message.count = self.count
message.sum = self.sum
class Sampler(object):
def init(self, shared, group=None):
post = shared if group is None else shared.plus_group(group)
self.p = sample_beta(post.alpha, post.beta)
def eval(self, shared):
<|code_end|>
using the current file's imports:
from distributions.dbg.special import gammaln
from distributions.dbg.random import sample_beta, sample_negative_binomial
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
and any relevant context from other files:
# Path: distributions/dbg/special.py
#
# Path: distributions/dbg/random.py
# LOG = logging.getLogger(__name__)
# S = inner(inner(z, inv(sigma)), z)
# X = multivariate_normal(mean=numpy.zeros(d), cov=Lambda, size=nu)
# S = numpy.dot(X.T, X)
# T = numpy.zeros((d, d))
# D, = mu0.shape
# Z = 0.
# def seed(x):
# def sample_discrete_log(scores):
# def sample_bernoulli(prob):
# def sample_discrete(probs, total=None):
# def sample_normal(mu, sigmasq):
# def sample_chi2(nu):
# def sample_student_t(dof, mu, Sigma):
# def score_student_t(x, nu, mu, sigma):
# def sample_wishart_naive(nu, Lambda):
# def sample_wishart(nu, Lambda):
# def sample_wishart_v2(nu, Lambda):
# def sample_inverse_wishart(nu, S):
# def sample_normal_inverse_wishart(mu0, lambda0, psi0, nu0):
# def sample_partition_from_counts(items, counts):
# def sample_stick(gamma, tol=1e-3):
# def sample_negative_binomial(p, r):
. Output only the next line. | return sample_negative_binomial(self.p, shared.r) |
Next line prediction: <|code_start|>"""Setup Celery."""
_logger = logging.getLogger(__name__)
def _use_sqs():
"""Check if worker should use Amazon SQS.
:return: True if worker should use Amazon SQS
"""
<|code_end|>
. Use current file imports:
(import os
import logging
from urllib.parse import quote
from celery.signals import setup_logging
from f8a_worker.defaults import configuration)
and context including class names, function names, or small code snippets from other files:
# Path: f8a_worker/defaults.py
# class F8AConfiguration(object):
# def _make_postgres_string(password):
# def is_local_deployment(cls):
# def _rate_limit_exceeded(cls, headers):
# def _decide_token_usage(cls):
# def select_random_github_token(cls):
# def libraries_io_project_url(cls, ecosystem, name):
# def dependency_check_script_path(self):
# BIGQUERY_JSON_KEY = environ.get('GITHUB_CONSUMER_KEY', 'not-set')
# BROKER_CONNECTION = "amqp://guest@{host}:{port}".format(
# host=environ.get('RABBITMQ_SERVICE_SERVICE_HOST', 'coreapi-broker'),
# port=environ.get('RABBITMQ_SERVICE_SERVICE_PORT', '5672'))
# GIT_USER_NAME = environ.get('GIT_USER_NAME', 'f8a')
# GIT_USER_EMAIL = environ.get('GIT_USER_EMAIL', 'f8a@f8a')
# GITHUB_TOKEN = environ.get('GITHUB_TOKEN', 'not-set').split(',')
# GITHUB_API = "https://api.github.com/"
# LIBRARIES_IO_TOKEN = environ.get('LIBRARIES_IO_TOKEN', 'not-set')
# LIBRARIES_IO_API = 'https://libraries.io/api'
# NPMJS_CHANGES_URL = environ.get('NPMJS_CHANGES_URL',
# "https://skimdb.npmjs.com/registry/"
# "_changes?descending=true&include_docs=true&feed=continuous")
# UNQUOTED_POSTGRES_CONNECTION = _make_postgres_string(environ.get('POSTGRESQL_PASSWORD', ''))
# POSTGRES_CONNECTION = _make_postgres_string(
# quote(environ.get('POSTGRESQL_PASSWORD', ''), safe=''))
# WORKER_DATA_DIR = environ.get('WORKER_DATA_DIR', 'not-set')
# NPM_DATA_DIR = path.join(environ.get('HOME', '.npm'))
# SCANCODE_LICENSE_SCORE = environ.get('SCANCODE_LICENSE_SCORE', '20') # scancode's default is 0
# SCANCODE_TIMEOUT = environ.get('SCANCODE_TIMEOUT', '120') # scancode's default is 120
# SCANCODE_PROCESSES = environ.get('SCANCODE_PROCESSES', '1') # scancode's default is 1
# SCANCODE_PATH = environ.get('SCANCODE_PATH', '/opt/scancode-toolkit/')
# SCANCODE_IGNORE = ['*.pyc', '*.so', '*.dll', '*.rar', '*.jar',
# '*.zip', '*.tar', '*.tar.gz', '*.tar.xz', '*.png'] # don't scan binaries
# AWS_S3_REGION = environ.get('AWS_S3_REGION')
# AWS_S3_ACCESS_KEY_ID = environ.get('AWS_S3_ACCESS_KEY_ID')
# AWS_S3_SECRET_ACCESS_KEY = environ.get('AWS_S3_SECRET_ACCESS_KEY')
# S3_ENDPOINT_URL = environ.get('S3_ENDPOINT_URL')
# DEPLOYMENT_PREFIX = environ.get('DEPLOYMENT_PREFIX')
# BAYESIAN_SYNC_S3 = int(environ.get('BAYESIAN_SYNC_S3', 0)) == 1
# AWS_SQS_ACCESS_KEY_ID = environ.get('AWS_SQS_ACCESS_KEY_ID')
# AWS_SQS_SECRET_ACCESS_KEY = environ.get('AWS_SQS_SECRET_ACCESS_KEY')
# CELERY_RESULT_BACKEND = environ.get('CELERY_RESULT_BACKEND')
# AWS_SQS_REGION = environ.get('AWS_SQS_REGION')
# JAVANCSS_PATH = environ.get('JAVANCSS_PATH')
# OWASP_DEP_CHECK_PATH = environ.get('OWASP_DEP_CHECK_PATH')
# USAGE_THRESHOLD = int(environ.get("LOW_USAGE_THRESHOLD", "5000"))
# USAGE_THRESHOLD = 5000
# POPULARITY_THRESHOLD = int(environ.get("LOW_POPULARITY_THRESHOLD", "5000"))
# POPULARITY_THRESHOLD = 5000
# BAYESIAN_GREMLIN_HTTP_SERVICE_HOST = environ.get("BAYESIAN_GREMLIN_HTTP_SERVICE_HOST",
# "localhost")
# BAYESIAN_GREMLIN_HTTP_SERVICE_PORT = environ.get("BAYESIAN_GREMLIN_HTTP_SERVICE_PORT", "8182")
. Output only the next line. | has_key_id = configuration.AWS_SQS_ACCESS_KEY_ID is not None |
Based on the snippet: <|code_start|>"""Git Operations Task."""
_dir_path = "/tmp/clonedRepos"
worker_count = int(os.getenv('FUTURES_SESSION_WORKER_COUNT', '100'))
_session = FuturesSession(max_workers=worker_count)
F8_API_BACKBONE_HOST = os.getenv('F8_API_BACKBONE_HOST', 'http://f8a-server-backbone:5000')
GEMINI_SERVER_URL = os.getenv('F8A_GEMINI_SERVER_SERVICE_HOST', 'http://f8a-gemini-server:5000')
AUTH_KEY = os.getenv('OS_AUTH_KEY', '')
<|code_end|>
, predict the immediate next line with the help of imports:
import json
import os
from f8a_worker.base import BaseTask
from git import Repo
from werkzeug.datastructures import FileStorage
from f8a_utils.dependency_finder import DependencyFinder
from requests_futures.sessions import FuturesSession
and context (classes, functions, sometimes code) from other files:
# Path: f8a_worker/base.py
# class BaseTask(SelinonTask):
# """Base class for selinon tasks."""
#
# description = 'Root of the Task object hierarchy'
# schema_ref = _schema = None
# # set this to False if your task shouldn't get the `_audit` value added to result dict
# add_audit_info = True
#
# def __init__(self, *args, **kwargs):
# """Initialize object."""
# super().__init__(*args, **kwargs)
# self.log = get_task_logger(self.__class__.__name__)
# self.configuration = configuration
#
# @classmethod
# def _strict_assert(cls, assert_cond):
# """Assert on condition.
#
# If condition is False, fatal error is raised so task is not retried.
# """
# if not assert_cond:
# raise FatalTaskError("Strict assert failed in task '%s'" % cls.__name__)
#
# @staticmethod
# def _add_audit_info(task_result: dict,
# task_start: datetime,
# task_end: datetime,
# node_args):
# """Add the audit and release information to the result dictionary.
#
# :param task_result: dict, task result
# :param task_start: datetime, the start of the task
# :param task_end: datetime, the end of the task
# :param node_args: arguments passed to flow/node
# """
# task_result['_audit'] = {
# 'started_at': json_serial(task_start),
# 'ended_at': json_serial(task_end),
# 'version': 'v1'
# }
#
# ecosystem_name = node_args.get('ecosystem')
# task_result['_release'] = '{}:{}:{}'.format(ecosystem_name,
# node_args.get('name'),
# node_args.get('version'))
#
# def run(self, node_args):
# """To be transparently called by Selinon.
#
# Selinon transparently calls run(), which takes care of task audit and
# some additional checks and calls execute().
# """
# # SQS guarantees 'deliver at least once', so there could be multiple
# # messages of a type, give up immediately
# if self.storage and isinstance(self.storage, (BayesianPostgres, PackagePostgres)):
# if self.storage.get_worker_id_count(self.task_id) > 0:
# raise TaskAlreadyExistsError("Task with ID '%s'"
# " was already processed" % self.task_id)
#
# start = datetime.utcnow()
# try:
# result = self.execute(node_args)
#
# except Exception as exc:
# if self.add_audit_info:
# # `_audit` key is added to every analysis info submitted
# end = datetime.utcnow()
# result = dict()
#
# self._add_audit_info(
# task_result=result,
# task_start=start,
# task_end=end,
# node_args=node_args,
# )
#
# # write the audit info to the storage
# self.storage.store_error(
# node_args=node_args,
# flow_name=self.flow_name,
# task_name=self.task_name,
# task_id=self.task_id,
# exc_info=sys.exc_info(),
# result=result
# )
#
# raise exc
#
# finally:
# # remove all files that were downloaded for this task
# ObjectCache.wipe()
#
# end = datetime.utcnow()
#
# if result:
# # Ensure result complies with the defined schema (if any) before saving
# self.validate_result(result)
#
# if result is None:
# # Keep track of None results and add _audit and _release keys
# result = {}
#
# if self.add_audit_info:
# # `_audit` key is added to every analysis info submitted
# self._add_audit_info(
# task_result=result,
# task_start=start,
# task_end=end,
# node_args=node_args,
# )
#
# return result
#
# @classmethod
# def create_test_instance(cls, flow_name=None, task_name=None, parent=None, task_id=None,
# dispatcher_id=None):
# """Create instance of task for tests."""
# # used in tests so we do not do ugly things like this, this correctly done by dispatcher
# return cls(flow_name, task_name or cls.__name__, parent, task_id, dispatcher_id)
#
# def validate_result(self, result):
# """Ensure that results comply with the task schema, if defined.
#
# Tasks define a schema by setting schema_ref appropriately.
# Schemas are retrieved from workers/schemas/generated via pkgutil.
# """
# # Skip validation if no schema is defined
# schema_ref = self.schema_ref
# if schema_ref is None:
# return
# # Load schema if not yet loaded
# schema = self._schema
# if schema is None:
# schema = self._schema = load_worker_schema(schema_ref)
# # Validate result against schema
# try:
# jsonschema.validate(result, schema)
# except jsonschema.exceptions.ValidationError as e:
# raise FatalTaskError('Schema validation failed: {e}'.format(e=str(e)))
# # Record the validated schema details
# set_schema_ref(result, schema_ref)
#
# def execute(self, _arguments):
# """Return dictionary with results - must be implemented by any subclass."""
# raise NotImplementedError("Task not implemented")
. Output only the next line. | class GitOperationTask(BaseTask): |
Given snippet: <|code_start|>
@pytest.mark.parametrize('data, expected', [
({'pom.xml': {'dependencies': {'compile': {'g:a::': '1.0'}}}},
{'dependencies': ['g:a 1.0']}),
({'pom.xml': {'dependencies': {'runtime': {'g:a::': '1.0'}}}},
{'dependencies': ['g:a 1.0']}),
({'pom.xml': {'dependencies': {'provided': {'g:a::': '1.0'}}}},
{'dependencies': ['g:a 1.0']}),
({'pom.xml': {'dependencies': {'test': {'g:a::': '1.0'}}}},
{'devel_dependencies': ['g:a 1.0']}),
({'pom.xml': {'dependencies': {'compile': {'g:a::': '1.0', 'g2:a2::': '1.0.3-SNAPSHOT'},
'test': {'t:t::': '0'},
'runtime': {'r:r::': 'version'},
'provided': {'p:p::': '1000'}}}},
{'dependencies': sorted(['g:a 1.0', 'g2:a2 1.0.3-SNAPSHOT', 'r:r version', 'p:p 1000']),
'devel_dependencies': sorted(['t:t 0'])}),
({'pom.xml': {'scm_url': 'git@github.com:fabric8-analytics/fabric8-analytics-worker.git'}},
{'code_repository': {'url':
'git@github.com:fabric8-analytics/fabric8-analytics-worker.git',
'type': 'git'}}),
({'pom.xml': {'licenses': ['ASL 2.0', 'MIT']}},
{'declared_licenses': ['ASL 2.0', 'MIT']}),
({'pom.xml': {'description': 'Ich bin ein Bayesianer'}},
{'description': 'Ich bin ein Bayesianer'}),
({'pom.xml': {'url': 'https://github.com/fabric8-analytics/fabric8-analytics-worker'}},
{'homepage': 'https://github.com/fabric8-analytics/fabric8-analytics-worker'}),
])
def test_transforming_java_data(data, expected):
"""Test normalizing of pom.xml data."""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pytest
from f8a_worker.data_normalizer import MavenDataNormalizer
and context:
# Path: f8a_worker/data_normalizer/java.py
# class MavenDataNormalizer(AbstractDataNormalizer):
# """Maven data normalizer.
#
# This normalizer handles data extracted from pom.xml files by mercator-go.
# """
#
# _key_map = (
# ('name',),
# ('version',),
# ('description',),
# ('url', 'homepage'),
# ('licenses', 'declared_licenses')
# )
#
# def __init__(self, mercator_json):
# """Initialize function."""
# pom = mercator_json.get('pom.xml', {})
# super().__init__(pom)
#
# def normalize(self):
# """Normalize output from Mercator for pom.xml (Maven)."""
# if not self._raw_data:
# return {}
#
# if self._data['name'] is None:
# self._data['name'] = "{}:{}".format(
# self._raw_data.get('groupId'), self._raw_data.get('artifactId')
# )
# # dependencies with scope 'compile' and 'runtime' are needed at runtime;
# # dependencies with scope 'provided' are not necessarily runtime dependencies,
# # but they are commonly used for example in web applications
# dependencies_dict = self._raw_data.get('dependencies', {}).get('compile', {})
# dependencies_dict.update(self._raw_data.get('dependencies', {}).get('runtime', {}))
# dependencies_dict.update(self._raw_data.get('dependencies', {}).get('provided', {}))
# # dependencies with scope 'test' are only needed for testing;
# dev_dependencies_dict = self._raw_data.get('dependencies', {}).get('test', {})
#
# self._data['dependencies'] = [
# k.rstrip(':') + ' ' + v for k, v in dependencies_dict.items()
# ]
#
# self._data['devel_dependencies'] = [
# k.rstrip(':') + ' ' + v for k, v in dev_dependencies_dict.items()
# ]
#
# # handle code_repository
# if 'scm_url' in self._raw_data:
# # TODO: there's no way we can tell 100 % what the type is, but we could
# # try to handle at least some cases, e.g. github will always be git etc
# repo_type = 'git' if parse_gh_repo(self._raw_data['scm_url']) else 'unknown'
# self._data['code_repository'] = {
# 'url': self._raw_data['scm_url'], 'type': repo_type
# }
#
# return self._data
which might include code, classes, or functions. Output only the next line. | transformed_data = MavenDataNormalizer(data).normalize() |
Continue the code snippet: <|code_start|>"""Tests for NewInitPackageFlow module."""
data_v1 = {
"ecosystem": "dummy_eco",
"name": "dummy_name"
}
data_v2 = {
"ecosystem": "golang",
"name": "dummy_name"
}
class TestInitPackageFlowNew(TestCase):
"""Tests for the NewInitPackageFlow task."""
def _strict_assert(self, assert_cond):
if not assert_cond:
False
def test_execute(self):
"""Tests for 'execute'."""
<|code_end|>
. Use current file imports:
from unittest import TestCase
from selinon import FatalTaskError
from f8a_worker.workers.new_init_package_analysis_flow import NewInitPackageAnlysisFlow
from unittest import mock
and context (classes, functions, or code) from other files:
# Path: f8a_worker/workers/new_init_package_analysis_flow.py
# class NewInitPackageAnlysisFlow(BaseTask):
# """Initialize package-level analysis."""
#
# def execute(self, arguments):
# """Task code.
#
# :param arguments: dictionary with task arguments
# :return: {}, results
# """
# self._strict_assert(isinstance(arguments.get('ecosystem'), str))
# self._strict_assert(isinstance(arguments.get('name'), str))
#
# if arguments['ecosystem'] not in _SUPPORTED_ECOSYSTEMS:
# raise FatalTaskError('Unknown ecosystem: %r' % arguments['ecosystem'])
#
# # Don't ingest for private packages
# if not is_pkg_public(arguments['ecosystem'], arguments['name']):
# logger.info("Private package ingestion ignored %s %s",
# arguments['ecosystem'], arguments['name'])
# raise NotABugFatalTaskError("Private package alert {} {}".format(
# arguments['ecosystem'], arguments['name']))
#
# return arguments
. Output only the next line. | self.assertRaises(FatalTaskError, NewInitPackageAnlysisFlow.execute, self, data_v1) |
Next line prediction: <|code_start|>assert fieldGreaterEqual
assert fieldInt
assert fieldLenEqual
assert fieldLenGreater
assert fieldLenGreaterEqual
assert fieldLenLess
assert fieldLenNotEqual
assert fieldLess
assert fieldLessEqual
assert fieldList
assert fieldNone
assert fieldNotEqual
assert fieldStr
assert fieldUrlNetloc
assert fieldUrlPath
assert fieldUrlScheme
assert httpStatus
assert isBool
assert isDict
assert isFloat
assert isInt
assert isList
assert isNone
assert isStr
def isGhRepo(node_args, key):
"""Predicate if the repository is on GitHub."""
try:
val = reduce(lambda m, k: m[k], key if isinstance(key, list) else [key], node_args)
<|code_end|>
. Use current file imports:
(from functools import reduce
from f8a_worker.utils import parse_gh_repo
from selinon.predicates import alwaysFalse
from selinon.predicates import alwaysTrue
from selinon.predicates import argsEmpty
from selinon.predicates import argsFieldBool
from selinon.predicates import argsFieldContain
from selinon.predicates import argsFieldDict
from selinon.predicates import argsFieldEqual
from selinon.predicates import argsFieldExist
from selinon.predicates import argsFieldFloat
from selinon.predicates import argsFieldGreater
from selinon.predicates import argsFieldGreaterEqual
from selinon.predicates import argsFieldInt
from selinon.predicates import argsFieldLenEqual
from selinon.predicates import argsFieldLenGreater
from selinon.predicates import argsFieldLenGreaterEqual
from selinon.predicates import argsFieldLenLess
from selinon.predicates import argsFieldLenNotEqual
from selinon.predicates import argsFieldLess
from selinon.predicates import argsFieldLessEqual
from selinon.predicates import argsFieldList
from selinon.predicates import argsFieldNone
from selinon.predicates import argsFieldNotEqual
from selinon.predicates import argsFieldStr
from selinon.predicates import argsFieldUrlNetloc
from selinon.predicates import argsFieldUrlPath
from selinon.predicates import argsFieldUrlScheme
from selinon.predicates import argsIsBool
from selinon.predicates import argsIsDict
from selinon.predicates import argsIsFloat
from selinon.predicates import argsIsInt
from selinon.predicates import argsIsList
from selinon.predicates import argsIsNone
from selinon.predicates import argsIsStr
from selinon.predicates import empty
from selinon.predicates import envEqual
from selinon.predicates import envExist
from selinon.predicates import fieldBool
from selinon.predicates import fieldContain
from selinon.predicates import fieldDict
from selinon.predicates import fieldEqual
from selinon.predicates import fieldExist
from selinon.predicates import fieldFloat
from selinon.predicates import fieldGreater
from selinon.predicates import fieldGreaterEqual
from selinon.predicates import fieldInt
from selinon.predicates import fieldLenEqual
from selinon.predicates import fieldLenGreater
from selinon.predicates import fieldLenGreaterEqual
from selinon.predicates import fieldLenLess
from selinon.predicates import fieldLenNotEqual
from selinon.predicates import fieldLess
from selinon.predicates import fieldLessEqual
from selinon.predicates import fieldList
from selinon.predicates import fieldNone
from selinon.predicates import fieldNotEqual
from selinon.predicates import fieldStr
from selinon.predicates import fieldUrlNetloc
from selinon.predicates import fieldUrlPath
from selinon.predicates import fieldUrlScheme
from selinon.predicates import httpStatus
from selinon.predicates import isBool
from selinon.predicates import isDict
from selinon.predicates import isFloat
from selinon.predicates import isInt
from selinon.predicates import isList
from selinon.predicates import isNone
from selinon.predicates import isStr)
and context including class names, function names, or small code snippets from other files:
# Path: f8a_worker/utils.py
# def parse_gh_repo(potential_url):
# """Cover the following variety of URL forms for Github repo referencing.
#
# 1) www.github.com/foo/bar
# 2) (same as above, but with ".git" in the end)
# 3) (same as the two above, but without "www.")
# # all of the three above, but starting with "http://", "https://", "git://" or "git+https://"
# 4) git@github.com:foo/bar
# 5) (same as above, but with ".git" in the end)
# 6) (same as the two above but with "ssh://" in front or with "git+ssh" instead of "git")
#
# We return repository name in form `<username>/<reponame>` or `None` if this does not
# seem to be a Github repo (or if someone invented yet another form that we can't parse yet...)
#
# Notably, the Github repo *must* have exactly username and reponame, nothing else and nothing
# more. E.g. `github.com/<username>/<reponame>/<something>` is *not* recognized.
# """
# # TODO: reduce cyclomatic complexity
# if not potential_url:
# return None
#
# repo_name = None
# # transform 4-6 to a URL-like string, so that we can handle it together with 1-3
# if '@' in potential_url:
# split = potential_url.split('@')
# if len(split) == 2 and split[1].startswith('github.com:'):
# potential_url = 'http://' + split[1].replace('github.com:', 'github.com/')
#
# # make it parsable by urlparse if it doesn't contain scheme
# if not potential_url.startswith(('http://', 'https://', 'git://', 'git+https://')):
# potential_url = 'http://' + potential_url
#
# # urlparse should handle it now
# parsed = urlparse(potential_url)
# if parsed.netloc in ['github.com', 'www.github.com'] and \
# parsed.scheme in ['http', 'https', 'git', 'git+https']:
# repo_name = parsed.path
# if repo_name.endswith('.git'):
# repo_name = repo_name[:-len('.git')]
#
# if repo_name:
# repo_name = repo_name.strip('/')
# if len(repo_name.split('/')) > 2:
# temp_list = repo_name.split('/')
# repo_name = temp_list[0] + '/' + temp_list[1]
# if repo_name.count('/') != 1:
# return None
#
# return repo_name
. Output only the next line. | if parse_gh_repo(val): |
Next line prediction: <|code_start|> if isinstance(name_email_dict.get(email_key), str):
if name_email_str:
name_email_str += ' '
name_email_str += '<' + name_email_dict[email_key] + '>'
return name_email_str
@staticmethod
def _rf(iterable):
"""Remove false/empty/None items from iterable."""
return list(filter(None, iterable))
@staticmethod
def _split_keywords(keywords, separator=None):
"""Split keywords (string) with separator.
If separator is not specified, use either colon or whitespace.
"""
if keywords is None:
return []
if isinstance(keywords, list):
return keywords
if separator is None:
separator = ',' if ',' in keywords else ' '
keywords = keywords.split(separator)
keywords = [kw.strip() for kw in keywords]
return keywords
@staticmethod
def _identify_gh_repo(homepage):
"""Return code repository dict filled with homepage."""
<|code_end|>
. Use current file imports:
(import abc
from f8a_worker.utils import parse_gh_repo)
and context including class names, function names, or small code snippets from other files:
# Path: f8a_worker/utils.py
# def parse_gh_repo(potential_url):
# """Cover the following variety of URL forms for Github repo referencing.
#
# 1) www.github.com/foo/bar
# 2) (same as above, but with ".git" in the end)
# 3) (same as the two above, but without "www.")
# # all of the three above, but starting with "http://", "https://", "git://" or "git+https://"
# 4) git@github.com:foo/bar
# 5) (same as above, but with ".git" in the end)
# 6) (same as the two above but with "ssh://" in front or with "git+ssh" instead of "git")
#
# We return repository name in form `<username>/<reponame>` or `None` if this does not
# seem to be a Github repo (or if someone invented yet another form that we can't parse yet...)
#
# Notably, the Github repo *must* have exactly username and reponame, nothing else and nothing
# more. E.g. `github.com/<username>/<reponame>/<something>` is *not* recognized.
# """
# # TODO: reduce cyclomatic complexity
# if not potential_url:
# return None
#
# repo_name = None
# # transform 4-6 to a URL-like string, so that we can handle it together with 1-3
# if '@' in potential_url:
# split = potential_url.split('@')
# if len(split) == 2 and split[1].startswith('github.com:'):
# potential_url = 'http://' + split[1].replace('github.com:', 'github.com/')
#
# # make it parsable by urlparse if it doesn't contain scheme
# if not potential_url.startswith(('http://', 'https://', 'git://', 'git+https://')):
# potential_url = 'http://' + potential_url
#
# # urlparse should handle it now
# parsed = urlparse(potential_url)
# if parsed.netloc in ['github.com', 'www.github.com'] and \
# parsed.scheme in ['http', 'https', 'git', 'git+https']:
# repo_name = parsed.path
# if repo_name.endswith('.git'):
# repo_name = repo_name[:-len('.git')]
#
# if repo_name:
# repo_name = repo_name.strip('/')
# if len(repo_name.split('/')) > 2:
# temp_list = repo_name.split('/')
# repo_name = temp_list[0] + '/' + temp_list[1]
# if repo_name.count('/') != 1:
# return None
#
# return repo_name
. Output only the next line. | if parse_gh_repo(homepage): |
Using the snippet: <|code_start|> "ecosystem": "golang",
"name": "dummy_name",
"version": "dummy_version"
}
data_v3 = {
"ecosystem": "golang",
"version": "dummy_version"
}
data_v4 = {
"name": "dummy_name",
"version": "dummy_version"
}
data_v5 = {
"ecosystem": "golang",
"name": "dummy_name",
}
class TestInitPackageFlowNew(TestCase):
"""Tests for the NewInitPackageFlow task."""
def _strict_assert(self, assert_cond):
if not assert_cond:
raise FatalTaskError("Strict assert failed.")
def test_execute(self):
"""Tests for 'execute'."""
<|code_end|>
, determine the next line of code. You have imports:
from unittest import TestCase
from selinon import FatalTaskError
from f8a_worker.workers.new_init_package_flow import NewInitPackageFlow
from unittest import mock
and context (class names, function names, or code) available:
# Path: f8a_worker/workers/new_init_package_flow.py
# class NewInitPackageFlow(BaseTask):
# """Initialize package-version-level analysis."""
#
# def execute(self, arguments):
# """Task code.
#
# :param arguments: dictionary with task arguments
# :return: {}, results
# """
# self._strict_assert(isinstance(arguments.get('ecosystem'), str))
# self._strict_assert(isinstance(arguments.get('name'), str))
# self._strict_assert(isinstance(arguments.get('version'), str))
#
# if arguments['ecosystem'] not in _SUPPORTED_ECOSYSTEMS:
# raise FatalTaskError('Unknown ecosystem: %r' % arguments['ecosystem'])
#
# # Don't ingest for private packages
# if not is_pkg_public(arguments['ecosystem'], arguments['name']):
# logger.info("Private package ingestion ignored %s %s",
# arguments['ecosystem'], arguments['name'])
# raise NotABugFatalTaskError("Private package alert {} {}".format(
# arguments['ecosystem'], arguments['name']))
#
# return arguments
. Output only the next line. | self.assertRaises(FatalTaskError, NewInitPackageFlow.execute, self, data_v1) |
Based on the snippet: <|code_start|>"""Tests covering code in start.py."""
_SQS_MSG_LIFETIME_IN_SEC = (int(os.environ.get('SQS_MSG_LIFETIME', '24')) + 1) * 60 * 60
class TestStartFunctions():
"""Test functions from start.py."""
def test_check_hung_task(self):
"""Test _check_hung_task."""
flow_info = {'node_args': {}}
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
import os
import time
from f8a_worker.monkey_patch import _check_hung_task
and context (classes, functions, sometimes code) from other files:
# Path: f8a_worker/monkey_patch.py
# def _check_hung_task(self, flow_info):
# """Remove tasks which are rotating in dispatcher for more than given time.
#
# :param flow_info: information about the current flow
# """
# node_args = flow_info['node_args']
# flow_start_time = node_args.get('flow_start_time', 0)
# if flow_start_time > 0:
# now = time.time()
# if now - flow_start_time > _SQS_MSG_LIFETIME_IN_SEC:
# exc = NotABugFatalTaskError("Flow could not be completed in configured time limit. "
# "It is being stopped forcefully. "
# "Flow information: {}".format(flow_info))
# raise self.retry(max_retries=0, exc=exc)
# else:
# # If message is arrived for the first time, then put current time in node arguments
# # and consider it as starting time of the flow.
# node_args['flow_start_time'] = time.time()
. Output only the next line. | _check_hung_task(self, flow_info) |
Given the code snippet: <|code_start|> "ecosystem": "dummy_eco",
"name": "dummy_name",
"version": "dummy_version"
}
class Response:
"""Custom response class."""
status_code = 200
text = 'dummy_data'
class ErrorResponse:
"""Custom response class for error."""
status_code = 404
text = 'dummy_data'
class TestNewPackageAnalysisGraphImporterTask(TestCase):
"""Tests for the NewGraphImporterTask task."""
def _strict_assert(self, assert_cond):
if not assert_cond:
False
@mock.patch('f8a_worker.workers.new_graph_importer.requests.post', return_value=ErrorResponse())
def test_execute(self, _mock1):
"""Tests for 'execute'."""
<|code_end|>
, generate the next line using the imports in this file:
from unittest import TestCase, mock
from f8a_worker.workers.new_graph_importer import \
NewPackageAnalysisGraphImporterTask, \
NewPackageGraphImporterTask
and context (functions, classes, or occasionally code) from other files:
# Path: f8a_worker/workers/new_graph_importer.py
# class NewPackageAnalysisGraphImporterTask(BaseTask):
# """Ingest to graph node."""
#
# def execute(self, arguments):
# """Task code.
#
# :param arguments: dictionary with task arguments
# :return: {}, results
# """
# self._strict_assert(arguments.get('ecosystem'))
# self._strict_assert(arguments.get('name'))
#
# package_list = [
# {
# 'ecosystem': arguments.get('ecosystem'),
# 'name': arguments['name'],
# 'version': '',
# 'source_repo': arguments.get('ecosystem')
# }
# ]
#
# param = {
# 'select_ingest': ['github_details'],
# 'package_list': package_list
# }
#
# logger.info("v2_:_Invoke graph importer at url: '%s' for %s",
# _SELECTIVE_API_URL, param)
# # Calling Data Importer API end point to ingest data into graph db.
# response = requests.post(_SELECTIVE_API_URL, json=param)
#
# if response.status_code != 200:
# raise RuntimeError("v2_:_Failed to invoke graph import at '%s' for %s" %
# (_SELECTIVE_API_URL, param))
#
# logger.info("v2_:_Graph import succeeded with response: %s", response.text)
#
# class NewPackageGraphImporterTask(BaseTask):
# """Ingest to graph node."""
#
# def execute(self, arguments):
# """Task code.
#
# :param arguments: dictionary with task arguments
# :return: {}, results
# """
# self._strict_assert(arguments.get('ecosystem'))
# self._strict_assert(arguments.get('name'))
#
# package_list = [{
# 'ecosystem': arguments.get('ecosystem'),
# 'name': arguments['name'],
# 'version': arguments.get('version'),
# 'source_repo': arguments.get('ecosystem')
# }]
#
# param = {
# 'select_ingest': ['metadata'],
# 'package_list': package_list
# }
#
# logger.info("v2_:_Invoke graph importer at url: '%s' for %s",
# _SELECTIVE_API_URL, param)
# # Calling Data Importer API end point to ingest data into graph db.
# response = requests.post(_SELECTIVE_API_URL, json=param)
#
# if response.status_code != 200:
# raise RuntimeError("v2_:_Failed to invoke graph import at '%s' for %s" %
# (_SELECTIVE_API_URL, param))
#
# logger.info("v2_:_Graph import succeeded with response: %s", response.text)
. Output only the next line. | self.assertRaises(RuntimeError, NewPackageAnalysisGraphImporterTask.execute, self, data) |
Predict the next line after this snippet: <|code_start|>
class TestNewPackageAnalysisGraphImporterTask(TestCase):
"""Tests for the NewGraphImporterTask task."""
def _strict_assert(self, assert_cond):
if not assert_cond:
False
@mock.patch('f8a_worker.workers.new_graph_importer.requests.post', return_value=ErrorResponse())
def test_execute(self, _mock1):
"""Tests for 'execute'."""
self.assertRaises(RuntimeError, NewPackageAnalysisGraphImporterTask.execute, self, data)
@mock.patch('f8a_worker.workers.new_graph_importer.requests.post', return_value=Response())
def test_execute1(self, _mock1):
"""Tests for 'execute'."""
NewPackageAnalysisGraphImporterTask.execute(self, data)
class TestNewPackageGraphImporterTask(TestCase):
"""Tests for the NewGraphImporterTask task."""
def _strict_assert(self, assert_cond):
if not assert_cond:
False
@mock.patch('f8a_worker.workers.new_graph_importer.requests.post', return_value=ErrorResponse())
def test_execute(self, _mock1):
"""Tests for 'execute'."""
<|code_end|>
using the current file's imports:
from unittest import TestCase, mock
from f8a_worker.workers.new_graph_importer import \
NewPackageAnalysisGraphImporterTask, \
NewPackageGraphImporterTask
and any relevant context from other files:
# Path: f8a_worker/workers/new_graph_importer.py
# class NewPackageAnalysisGraphImporterTask(BaseTask):
# """Ingest to graph node."""
#
# def execute(self, arguments):
# """Task code.
#
# :param arguments: dictionary with task arguments
# :return: {}, results
# """
# self._strict_assert(arguments.get('ecosystem'))
# self._strict_assert(arguments.get('name'))
#
# package_list = [
# {
# 'ecosystem': arguments.get('ecosystem'),
# 'name': arguments['name'],
# 'version': '',
# 'source_repo': arguments.get('ecosystem')
# }
# ]
#
# param = {
# 'select_ingest': ['github_details'],
# 'package_list': package_list
# }
#
# logger.info("v2_:_Invoke graph importer at url: '%s' for %s",
# _SELECTIVE_API_URL, param)
# # Calling Data Importer API end point to ingest data into graph db.
# response = requests.post(_SELECTIVE_API_URL, json=param)
#
# if response.status_code != 200:
# raise RuntimeError("v2_:_Failed to invoke graph import at '%s' for %s" %
# (_SELECTIVE_API_URL, param))
#
# logger.info("v2_:_Graph import succeeded with response: %s", response.text)
#
# class NewPackageGraphImporterTask(BaseTask):
# """Ingest to graph node."""
#
# def execute(self, arguments):
# """Task code.
#
# :param arguments: dictionary with task arguments
# :return: {}, results
# """
# self._strict_assert(arguments.get('ecosystem'))
# self._strict_assert(arguments.get('name'))
#
# package_list = [{
# 'ecosystem': arguments.get('ecosystem'),
# 'name': arguments['name'],
# 'version': arguments.get('version'),
# 'source_repo': arguments.get('ecosystem')
# }]
#
# param = {
# 'select_ingest': ['metadata'],
# 'package_list': package_list
# }
#
# logger.info("v2_:_Invoke graph importer at url: '%s' for %s",
# _SELECTIVE_API_URL, param)
# # Calling Data Importer API end point to ingest data into graph db.
# response = requests.post(_SELECTIVE_API_URL, json=param)
#
# if response.status_code != 200:
# raise RuntimeError("v2_:_Failed to invoke graph import at '%s' for %s" %
# (_SELECTIVE_API_URL, param))
#
# logger.info("v2_:_Graph import succeeded with response: %s", response.text)
. Output only the next line. | self.assertRaises(RuntimeError, NewPackageGraphImporterTask.execute, self, data) |
Given the code snippet: <|code_start|>"""Ingest to graph task."""
logger = logging.getLogger(__name__)
_SERVICE_HOST = environ.get("BAYESIAN_DATA_IMPORTER_SERVICE_HOST", "bayesian-data-importer")
_SERVICE_PORT = environ.get("BAYESIAN_DATA_IMPORTER_SERVICE_PORT", "9192")
_SELECTIVE_SERVICE_ENDPOINT = "api/v1/selective_ingest"
_SELECTIVE_API_URL = "http://{host}:{port}/{endpoint}".format(
host=_SERVICE_HOST,
port=_SERVICE_PORT,
endpoint=_SELECTIVE_SERVICE_ENDPOINT)
<|code_end|>
, generate the next line using the imports in this file:
from f8a_worker.base import BaseTask
from os import environ
import requests
import logging
and context (functions, classes, or occasionally code) from other files:
# Path: f8a_worker/base.py
# class BaseTask(SelinonTask):
# """Base class for selinon tasks."""
#
# description = 'Root of the Task object hierarchy'
# schema_ref = _schema = None
# # set this to False if your task shouldn't get the `_audit` value added to result dict
# add_audit_info = True
#
# def __init__(self, *args, **kwargs):
# """Initialize object."""
# super().__init__(*args, **kwargs)
# self.log = get_task_logger(self.__class__.__name__)
# self.configuration = configuration
#
# @classmethod
# def _strict_assert(cls, assert_cond):
# """Assert on condition.
#
# If condition is False, fatal error is raised so task is not retried.
# """
# if not assert_cond:
# raise FatalTaskError("Strict assert failed in task '%s'" % cls.__name__)
#
# @staticmethod
# def _add_audit_info(task_result: dict,
# task_start: datetime,
# task_end: datetime,
# node_args):
# """Add the audit and release information to the result dictionary.
#
# :param task_result: dict, task result
# :param task_start: datetime, the start of the task
# :param task_end: datetime, the end of the task
# :param node_args: arguments passed to flow/node
# """
# task_result['_audit'] = {
# 'started_at': json_serial(task_start),
# 'ended_at': json_serial(task_end),
# 'version': 'v1'
# }
#
# ecosystem_name = node_args.get('ecosystem')
# task_result['_release'] = '{}:{}:{}'.format(ecosystem_name,
# node_args.get('name'),
# node_args.get('version'))
#
# def run(self, node_args):
# """To be transparently called by Selinon.
#
# Selinon transparently calls run(), which takes care of task audit and
# some additional checks and calls execute().
# """
# # SQS guarantees 'deliver at least once', so there could be multiple
# # messages of a type, give up immediately
# if self.storage and isinstance(self.storage, (BayesianPostgres, PackagePostgres)):
# if self.storage.get_worker_id_count(self.task_id) > 0:
# raise TaskAlreadyExistsError("Task with ID '%s'"
# " was already processed" % self.task_id)
#
# start = datetime.utcnow()
# try:
# result = self.execute(node_args)
#
# except Exception as exc:
# if self.add_audit_info:
# # `_audit` key is added to every analysis info submitted
# end = datetime.utcnow()
# result = dict()
#
# self._add_audit_info(
# task_result=result,
# task_start=start,
# task_end=end,
# node_args=node_args,
# )
#
# # write the audit info to the storage
# self.storage.store_error(
# node_args=node_args,
# flow_name=self.flow_name,
# task_name=self.task_name,
# task_id=self.task_id,
# exc_info=sys.exc_info(),
# result=result
# )
#
# raise exc
#
# finally:
# # remove all files that were downloaded for this task
# ObjectCache.wipe()
#
# end = datetime.utcnow()
#
# if result:
# # Ensure result complies with the defined schema (if any) before saving
# self.validate_result(result)
#
# if result is None:
# # Keep track of None results and add _audit and _release keys
# result = {}
#
# if self.add_audit_info:
# # `_audit` key is added to every analysis info submitted
# self._add_audit_info(
# task_result=result,
# task_start=start,
# task_end=end,
# node_args=node_args,
# )
#
# return result
#
# @classmethod
# def create_test_instance(cls, flow_name=None, task_name=None, parent=None, task_id=None,
# dispatcher_id=None):
# """Create instance of task for tests."""
# # used in tests so we do not do ugly things like this, this correctly done by dispatcher
# return cls(flow_name, task_name or cls.__name__, parent, task_id, dispatcher_id)
#
# def validate_result(self, result):
# """Ensure that results comply with the task schema, if defined.
#
# Tasks define a schema by setting schema_ref appropriately.
# Schemas are retrieved from workers/schemas/generated via pkgutil.
# """
# # Skip validation if no schema is defined
# schema_ref = self.schema_ref
# if schema_ref is None:
# return
# # Load schema if not yet loaded
# schema = self._schema
# if schema is None:
# schema = self._schema = load_worker_schema(schema_ref)
# # Validate result against schema
# try:
# jsonschema.validate(result, schema)
# except jsonschema.exceptions.ValidationError as e:
# raise FatalTaskError('Schema validation failed: {e}'.format(e=str(e)))
# # Record the validated schema details
# set_schema_ref(result, schema_ref)
#
# def execute(self, _arguments):
# """Return dictionary with results - must be implemented by any subclass."""
# raise NotImplementedError("Task not implemented")
. Output only the next line. | class NewPackageGraphImporterTask(BaseTask): |
Here is a snippet: <|code_start|> configuration.select_random_github_token.return_value = ['a', 'b']
GITHUB_API_URL = 'https://api.github.com/repos/'
GITHUB_URL = 'https://github.com/'
GITHUB_TOKEN = ''
get_response_issues = mock.Mock()
get_response_issues.return_value = {
"url": "https://api.github.com/repos/kubeup/archon/issues/4",
"repository_url": "https://api.github.com/repos/kubeup/archon",
"comments_url": "https://api.github.com/repos/kubeup/archon/issues/4/comments",
"number": 4,
"title": "how to generate types.generated.go",
"created_at": "2017-03-27T12:52:28Z",
"updated_at": "2017-03-27T14:03:20Z",
"body": "could u support a script for generate *.generated.go in this project?",
}
_processJSonIssuePR = mock.Mock()
_processJSonIssuePR.return_value = {
"githublink": "https://github.com/kubeup/archon",
"issue": "how to generate types.generated.go\ncould u "
"support a script for generate *.generated.go in this "
"project?",
"number": 4,
"package": "kubeup/archon"
}
log = mock.Mock()
def test_execute_noarg(self):
"""Tests for the Golang CVE ingestion worker with no argument."""
<|code_end|>
. Write the next line using the current file imports:
import pytest
from f8a_worker.workers import GitIssuesPRsTask as gocve
from unittest import mock
from selinon import FatalTaskError
and context from other files:
# Path: f8a_worker/workers/golangcvepredictor.py
# class GitIssuesPRsTask(BaseTask):
# """Computes various Issues and PRS for Golang Packages/repositories."""
#
# _analysis_name = 'GitIssuesPRsTask'
# GITHUB_API_URL = 'https://api.github.com/repos/'
# GITHUB_URL = 'https://github.com/'
# GITHUB_TOKEN = ''
#
# def get_response_issues(self, url, headers=None, sleep_time=2, retry_count=10):
# """Wrap requests which tries to get response.
#
# :param url: URL where to do the request
# :param headers: additional headers for request
# :param sleep_time: sleep time between retries
# :param retry_count: number of retries
# :return: content of response's json
# """
# try:
# for _ in range(retry_count):
# response = requests.get(url, headers=headers,
# params={'access_token': self.GITHUB_TOKEN})
# response.raise_for_status()
# if response.status_code == 204:
# # json() below would otherwise fail with JSONDecodeError
# raise HTTPError('No content')
# response = response.json()
# if response:
# return response
# time.sleep(sleep_time)
# else:
# raise NotABugTaskError("Number of retries exceeded")
# except HTTPError as err:
# message = "Failed to get results from {url} with {err}".format(url=url, err=err)
# raise NotABugTaskError(message) from err
#
# def _processJSonIssuePR(self, result, repository, event, package, URL):
#
# comments = ""
# finaldata = {}
# finaldata['source'] = 'github'
# finaldata['package'] = package
# finaldata['link'] = URL
# finaldata['type'] = event
#
# # Fetching Comments section
# comments_json = self.get_response_issues(result['comments_url'])
# for entry in comments_json:
# comments = comments + '\n' + entry['body']
#
# description = result['title'] + '\n' + result['body'] + comments
# finaldata['content'] = description
# return finaldata
#
# def execute(self, arguments):
# """Task code.
#
# :param arguments: dictionary with task arguments
# :return: {}, results
#
# """
# result_data = {'status': 'unknown',
# 'package': '',
# 'summary': [],
# 'details': {}}
# # self._strict_assert(arguments.get('package'))
# # self._strict_assert(arguments.get('repository'))
# # self._strict_assert(arguments.get('event'))
# # self._strict_assert(arguments.get('number'))
# event = ''
# package = arguments.get('package')
# repository = arguments.get('repository')
# if arguments.get('event'):
# event = arguments.get('event').split('-')[0]
# isprnumber = arguments.get('id')
#
# # For testing purposes
# if package is None:
# return result_data
#
# try:
# token, header = self.configuration.select_random_github_token()
# self.GITHUB_TOKEN = token
# except F8AConfigurationException as e:
# self.log.error(e)
# raise FatalTaskError from e
# except Exception as e:
# self.log.error(e)
# raise FatalTaskError from e
#
# # Generating Request URL to fetch Data
# url_path = repository + '/' + event + 's/' + isprnumber
# url_template = urllib.parse.urljoin(self.GITHUB_API_URL, url_path)
#
# # Call the GitHub APIs to get the data
# try:
# result = self.get_response_issues(url_template.format())
# except NotABugTaskError as e:
# self.log.error(e)
# raise NotABugFatalTaskError from e
#
# # Process the received data
# result_data['status'] = 'success'
# result_data['package'] = package
# finaldata = self._processJSonIssuePR(result, repository, event,
# package, url_template.format())
# result_data['details'] = finaldata
# return result_data
, which may include functions, classes, or code. Output only the next line. | results = gocve.execute(self, arguments={}) |
Given the following code snippet before the placeholder: <|code_start|> "from": "normalize-registry-metadata@^1.1.2",
"resolved": "https://registry.npmjs.org/normalize-registry-metadata-1.1.2.tgz",
"dependencies": {
"semver": {
"version": "5.5.1",
"from": "semver@5.5.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-5.5.1.tgz"
}
}
},
"revalidator": {
"version": "0.3.1",
"from": "revalidator@^0.3.1",
"resolved": "https://registry.npmjs.org/revalidator/-/revalidator-0.3.1.tgz"
},
"semver": {
"version": "5.5.1",
"from": "semver@5.5.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-5.5.1.tgz"
}
}
}
def test_create_repo_and_generate_files():
"""Test create_repo_and_generate_files function."""
giturl = "https://github.com/heroku/node-js-sample"
access = {
"access_token": "blahblah"
}
<|code_end|>
, predict the next line using imports from the current file:
from f8a_worker.workers import git_operations as go
and context including class names, function names, and sometimes code from other files:
# Path: f8a_worker/workers/git_operations.py
# F8_API_BACKBONE_HOST = os.getenv('F8_API_BACKBONE_HOST', 'http://f8a-server-backbone:5000')
# GEMINI_SERVER_URL = os.getenv('F8A_GEMINI_SERVER_SERVICE_HOST', 'http://f8a-gemini-server:5000')
# AUTH_KEY = os.getenv('OS_AUTH_KEY', '')
# class GitOperationTask(BaseTask):
# def generate_files_for_maven(path, manifests):
# def generate_files_for_node(path, manifests):
# def create_repo_and_generate_files(self,
# giturl,
# ecosystem,
# gh_token):
# def gemini_call_for_cve_scan(self, scan_repo_url, deps, auth_key):
# def backbone_for_stack_analysis(self, deps, request_id, is_modified_flag, check_license):
# def execute(self, arguments):
. Output only the next line. | instance = go.GitOperationTask.create_test_instance() |
Predict the next line after this snippet: <|code_start|>"""Functions for dispatcher."""
logger = logging.getLogger(__name__)
def _create_analysis_arguments(ecosystem, name, version):
"""Create arguments for analysis."""
return {
'ecosystem': ecosystem,
<|code_end|>
using the current file's imports:
import logging
from urllib.parse import urlparse
from selinon import StoragePool
from f8a_worker.enums import EcosystemBackend
from f8a_worker.models import Ecosystem
from f8a_worker.utils import MavenCoordinates
and any relevant context from other files:
# Path: f8a_worker/models.py
# class Ecosystem(Base):
# """Table for Ecosystem."""
#
# __tablename__ = 'ecosystems'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), unique=True)
# url = Column(String(255))
# fetch_url = Column(String(255))
# _backend = Column(
# Enum(*[b.name for b in EcosystemBackend], name='ecosystem_backend_enum'))
#
# packages = relationship('Package', back_populates='ecosystem')
# feedback = relationship('RecommendationFeedback',
# back_populates='ecosystem')
#
# @property
# def backend(self):
# """Get backend property."""
# return EcosystemBackend[self._backend]
#
# @backend.setter
# def backend(self, backend):
# """Set backend property."""
# self._backend = EcosystemBackend(backend).name
#
# def is_backed_by(self, backend):
# """Is this ecosystem backed by specified backend?."""
# return self.backend == backend
#
# @classmethod
# def by_name(cls, session, name):
# """Get a row with specified name."""
# try:
# return cls._by_attrs(session, name=name)
# except NoResultFound:
# # What to do here ?
# raise
#
# Path: f8a_worker/utils.py
# class MavenCoordinates(object):
# """Represents Maven coordinates.
#
# https://maven.apache.org/pom.html#Maven_Coordinates
# """
#
# _default_packaging = 'jar'
#
# def __init__(self, groupId, artifactId, version='',
# classifier='', packaging=None):
# """Initialize attributes."""
# self.groupId = groupId
# self.artifactId = artifactId
# self.classifier = classifier
# self.packaging = packaging or MavenCoordinates._default_packaging
# self.version = version
#
# def is_valid(self):
# """Check if the current coordinates are valid."""
# return self.groupId and self.artifactId and self.version and self.packaging
#
# def to_str(self, omit_version=False):
# """Return string representation of the coordinates."""
# mvnstr = "{g}:{a}".format(g=self.groupId, a=self.artifactId)
# pack = self.packaging
# if pack == MavenCoordinates._default_packaging:
# pack = ''
# if pack:
# mvnstr += ":{p}".format(p=pack)
# if self.classifier:
# if not pack:
# mvnstr += ':'
# mvnstr += ":{c}".format(c=self.classifier)
# if not self.version or omit_version:
# if self.classifier or pack:
# mvnstr += ':'
# else:
# mvnstr += ":{v}".format(v=self.version)
#
# return mvnstr
#
# def to_repo_url(self, ga_only=False):
# """Return relative path to the artifact in Maven repository."""
# if ga_only:
# return "{g}/{a}".format(g=self.groupId.replace('.', '/'),
# a=self.artifactId)
#
# dir_path = "{g}/{a}/{v}/".format(g=self.groupId.replace('.', '/'),
# a=self.artifactId,
# v=self.version)
# classifier = "-{c}".format(c=self.classifier) if self.classifier else ''
# filename = "{a}-{v}{c}.{e}".format(a=self.artifactId,
# v=self.version,
# c=classifier,
# e=self.packaging)
# return dir_path + filename
#
# @staticmethod
# def _parse_string(coordinates_str):
# """Parse string representation into a dictionary."""
# a = {'groupId': '',
# 'artifactId': '',
# 'packaging': MavenCoordinates._default_packaging,
# 'classifier': '',
# 'version': ''}
#
# ncolons = coordinates_str.count(':')
# if ncolons == 1:
# a['groupId'], a['artifactId'] = coordinates_str.split(':')
# elif ncolons == 2:
# a['groupId'], a['artifactId'], a['version'] = coordinates_str.split(':')
# elif ncolons == 3:
# a['groupId'], a['artifactId'], a['packaging'], a['version'] = coordinates_str.split(':')
# elif ncolons == 4:
# a['groupId'], a['artifactId'], a['packaging'], a['classifier'], a['version'] = \
# coordinates_str.split(':')
# else:
# raise ValueError('Invalid Maven coordinates %s', coordinates_str)
#
# return a
#
# def __repr__(self):
# """Represent as string."""
# return self.to_str()
#
# def __eq__(self, other):
# """Implement == operator."""
# return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
#
# def __ne__(self, other):
# """Implement != operator."""
# return not self.__eq__(other)
#
# @classmethod
# def normalize_str(cls, coordinates_str):
# """Normalize string representation."""
# return cls.from_str(coordinates_str).to_str()
#
# @classmethod
# def from_str(cls, coordinates_str):
# """Create instance from string."""
# coordinates = MavenCoordinates._parse_string(coordinates_str)
# return cls(**coordinates)
. Output only the next line. | 'name': MavenCoordinates.normalize_str(name) if Ecosystem.by_name( |
Next line prediction: <|code_start|>"""Functions for dispatcher."""
logger = logging.getLogger(__name__)
def _create_analysis_arguments(ecosystem, name, version):
"""Create arguments for analysis."""
return {
'ecosystem': ecosystem,
<|code_end|>
. Use current file imports:
(import logging
from urllib.parse import urlparse
from selinon import StoragePool
from f8a_worker.enums import EcosystemBackend
from f8a_worker.models import Ecosystem
from f8a_worker.utils import MavenCoordinates)
and context including class names, function names, or small code snippets from other files:
# Path: f8a_worker/models.py
# class Ecosystem(Base):
# """Table for Ecosystem."""
#
# __tablename__ = 'ecosystems'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), unique=True)
# url = Column(String(255))
# fetch_url = Column(String(255))
# _backend = Column(
# Enum(*[b.name for b in EcosystemBackend], name='ecosystem_backend_enum'))
#
# packages = relationship('Package', back_populates='ecosystem')
# feedback = relationship('RecommendationFeedback',
# back_populates='ecosystem')
#
# @property
# def backend(self):
# """Get backend property."""
# return EcosystemBackend[self._backend]
#
# @backend.setter
# def backend(self, backend):
# """Set backend property."""
# self._backend = EcosystemBackend(backend).name
#
# def is_backed_by(self, backend):
# """Is this ecosystem backed by specified backend?."""
# return self.backend == backend
#
# @classmethod
# def by_name(cls, session, name):
# """Get a row with specified name."""
# try:
# return cls._by_attrs(session, name=name)
# except NoResultFound:
# # What to do here ?
# raise
#
# Path: f8a_worker/utils.py
# class MavenCoordinates(object):
# """Represents Maven coordinates.
#
# https://maven.apache.org/pom.html#Maven_Coordinates
# """
#
# _default_packaging = 'jar'
#
# def __init__(self, groupId, artifactId, version='',
# classifier='', packaging=None):
# """Initialize attributes."""
# self.groupId = groupId
# self.artifactId = artifactId
# self.classifier = classifier
# self.packaging = packaging or MavenCoordinates._default_packaging
# self.version = version
#
# def is_valid(self):
# """Check if the current coordinates are valid."""
# return self.groupId and self.artifactId and self.version and self.packaging
#
# def to_str(self, omit_version=False):
# """Return string representation of the coordinates."""
# mvnstr = "{g}:{a}".format(g=self.groupId, a=self.artifactId)
# pack = self.packaging
# if pack == MavenCoordinates._default_packaging:
# pack = ''
# if pack:
# mvnstr += ":{p}".format(p=pack)
# if self.classifier:
# if not pack:
# mvnstr += ':'
# mvnstr += ":{c}".format(c=self.classifier)
# if not self.version or omit_version:
# if self.classifier or pack:
# mvnstr += ':'
# else:
# mvnstr += ":{v}".format(v=self.version)
#
# return mvnstr
#
# def to_repo_url(self, ga_only=False):
# """Return relative path to the artifact in Maven repository."""
# if ga_only:
# return "{g}/{a}".format(g=self.groupId.replace('.', '/'),
# a=self.artifactId)
#
# dir_path = "{g}/{a}/{v}/".format(g=self.groupId.replace('.', '/'),
# a=self.artifactId,
# v=self.version)
# classifier = "-{c}".format(c=self.classifier) if self.classifier else ''
# filename = "{a}-{v}{c}.{e}".format(a=self.artifactId,
# v=self.version,
# c=classifier,
# e=self.packaging)
# return dir_path + filename
#
# @staticmethod
# def _parse_string(coordinates_str):
# """Parse string representation into a dictionary."""
# a = {'groupId': '',
# 'artifactId': '',
# 'packaging': MavenCoordinates._default_packaging,
# 'classifier': '',
# 'version': ''}
#
# ncolons = coordinates_str.count(':')
# if ncolons == 1:
# a['groupId'], a['artifactId'] = coordinates_str.split(':')
# elif ncolons == 2:
# a['groupId'], a['artifactId'], a['version'] = coordinates_str.split(':')
# elif ncolons == 3:
# a['groupId'], a['artifactId'], a['packaging'], a['version'] = coordinates_str.split(':')
# elif ncolons == 4:
# a['groupId'], a['artifactId'], a['packaging'], a['classifier'], a['version'] = \
# coordinates_str.split(':')
# else:
# raise ValueError('Invalid Maven coordinates %s', coordinates_str)
#
# return a
#
# def __repr__(self):
# """Represent as string."""
# return self.to_str()
#
# def __eq__(self, other):
# """Implement == operator."""
# return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
#
# def __ne__(self, other):
# """Implement != operator."""
# return not self.__eq__(other)
#
# @classmethod
# def normalize_str(cls, coordinates_str):
# """Normalize string representation."""
# return cls.from_str(coordinates_str).to_str()
#
# @classmethod
# def from_str(cls, coordinates_str):
# """Create instance from string."""
# coordinates = MavenCoordinates._parse_string(coordinates_str)
# return cls(**coordinates)
. Output only the next line. | 'name': MavenCoordinates.normalize_str(name) if Ecosystem.by_name( |
Using the snippet: <|code_start|>"""Computes various Issues and PRS for Golang Packages repositories.
output: cve format containing PR,Issues for the a golang package/repositories
sample output:
{'status': 'success','package': '','summary': [],'details': {}}
"""
<|code_end|>
, determine the next line of code. You have imports:
from f8a_worker.base import BaseTask
from f8a_worker.errors import F8AConfigurationException, NotABugTaskError, NotABugFatalTaskError
from selinon import FatalTaskError
from requests import HTTPError
import requests
import urllib
import time
and context (class names, function names, or code) available:
# Path: f8a_worker/base.py
# class BaseTask(SelinonTask):
# """Base class for selinon tasks."""
#
# description = 'Root of the Task object hierarchy'
# schema_ref = _schema = None
# # set this to False if your task shouldn't get the `_audit` value added to result dict
# add_audit_info = True
#
# def __init__(self, *args, **kwargs):
# """Initialize object."""
# super().__init__(*args, **kwargs)
# self.log = get_task_logger(self.__class__.__name__)
# self.configuration = configuration
#
# @classmethod
# def _strict_assert(cls, assert_cond):
# """Assert on condition.
#
# If condition is False, fatal error is raised so task is not retried.
# """
# if not assert_cond:
# raise FatalTaskError("Strict assert failed in task '%s'" % cls.__name__)
#
# @staticmethod
# def _add_audit_info(task_result: dict,
# task_start: datetime,
# task_end: datetime,
# node_args):
# """Add the audit and release information to the result dictionary.
#
# :param task_result: dict, task result
# :param task_start: datetime, the start of the task
# :param task_end: datetime, the end of the task
# :param node_args: arguments passed to flow/node
# """
# task_result['_audit'] = {
# 'started_at': json_serial(task_start),
# 'ended_at': json_serial(task_end),
# 'version': 'v1'
# }
#
# ecosystem_name = node_args.get('ecosystem')
# task_result['_release'] = '{}:{}:{}'.format(ecosystem_name,
# node_args.get('name'),
# node_args.get('version'))
#
# def run(self, node_args):
# """To be transparently called by Selinon.
#
# Selinon transparently calls run(), which takes care of task audit and
# some additional checks and calls execute().
# """
# # SQS guarantees 'deliver at least once', so there could be multiple
# # messages of a type, give up immediately
# if self.storage and isinstance(self.storage, (BayesianPostgres, PackagePostgres)):
# if self.storage.get_worker_id_count(self.task_id) > 0:
# raise TaskAlreadyExistsError("Task with ID '%s'"
# " was already processed" % self.task_id)
#
# start = datetime.utcnow()
# try:
# result = self.execute(node_args)
#
# except Exception as exc:
# if self.add_audit_info:
# # `_audit` key is added to every analysis info submitted
# end = datetime.utcnow()
# result = dict()
#
# self._add_audit_info(
# task_result=result,
# task_start=start,
# task_end=end,
# node_args=node_args,
# )
#
# # write the audit info to the storage
# self.storage.store_error(
# node_args=node_args,
# flow_name=self.flow_name,
# task_name=self.task_name,
# task_id=self.task_id,
# exc_info=sys.exc_info(),
# result=result
# )
#
# raise exc
#
# finally:
# # remove all files that were downloaded for this task
# ObjectCache.wipe()
#
# end = datetime.utcnow()
#
# if result:
# # Ensure result complies with the defined schema (if any) before saving
# self.validate_result(result)
#
# if result is None:
# # Keep track of None results and add _audit and _release keys
# result = {}
#
# if self.add_audit_info:
# # `_audit` key is added to every analysis info submitted
# self._add_audit_info(
# task_result=result,
# task_start=start,
# task_end=end,
# node_args=node_args,
# )
#
# return result
#
# @classmethod
# def create_test_instance(cls, flow_name=None, task_name=None, parent=None, task_id=None,
# dispatcher_id=None):
# """Create instance of task for tests."""
# # used in tests so we do not do ugly things like this, this correctly done by dispatcher
# return cls(flow_name, task_name or cls.__name__, parent, task_id, dispatcher_id)
#
# def validate_result(self, result):
# """Ensure that results comply with the task schema, if defined.
#
# Tasks define a schema by setting schema_ref appropriately.
# Schemas are retrieved from workers/schemas/generated via pkgutil.
# """
# # Skip validation if no schema is defined
# schema_ref = self.schema_ref
# if schema_ref is None:
# return
# # Load schema if not yet loaded
# schema = self._schema
# if schema is None:
# schema = self._schema = load_worker_schema(schema_ref)
# # Validate result against schema
# try:
# jsonschema.validate(result, schema)
# except jsonschema.exceptions.ValidationError as e:
# raise FatalTaskError('Schema validation failed: {e}'.format(e=str(e)))
# # Record the validated schema details
# set_schema_ref(result, schema_ref)
#
# def execute(self, _arguments):
# """Return dictionary with results - must be implemented by any subclass."""
# raise NotImplementedError("Task not implemented")
. Output only the next line. | class GitIssuesPRsTask(BaseTask): |
Next line prediction: <|code_start|>"""SQLAlchemy domain models."""
def create_db_scoped_session(connection_string=None):
"""Create scoped session."""
# we use NullPool, so that SQLAlchemy doesn't pool local connections
# and only really uses connections while writing results
return scoped_session(
sessionmaker(bind=create_engine(
<|code_end|>
. Use current file imports:
(from sqlalchemy import (Column, DateTime, Enum, ForeignKey, Integer, String, UniqueConstraint,
create_engine, Boolean, Text)
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.exc import IntegrityError, SQLAlchemyError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, scoped_session, sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.session import Session
from sqlalchemy.pool import NullPool
from f8a_worker.defaults import configuration
from f8a_worker.enums import EcosystemBackend)
and context including class names, function names, or small code snippets from other files:
# Path: f8a_worker/defaults.py
# class F8AConfiguration(object):
# def _make_postgres_string(password):
# def is_local_deployment(cls):
# def _rate_limit_exceeded(cls, headers):
# def _decide_token_usage(cls):
# def select_random_github_token(cls):
# def libraries_io_project_url(cls, ecosystem, name):
# def dependency_check_script_path(self):
# BIGQUERY_JSON_KEY = environ.get('GITHUB_CONSUMER_KEY', 'not-set')
# BROKER_CONNECTION = "amqp://guest@{host}:{port}".format(
# host=environ.get('RABBITMQ_SERVICE_SERVICE_HOST', 'coreapi-broker'),
# port=environ.get('RABBITMQ_SERVICE_SERVICE_PORT', '5672'))
# GIT_USER_NAME = environ.get('GIT_USER_NAME', 'f8a')
# GIT_USER_EMAIL = environ.get('GIT_USER_EMAIL', 'f8a@f8a')
# GITHUB_TOKEN = environ.get('GITHUB_TOKEN', 'not-set').split(',')
# GITHUB_API = "https://api.github.com/"
# LIBRARIES_IO_TOKEN = environ.get('LIBRARIES_IO_TOKEN', 'not-set')
# LIBRARIES_IO_API = 'https://libraries.io/api'
# NPMJS_CHANGES_URL = environ.get('NPMJS_CHANGES_URL',
# "https://skimdb.npmjs.com/registry/"
# "_changes?descending=true&include_docs=true&feed=continuous")
# UNQUOTED_POSTGRES_CONNECTION = _make_postgres_string(environ.get('POSTGRESQL_PASSWORD', ''))
# POSTGRES_CONNECTION = _make_postgres_string(
# quote(environ.get('POSTGRESQL_PASSWORD', ''), safe=''))
# WORKER_DATA_DIR = environ.get('WORKER_DATA_DIR', 'not-set')
# NPM_DATA_DIR = path.join(environ.get('HOME', '.npm'))
# SCANCODE_LICENSE_SCORE = environ.get('SCANCODE_LICENSE_SCORE', '20') # scancode's default is 0
# SCANCODE_TIMEOUT = environ.get('SCANCODE_TIMEOUT', '120') # scancode's default is 120
# SCANCODE_PROCESSES = environ.get('SCANCODE_PROCESSES', '1') # scancode's default is 1
# SCANCODE_PATH = environ.get('SCANCODE_PATH', '/opt/scancode-toolkit/')
# SCANCODE_IGNORE = ['*.pyc', '*.so', '*.dll', '*.rar', '*.jar',
# '*.zip', '*.tar', '*.tar.gz', '*.tar.xz', '*.png'] # don't scan binaries
# AWS_S3_REGION = environ.get('AWS_S3_REGION')
# AWS_S3_ACCESS_KEY_ID = environ.get('AWS_S3_ACCESS_KEY_ID')
# AWS_S3_SECRET_ACCESS_KEY = environ.get('AWS_S3_SECRET_ACCESS_KEY')
# S3_ENDPOINT_URL = environ.get('S3_ENDPOINT_URL')
# DEPLOYMENT_PREFIX = environ.get('DEPLOYMENT_PREFIX')
# BAYESIAN_SYNC_S3 = int(environ.get('BAYESIAN_SYNC_S3', 0)) == 1
# AWS_SQS_ACCESS_KEY_ID = environ.get('AWS_SQS_ACCESS_KEY_ID')
# AWS_SQS_SECRET_ACCESS_KEY = environ.get('AWS_SQS_SECRET_ACCESS_KEY')
# CELERY_RESULT_BACKEND = environ.get('CELERY_RESULT_BACKEND')
# AWS_SQS_REGION = environ.get('AWS_SQS_REGION')
# JAVANCSS_PATH = environ.get('JAVANCSS_PATH')
# OWASP_DEP_CHECK_PATH = environ.get('OWASP_DEP_CHECK_PATH')
# USAGE_THRESHOLD = int(environ.get("LOW_USAGE_THRESHOLD", "5000"))
# USAGE_THRESHOLD = 5000
# POPULARITY_THRESHOLD = int(environ.get("LOW_POPULARITY_THRESHOLD", "5000"))
# POPULARITY_THRESHOLD = 5000
# BAYESIAN_GREMLIN_HTTP_SERVICE_HOST = environ.get("BAYESIAN_GREMLIN_HTTP_SERVICE_HOST",
# "localhost")
# BAYESIAN_GREMLIN_HTTP_SERVICE_PORT = environ.get("BAYESIAN_GREMLIN_HTTP_SERVICE_PORT", "8182")
. Output only the next line. | connection_string or configuration.POSTGRES_CONNECTION, |
Given snippet: <|code_start|> return None
class S3():
"""Dummy Class."""
def store_data(self, arguments, result):
"""Test Function."""
pass
class TestNewMetaDataTask(TestCase):
"""Tests for the NewInitPackageFlow task."""
def store_data_to_s3(self, arguments, s3, result):
"""Test Function."""
if not arguments:
raise
if not s3:
raise
if not result:
raise
pass
@mock.patch('f8a_worker.workers.new_metadata.StoragePool.get_connected_storage',
return_value='')
@mock.patch('f8a_worker.workers.new_metadata.GolangUtils',
return_value=GolangUtils(data_v1['name']))
def test_execute(self, _mock1, _mock2):
"""Tests for 'execute'."""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from unittest import TestCase, mock
from f8a_worker.workers.new_metadata import NewMetaDataTask
and context:
# Path: f8a_worker/workers/new_metadata.py
# class NewMetaDataTask(BaseTask):
# """Initialize package-version-level analysis for metadata."""
#
# def execute(self, arguments):
# """Task code.
#
# :param arguments: dictionary with task arguments
# :return: {}, results
# """
# result_data = {'status': 'success',
# 'details': []}
#
# metadata_dict = {
# 'description': '',
# 'name': arguments.get('name'),
# 'version': arguments.get('version'),
# 'ecosystem': arguments.get('ecosystem')
# }
#
# result_data['details'].append(metadata_dict)
#
# # Store base file required by Data importer
# store_data_to_s3(arguments,
# StoragePool.get_connected_storage('S3InItData'),
# result_data)
#
# # Get the license for package
# golang_util = GolangUtils(arguments.get('name'))
# license = golang_util.get_license()
#
# if license is not None:
# metadata_dict['declared_licenses'] = license
# else:
# metadata_dict['declared_licenses'] = []
#
# # Store metadata file for being used in Data-Importer
# store_data_to_s3(arguments,
# StoragePool.get_connected_storage('S3MetaData'),
# result_data)
#
# return arguments
which might include code, classes, or functions. Output only the next line. | result = NewMetaDataTask.execute(self, data_v1) |
Given the code snippet: <|code_start|>"""Tests for Java data normalizers."""
@pytest.mark.parametrize('data,keymap,expected', [
# pick one key which IS there
({'author': 'me', 'version': '0.1.2'}, (('author',),), {'author': 'me'}),
# pick one key which IS NOT there
({'author-name': 'me', 'version': '0.1.2'}, (('author',),),
{'author': None}),
# pick & and rename one key which IS there
({'author-name': 'me'}, (('author-name', 'author',),),
{'author': 'me'}),
# pick & and rename one key which IS NOT there
({'authors': 'they'}, (('author-name', 'author',),),
{'author': None}),
# pick one of keys
({'license': 'MIT'}, ((('license', 'licenses',), ),),
{'license': 'MIT'}),
# pick one of keys
({'licenses': ['MIT', 'BSD']}, ((('license', 'licenses',),),),
{'licenses': ['MIT', 'BSD']}),
# pick one of keys and rename it
({'license': 'MIT'}, ((('license', 'licenses',), 'declared_licenses'),),
{'declared_licenses': 'MIT'}),
])
def test_constructor(data, keymap, expected):
"""Test PythonDataNormalizer constructor."""
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from f8a_worker.data_normalizer import PythonDataNormalizer
and context (functions, classes, or occasionally code) from other files:
# Path: f8a_worker/data_normalizer/python.py
# class PythonDataNormalizer(AbstractDataNormalizer):
# """Python data normalizer.
#
# This normalizer handles data extracted from setup.py files by mercator-go.
# """
#
# _key_map = (
# ('url', 'homepage'),
# ('install_requires', 'dependencies'), ('name',),
# ('description',), ('version',)
# )
#
# def __init__(self, mercator_json):
# """Initialize function."""
# if 'error' in mercator_json:
# # mercator by default (MERCATOR_INTERPRET_SETUP_PY=false) doesn't interpret setup.py
# mercator_json = {}
# super().__init__(mercator_json)
#
# def normalize(self):
# """Normalize output from Mercator for setup.py (Python)."""
# if not self._raw_data:
# return {}
#
# self._data['declared_licenses'] = self._split_keywords(
# self._raw_data.get('license'), separator=','
# )
# self._data['author'] = self._join_name_email(self._raw_data, 'author', 'author_email')
# self._data['code_repository'] = (
# self._identify_gh_repo(self._raw_data.get('url')) or
# self._identify_gh_repo(self._raw_data.get('download_url'))
# )
# self._data['keywords'] = self._split_keywords(self._raw_data.get('keywords'))
# return self._data
. Output only the next line. | dn = PythonDataNormalizer(data) |
Here is a snippet: <|code_start|>#!/usr/bin/env python
"""Start the application."""
class SentryCelery(celery.Celery):
"""Celery class to configure sentry."""
def on_configure(self):
"""Set up sentry client."""
dsn = os.environ.get("SENTRY_DSN")
client = raven.Client(dsn)
register_logger_signal(client)
register_signal(client)
client.ignore_exceptions = [
"f8a_worker.errors.NotABugFatalTaskError",
"f8a_worker.errors.NotABugTaskError",
"f8a_worker.errors.TaskAlreadyExistsError"
]
# Patch to drain out old messages
<|code_end|>
. Write the next line using the current file imports:
import celery
import os
import raven
from f8a_worker.setup_celery import init_celery, init_selinon
from raven.contrib.celery import register_signal, register_logger_signal
from f8a_worker.monkey_patch import patch
and context from other files:
# Path: f8a_worker/monkey_patch.py
# def patch(self):
# """Monkey Patching "Dispatcher.migrate_message" function to modify code at runtime."""
# original_migrate_message = Dispatcher.migrate_message
#
# def patched_migrate_message(self, flow_info):
# res = original_migrate_message(self, flow_info)
# # Adding patch to throw error if the message is older than SQS_MSG_LIFETIME
# _check_hung_task(self, flow_info)
# return res
#
# Dispatcher.migrate_message = patched_migrate_message
, which may include functions, classes, or code. Output only the next line. | patch(self) |
Predict the next line for this snippet: <|code_start|> remove weird-looking errors like (un-committed changes due to errors
in init task):
DETAIL: Key (package_analysis_id)=(1113452) is not present in table "package_analyses".
"""
if task_name in ('InitPackageFlow', 'InitAnalysisFlow')\
or issubclass(exc_info[0], TaskAlreadyExistsError):
return
# Sanity checks
if not self.is_connected():
self.connect()
res = self._create_result_entry(node_args, flow_name, task_name, task_id, result=result,
error=True)
try:
PostgresBase.session.add(res)
PostgresBase.session.commit()
except IntegrityError:
# the result has been already stored before the error occurred
# hence there is no reason to re-raise
PostgresBase.session.rollback()
except SQLAlchemyError:
PostgresBase.session.rollback()
raise
def get_ecosystem(self, name):
"""Get ecosystem by name."""
if not self.is_connected():
self.connect()
<|code_end|>
with the help of current file imports:
import os
from selinon import DataStorage
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError, SQLAlchemyError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from f8a_worker.errors import TaskAlreadyExistsError
from f8a_worker.models import Ecosystem
and context from other files:
# Path: f8a_worker/models.py
# class Ecosystem(Base):
# """Table for Ecosystem."""
#
# __tablename__ = 'ecosystems'
#
# id = Column(Integer, primary_key=True)
# name = Column(String(255), unique=True)
# url = Column(String(255))
# fetch_url = Column(String(255))
# _backend = Column(
# Enum(*[b.name for b in EcosystemBackend], name='ecosystem_backend_enum'))
#
# packages = relationship('Package', back_populates='ecosystem')
# feedback = relationship('RecommendationFeedback',
# back_populates='ecosystem')
#
# @property
# def backend(self):
# """Get backend property."""
# return EcosystemBackend[self._backend]
#
# @backend.setter
# def backend(self, backend):
# """Set backend property."""
# self._backend = EcosystemBackend(backend).name
#
# def is_backed_by(self, backend):
# """Is this ecosystem backed by specified backend?."""
# return self.backend == backend
#
# @classmethod
# def by_name(cls, session, name):
# """Get a row with specified name."""
# try:
# return cls._by_attrs(session, name=name)
# except NoResultFound:
# # What to do here ?
# raise
, which may contain function names, class names, or code. Output only the next line. | return Ecosystem.by_name(PostgresBase.session, name) |
Given snippet: <|code_start|>#!/usr/bin/env python3
"""Basic interface to the Amazon S3 database."""
class AmazonS3(DataStorage):
"""Basic interface to the Amazon S3 database."""
_DEFAULT_REGION_NAME = 'us-east-1'
_DEFAULT_BUCKET_NAME = 'bayesian-core-unknown'
_DEFAULT_LOCAL_ENDPOINT = 'http://coreapi-s3:33000'
_DEFAULT_ENCRYPTION = 'aws:kms'
_DEFAULT_VERSIONED = True
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, bucket_name=None,
region_name=None, endpoint_url=None, use_ssl=False, encryption=None,
versioned=None):
"""Initialize object, setup connection to the AWS S3."""
# TODO: reduce cyclomatic complexity
# Priority for configuration options:
# 1. environment variables
# 2. arguments passed to constructor
# 3. defaults as listed in self._DEFAULT_*
super().__init__()
self._s3 = None
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import json
import uuid
import boto3
import botocore
from selinon import DataStorage
from selinon import StoragePool
from f8a_worker.defaults import configuration
and context:
# Path: f8a_worker/defaults.py
# class F8AConfiguration(object):
# def _make_postgres_string(password):
# def is_local_deployment(cls):
# def _rate_limit_exceeded(cls, headers):
# def _decide_token_usage(cls):
# def select_random_github_token(cls):
# def libraries_io_project_url(cls, ecosystem, name):
# def dependency_check_script_path(self):
# BIGQUERY_JSON_KEY = environ.get('GITHUB_CONSUMER_KEY', 'not-set')
# BROKER_CONNECTION = "amqp://guest@{host}:{port}".format(
# host=environ.get('RABBITMQ_SERVICE_SERVICE_HOST', 'coreapi-broker'),
# port=environ.get('RABBITMQ_SERVICE_SERVICE_PORT', '5672'))
# GIT_USER_NAME = environ.get('GIT_USER_NAME', 'f8a')
# GIT_USER_EMAIL = environ.get('GIT_USER_EMAIL', 'f8a@f8a')
# GITHUB_TOKEN = environ.get('GITHUB_TOKEN', 'not-set').split(',')
# GITHUB_API = "https://api.github.com/"
# LIBRARIES_IO_TOKEN = environ.get('LIBRARIES_IO_TOKEN', 'not-set')
# LIBRARIES_IO_API = 'https://libraries.io/api'
# NPMJS_CHANGES_URL = environ.get('NPMJS_CHANGES_URL',
# "https://skimdb.npmjs.com/registry/"
# "_changes?descending=true&include_docs=true&feed=continuous")
# UNQUOTED_POSTGRES_CONNECTION = _make_postgres_string(environ.get('POSTGRESQL_PASSWORD', ''))
# POSTGRES_CONNECTION = _make_postgres_string(
# quote(environ.get('POSTGRESQL_PASSWORD', ''), safe=''))
# WORKER_DATA_DIR = environ.get('WORKER_DATA_DIR', 'not-set')
# NPM_DATA_DIR = path.join(environ.get('HOME', '.npm'))
# SCANCODE_LICENSE_SCORE = environ.get('SCANCODE_LICENSE_SCORE', '20') # scancode's default is 0
# SCANCODE_TIMEOUT = environ.get('SCANCODE_TIMEOUT', '120') # scancode's default is 120
# SCANCODE_PROCESSES = environ.get('SCANCODE_PROCESSES', '1') # scancode's default is 1
# SCANCODE_PATH = environ.get('SCANCODE_PATH', '/opt/scancode-toolkit/')
# SCANCODE_IGNORE = ['*.pyc', '*.so', '*.dll', '*.rar', '*.jar',
# '*.zip', '*.tar', '*.tar.gz', '*.tar.xz', '*.png'] # don't scan binaries
# AWS_S3_REGION = environ.get('AWS_S3_REGION')
# AWS_S3_ACCESS_KEY_ID = environ.get('AWS_S3_ACCESS_KEY_ID')
# AWS_S3_SECRET_ACCESS_KEY = environ.get('AWS_S3_SECRET_ACCESS_KEY')
# S3_ENDPOINT_URL = environ.get('S3_ENDPOINT_URL')
# DEPLOYMENT_PREFIX = environ.get('DEPLOYMENT_PREFIX')
# BAYESIAN_SYNC_S3 = int(environ.get('BAYESIAN_SYNC_S3', 0)) == 1
# AWS_SQS_ACCESS_KEY_ID = environ.get('AWS_SQS_ACCESS_KEY_ID')
# AWS_SQS_SECRET_ACCESS_KEY = environ.get('AWS_SQS_SECRET_ACCESS_KEY')
# CELERY_RESULT_BACKEND = environ.get('CELERY_RESULT_BACKEND')
# AWS_SQS_REGION = environ.get('AWS_SQS_REGION')
# JAVANCSS_PATH = environ.get('JAVANCSS_PATH')
# OWASP_DEP_CHECK_PATH = environ.get('OWASP_DEP_CHECK_PATH')
# USAGE_THRESHOLD = int(environ.get("LOW_USAGE_THRESHOLD", "5000"))
# USAGE_THRESHOLD = 5000
# POPULARITY_THRESHOLD = int(environ.get("LOW_POPULARITY_THRESHOLD", "5000"))
# POPULARITY_THRESHOLD = 5000
# BAYESIAN_GREMLIN_HTTP_SERVICE_HOST = environ.get("BAYESIAN_GREMLIN_HTTP_SERVICE_HOST",
# "localhost")
# BAYESIAN_GREMLIN_HTTP_SERVICE_PORT = environ.get("BAYESIAN_GREMLIN_HTTP_SERVICE_PORT", "8182")
which might include code, classes, or functions. Output only the next line. | self.region_name = configuration.AWS_S3_REGION or region_name or self._DEFAULT_REGION_NAME |
Here is a snippet: <|code_start|>"""Tests for abstract data normalizer."""
@pytest.mark.parametrize('args, expected', [
({'keywords': None},
[]),
({'keywords': []},
[]),
({'keywords': ['x', 'y']},
['x', 'y']),
({'keywords': ''},
['']),
({'keywords': 'one'},
['one']),
({'keywords': 'one, two'},
['one', 'two']),
({'keywords': 'one two'},
['one', 'two']),
({'keywords': 'one two', 'separator': ' '},
['one', 'two']),
({'keywords': 'one, two', 'separator': ','},
['one', 'two']),
])
def test__split_keywords(args, expected):
"""Test AbstractDataNormalizer._split_keywords()."""
<|code_end|>
. Write the next line using the current file imports:
import pytest
from f8a_worker.data_normalizer import PythonDataNormalizer, AbstractDataNormalizer
and context from other files:
# Path: f8a_worker/data_normalizer/abstract.py
# class AbstractDataNormalizer(abc.ABC):
# """Abstract data normalizer.
#
# Base class for all other data normalizers.
# """
#
# """Mapping from ecosystem-specific keys to their normalized form.
#
# E.g: (('licenses', 'declared_licenses'),)
# """
# _key_map = tuple()
#
# @abc.abstractmethod
# def __init__(self, mercator_json):
# """Initialize function.
#
# :param mercator_json: dict, data from mercator
# """
# self._raw_data = mercator_json
# self._data = self._transform_keys(self._key_map)
#
# @abc.abstractmethod
# def normalize(self):
# """Normalize output from Mercator."""
#
# def _transform_keys(self, keymap, lower=True):
# """Collect known keys and/or rename existing keys.
#
# :param keymap: n-tuple of 2-tuples
# each 2-tuple can have one of these forms:
# ('a',) - get 'a'
# ('b', 'c',) - get 'b' and rename it to 'c'
# (('d', 'e',),) - get 'd' or 'e'
# (('f', 'g',), 'h') - get 'f' or 'g' and rename it to 'h'
# :param lower: bool, convert keys to lowercase
# :return: dictionary with keys from keymap only
# """
# out = {}
# value = None
# for pair in keymap:
# in_key = pair[0]
# if not isinstance(in_key, tuple):
# value = self._raw_data.get(in_key, None)
# else: # e.g. ('license', 'licenses',)
# for in_k in in_key:
# value = self._raw_data.get(in_k, None)
# if value is not None:
# break
# in_key = in_k
# key = in_key if len(pair) == 1 else pair[1]
# if lower:
# key = key.lower()
# out[key] = value
# return out
#
# @staticmethod
# def _join_name_email(name_email_dict, name_key='name', email_key='email'):
# """Join name and email values into a string.
#
# # {'name':'A', 'email':'B@C.com'} -> 'A <B@C.com>'
# """
# if not isinstance(name_email_dict, dict):
# return None
#
# if not name_email_dict:
# return None
#
# name_email_str = name_email_dict.get(name_key) or ''
# if isinstance(name_email_dict.get(email_key), str):
# if name_email_str:
# name_email_str += ' '
# name_email_str += '<' + name_email_dict[email_key] + '>'
# return name_email_str
#
# @staticmethod
# def _rf(iterable):
# """Remove false/empty/None items from iterable."""
# return list(filter(None, iterable))
#
# @staticmethod
# def _split_keywords(keywords, separator=None):
# """Split keywords (string) with separator.
#
# If separator is not specified, use either colon or whitespace.
# """
# if keywords is None:
# return []
# if isinstance(keywords, list):
# return keywords
# if separator is None:
# separator = ',' if ',' in keywords else ' '
# keywords = keywords.split(separator)
# keywords = [kw.strip() for kw in keywords]
# return keywords
#
# @staticmethod
# def _identify_gh_repo(homepage):
# """Return code repository dict filled with homepage."""
# if parse_gh_repo(homepage):
# return {'url': homepage, 'type': 'git'}
# return None
#
# Path: f8a_worker/data_normalizer/python.py
# class PythonDataNormalizer(AbstractDataNormalizer):
# """Python data normalizer.
#
# This normalizer handles data extracted from setup.py files by mercator-go.
# """
#
# _key_map = (
# ('url', 'homepage'),
# ('install_requires', 'dependencies'), ('name',),
# ('description',), ('version',)
# )
#
# def __init__(self, mercator_json):
# """Initialize function."""
# if 'error' in mercator_json:
# # mercator by default (MERCATOR_INTERPRET_SETUP_PY=false) doesn't interpret setup.py
# mercator_json = {}
# super().__init__(mercator_json)
#
# def normalize(self):
# """Normalize output from Mercator for setup.py (Python)."""
# if not self._raw_data:
# return {}
#
# self._data['declared_licenses'] = self._split_keywords(
# self._raw_data.get('license'), separator=','
# )
# self._data['author'] = self._join_name_email(self._raw_data, 'author', 'author_email')
# self._data['code_repository'] = (
# self._identify_gh_repo(self._raw_data.get('url')) or
# self._identify_gh_repo(self._raw_data.get('download_url'))
# )
# self._data['keywords'] = self._split_keywords(self._raw_data.get('keywords'))
# return self._data
, which may include functions, classes, or code. Output only the next line. | assert AbstractDataNormalizer._split_keywords(**args) == expected |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
"""Tests for the GithubTask worker task."""
@pytest.mark.usefixtures("dispatcher_setup")
class TestGithuber(object):
"""Tests for the GithubTask worker task."""
@pytest.mark.parametrize(('repo_name', 'repo_url'), [
('projectatomic/atomic-reactor', 'https://github.com/projectatomic/atomic-reactor'),
])
def test_execute(self, repo_name, repo_url):
"""Start the GithubTask worker task and test its results."""
<|code_end|>
. Write the next line using the current file imports:
import pytest
from f8a_worker.workers import GithubTask
and context from other files:
# Path: f8a_worker/workers/githuber.py
# class GithubTask(BaseTask):
# """Collects statistics using Github API."""
#
# # used for testing
# _repo_name = None
# _repo_url = None
#
# @classmethod
# def create_test_instance(cls, repo_name, repo_url):
# """Create instance of task for tests."""
# assert cls
# instance = super().create_test_instance()
# # set for testing as we are not querying DB for mercator results
# instance._repo_name = repo_name
# instance._repo_url = repo_url
# return instance
#
# def _get_last_years_commits(self, repo_url):
# """Get weekly commit activity for last year."""
# try:
# activity = get_response(urljoin(repo_url + '/', "stats/commit_activity"))
# if activity is None:
# return []
# return [x.get('total', 0) for x in activity]
# except NotABugTaskError as e:
# self.log.debug(e)
# return []
#
# def _get_repo_stats(self, repo):
# """Collect various repository properties."""
# try:
# url = repo.get('contributors_url', '')
# if url:
# contributors = get_gh_contributors(url)
# else:
# contributors = -1
# except NotABugTaskError as e:
# self.log.debug(e)
# contributors = -1
# d = {'contributors_count': contributors}
# for prop in REPO_PROPS:
# d[prop] = repo.get(prop, -1)
# return d
#
# def _get_repo_name(self, url):
# """Retrieve GitHub repo from a preceding Mercator scan."""
# parsed = parse_gh_repo(url)
# if not parsed:
# self.log.debug('Could not parse Github repo URL %s', url)
# else:
# self._repo_url = 'https://github.com/' + parsed
# return parsed
#
# def execute(self, arguments):
# """Task code.
#
# :param arguments: dictionary with task arguments
# :return: {}, results
# """
# result_data = {'status': 'unknown',
# 'summary': [],
# 'details': {}}
# # For testing purposes, a repo may be specified at task creation time
# if self._repo_name is None:
# # Otherwise, get the repo name from earlier Mercator scan results
# self._repo_name = self._get_repo_name(arguments['url'])
# if self._repo_name is None:
# # Not a GitHub hosted project
# return result_data
#
# repo_url = urljoin(self.configuration.GITHUB_API + "repos/", self._repo_name)
# try:
# repo = get_response(repo_url)
# if not repo:
# raise NotABugFatalTaskError('Page not found on {}'.format(repo_url))
# except NotABugTaskError as e:
# self.log.error(e)
# raise NotABugFatalTaskError from e
#
# result_data['status'] = 'success'
#
# issues = {}
# # Get Repo Statistics
# notoriety = self._get_repo_stats(repo)
#
# if notoriety:
# issues.update(notoriety)
# issues['topics'] = repo.get('topics', [])
# issues['license'] = repo.get('license') or {}
#
# # Get Commit Statistics
# last_year_commits = self._get_last_years_commits(repo['url'])
# commits = {'last_year_commits': {'sum': sum(last_year_commits),
# 'weekly': last_year_commits}}
# t_stamp = datetime.datetime.utcnow()
# refreshed_on = {'updated_on': t_stamp.strftime("%Y-%m-%d %H:%M:%S")}
# issues.update(refreshed_on)
# issues.update(commits)
#
# # Get PR/Issue details for previous Month and Year
# gh_pr_issue_details = get_gh_pr_issue_counts(repo['full_name'])
# issues.update(gh_pr_issue_details)
#
# result_data['details'] = issues
# return result_data
, which may include functions, classes, or code. Output only the next line. | task = GithubTask.create_test_instance(repo_name, repo_url) |
Predict the next line after this snippet: <|code_start|> def stable_lhs(self):
assert not (self._stable_lhs & self._cond_lhs)
return self._stable_lhs
@property
def stable_rhs(self):
assert not (self._stable_rhs & self._cond_rhs)
return self._stable_rhs
@property
def cond_rhs(self):
assert not (self._stable_rhs & self._cond_rhs)
return self._cond_rhs
@property
def cond_lhs(self):
assert not (self._stable_lhs & self._cond_lhs)
return self._cond_lhs
@property
def lhs(self):
assert not (self._stable_lhs & self._cond_lhs)
return self._cond_lhs | self._stable_lhs
@property
def rhs(self):
assert not (self._stable_rhs & self._cond_rhs)
return self._cond_rhs | self._stable_rhs
def visitAugAssign(self, node):
<|code_end|>
using the current file's imports:
from . import Visitor, visit_children
from ..visitors.symbol_visitor import get_symbols
from ...utils import py2op
import ast
and any relevant context from other files:
# Path: lang/femtocode/thirdparty/meta/asttools/visitors/symbol_visitor.py
# def get_symbols(node, ctx_types=(ast.Load, ast.Store)):
# '''
# Returns all symbols defined in an ast node.
#
# if ctx_types is given, then restrict the symbols to ones with that context.
#
# :param node: ast node
# :param ctx_types: type or tuple of types that may be found assigned to the `ctx` attribute of
# an ast Name node.
#
# '''
# gen = SymbolVisitor(ctx_types)
# return gen.visit(node)
. Output only the next line. | values = get_symbols(node.value) |
Given the code snippet: <|code_start|> for attr in ('starargs', 'kwargs'):
child = getattr(node, attr)
if child:
right.update(self.visit(child))
for src in left | right:
if not self.graph.has_node(src):
self.undefined.add(src)
if self.call_deps:
add_edges(self.graph, left, right)
add_edges(self.graph, right, left)
right.update(left)
return right
def visitSubscript(self, node):
if isinstance(node.ctx, _ast.Load):
return collect_(self, node)
else:
sources = self.visit(node.slice)
targets = self.visit(node.value)
self.modified.update(targets)
add_edges(self.graph, targets, sources)
return targets
def handle_generators(self, generators):
defined = set()
required = set()
for generator in generators:
<|code_end|>
, generate the next line using the imports in this file:
from . import Visitor, visit_children
from .symbol_visitor import get_symbols
from networkx import DiGraph
import _ast
and context (functions, classes, or occasionally code) from other files:
# Path: lang/femtocode/thirdparty/meta/asttools/visitors/symbol_visitor.py
# def get_symbols(node, ctx_types=(ast.Load, ast.Store)):
# '''
# Returns all symbols defined in an ast node.
#
# if ctx_types is given, then restrict the symbols to ones with that context.
#
# :param node: ast node
# :param ctx_types: type or tuple of types that may be found assigned to the `ctx` attribute of
# an ast Name node.
#
# '''
# gen = SymbolVisitor(ctx_types)
# return gen.visit(node)
. Output only the next line. | get_symbols(generator, _ast.Load) |
Given the code snippet: <|code_start|> interfaces", ... }
:rtype: dict
"""
self._connect()
confs = {} # used by the threads to return the confs
threads = []
self.log = logger
args = [(confs, hn, 'firelet') for hn in self._targets ]
Forker(self._get_conf, args, logger=logger)
# parse the configurations
log.debug("Parsing configurations")
for hostname in self._targets:
if hostname not in confs:
raise Exception("No configuration received from %s" % \
hostname)
iptables_save, ip_addr_show = confs[hostname]
if iptables_save is None:
raise Exception("No configuration received from %s" % \
hostname)
log.debug("iptables_save:" + repr(iptables_save))
iptables_p = self.parse_iptables_save(iptables_save,
hostname=hostname)
#TODO: iptables-save can be very slow when a firewall cannot
# resolve localhost - add a warning?
#log.debug("iptables_p %s" % repr(iptables_p))
ip_a_s_p = self.parse_ip_addr_show(ip_addr_show)
<|code_end|>
, generate the next line using the imports in this file:
from datetime import datetime
from time import time
from threading import Thread
from .flutils import Bunch
import logging
import paramiko
and context (functions, classes, or occasionally code) from other files:
# Path: firelet/flutils.py
# class Bunch(object):
# """A dict that exposes its values as attributes."""
# def __init__(self, **kw):
# self.__dict__ = dict(kw)
#
# def __repr__(self):
# return repr(self.__dict__)
#
# def __len__(self):
# return len(self.__dict__)
#
# def __getitem__(self, name):
# return self.__dict__.__getitem__(name)
#
# def __setitem__(self, name, value):
# return self.__dict__.__setitem__(name, value)
#
# def __iter__(self):
# return self.__dict__.__iter__()
#
# def keys(self):
# """Get the instance attributes
#
# :rtype: list
# """
# return self.__dict__.keys()
#
# def _token(self):
# """Generate a simple hash to detect changes in the bunch attributes
# """
# h = hashlib.md5()
# [h.update(k + str(v)) for k, v in sorted(self.__dict__.iteritems())]
# return h.hexdigest()[:8]
#
# def validate_token(self, token):
# """Check if the given token matches the instance own token to ensure
# that the instance attributes has not been modified.
# The token is a hash of the instance's attributes.
#
# :param token: token
# :type token: str
# :returns: True or False
# """
# assert token == self._token(), \
# "Unable to update: one or more items has been modified in the meantime."
#
# def attr_dict(self):
# """Provide a copy of the internal dict, with a token"""
# d = deepcopy(self.__dict__)
# d['token'] = self._token()
# return d
#
# def update(self, d):
# """Set/update the internal dictionary"""
# for k in self.__dict__:
# self.__dict__[k] = d[k]
. Output only the next line. | d = Bunch(iptables=iptables_p, ip_a_s=ip_a_s_p) |
Given the code snippet: <|code_start|> def __init__(self):
deb("Mocking say()...")
self.reset_history()
def __call__(self, s):
"""Append one or more lines to the history"""
self._output_history.extend(s.split('\n'))
def hist(self):
return '\n-----\n' + '\n'.join(self._output_history) + '\n-----\n'
def flush(self):
self._output_history = []
@property
def output_history(self):
return self._output_history
@property
def last(self):
return self._output_history[-1]
def reset_history(self):
self._output_history = []
@pytest.fixture
def say(monkeypatch):
<|code_end|>
, generate the next line using the imports in this file:
from logging import getLogger
from firelet import cli
from firelet.flcore import DemoGitFireSet
from testingutils import show
import os.path
import pytest
and context (functions, classes, or occasionally code) from other files:
# Path: firelet/cli.py
# def cli_args(mockargs=None):
# def give_help(): # pragma: no cover
# def help(s=None):
# def to_int(s):
# def deletion(table):
# def max_len(li):
# def prettyprint(li):
# def say(s):
# def open_fs(repodir):
# def main(mockargs=None): # pragma: no cover
#
# Path: firelet/flcore.py
# class DemoGitFireSet(GitFireSet):
# """Based on GitFireSet. Provide a demo version without real network interaction.
# The status of the simulated remote hosts is kept on files.
# """
# def __init__(self, repodir):
# GitFireSet.__init__(self, repodir=repodir)
# self.SSHConnector = MockSSHConnector
# self.SSHConnector.repodir = repodir
# self._demo_rulelist = defaultdict(list)
. Output only the next line. | monkeypatch.setattr(cli, "say", MockSay()) |
Based on the snippet: <|code_start|> def flush(self):
self._output_history = []
@property
def output_history(self):
return self._output_history
@property
def last(self):
return self._output_history[-1]
def reset_history(self):
self._output_history = []
@pytest.fixture
def say(monkeypatch):
monkeypatch.setattr(cli, "say", MockSay())
@pytest.fixture
def getpass(monkeypatch):
"""Mock getpass() to unit-test user creation"""
monkeypatch.setattr(cli, "getpass", lambda x: "12345")
@pytest.fixture
def demofireset(repodir):
"Testing is performed against the Demo FireSet"
deb(show("Using %s as repodir" % repodir))
<|code_end|>
, predict the immediate next line with the help of imports:
from logging import getLogger
from firelet import cli
from firelet.flcore import DemoGitFireSet
from testingutils import show
import os.path
import pytest
and context (classes, functions, sometimes code) from other files:
# Path: firelet/cli.py
# def cli_args(mockargs=None):
# def give_help(): # pragma: no cover
# def help(s=None):
# def to_int(s):
# def deletion(table):
# def max_len(li):
# def prettyprint(li):
# def say(s):
# def open_fs(repodir):
# def main(mockargs=None): # pragma: no cover
#
# Path: firelet/flcore.py
# class DemoGitFireSet(GitFireSet):
# """Based on GitFireSet. Provide a demo version without real network interaction.
# The status of the simulated remote hosts is kept on files.
# """
# def __init__(self, repodir):
# GitFireSet.__init__(self, repodir=repodir)
# self.SSHConnector = MockSSHConnector
# self.SSHConnector.repodir = repodir
# self._demo_rulelist = defaultdict(list)
. Output only the next line. | return DemoGitFireSet(repodir=repodir) |
Given the code snippet: <|code_start|> ctx = xmlsec.SignatureContext(key_mgr)
try:
ctx.verify(signode)
except xmlsec.error.Error:
validity = (ref.attrib['URI'], False)
else:
validity = (ref.attrib['URI'], True)
results.append(validity)
return results
def validate_schema(doc_xml, schema_xml=None):
""" Validate XML against its XSD Schema definition provided by the SII.
:param `lxml.etree.Element` doc_xml: Handle to XML etree root node.
"""
doc_xml = deepcopy(doc_xml)
doc_new = etree.Element(doc_xml.tag, nsmap={None: 'http://www.sii.cl/SiiDte'})
doc_new[:] = doc_xml[:] # move children into new root
doc_new.attrib.update(doc_xml.attrib) # copy attributes of the root node
# reload xml
buff = BytesIO(etree.tostring(doc_new, method='c14n'))
xml = etree.parse(buff).getroot()
if not schema_xml:
<|code_end|>
, generate the next line using the imports in this file:
from io import BytesIO
from copy import deepcopy
from lxml import etree
from .schemas import resolve_schema
from .helpers import (
prepend_dtd,
extract_signodes,
extract_signode_certificate,
extract_signode_reference,
)
import tempfile
import xmlsec
and context (functions, classes, or occasionally code) from other files:
# Path: src/sii/lib/schemas.py
# def resolve_schema(xml):
# """ Resolves the schema based on the root node's tag
#
# :param `etree.Element` tag_name: The root node of the XML in question.
#
# :return: A string containing the path to the correponding schema for validation.
# """
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# try:
# path = SCHEMA_FILES[root.tag]
# except KeyError as exc:
# raise KeyError("Could not find schema for root tag '{0}'".format(root.tag)) from exc
# else:
# return path
#
# Path: src/sii/lib/helpers.py
# def prepend_dtd(xml):
# """ Prepends a DTD providing a definition of a to a non-standard xml:id pointer. Necessary for
# signature and signature verification.
#
# :param `etree.Element` xml: XML tree to prepend the DTD to.
#
# :param str sig_tag: Tag name to contain the URI.
# :param str uri_attr: Attribute name to contain the URI.
#
# :return: An `etree.Element` with the now DTD contextualized XML.
# """
# root = None
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# tag = re.sub('\{.*\}', '', root.tag)
# preamble = DTD_PREAMBLE.format(root=tag)
#
# buff = io.BytesIO()
# buff.write(bytes(preamble, 'utf8'))
# buff.write(etree.tostring(xml, pretty_print=True, method='xml'))
# buff.seek(0)
#
# tree = etree.parse(buff)
# root = tree.getroot()
#
# return root
#
# def extract_signodes(xml):
# """ Extracts all <ds:Signature> nodes from given XML.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: list of `etree.Element` of the <ds:Signature>'s
# """
# signodes = xml.xpath('//ds:Signature', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
# return signodes
#
# def extract_signode_certificate(signode):
# """ Extract the x509 Certificate Information from a <ds:Signature>.
#
# Raises exception if it does not find any <X509Certificate> information in the <Signature>.
#
# :param `etree.Element` signode: Root node of the document.
#
# :return: UTF8 encoded string containing the base64 encoded PEM certificate in it.
# """
# cert_node = signode.find('.//{http://www.w3.org/2000/09/xmldsig#}X509Certificate')
# cert_text = ''
#
# if cert_node is None:
# raise ValueError("Could not find x509 certificate on this signode")
# else:
# cert_text = cert_node.text
#
# buff = '-----BEGIN CERTIFICATE-----\n'
# buff += cert_text.strip('\n')
# buff += '\n-----END CERTIFICATE-----\n'
#
# return buff
#
# def extract_signode_reference(signode):
# """ Extracts the <ds:Reference> of a <ds:Signature> node.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: `etree.Element` of the <ds:Reference>
# """
# refs = signode.xpath('.//ds:Reference', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
#
# if len(refs) != 1:
# raise ValueError("Could not find x509 reference on this signode")
# else:
# return refs[0]
. Output only the next line. | schema_pth = resolve_schema(doc_xml) |
Here is a snippet: <|code_start|>""" SII Document Signature Verification Process Functions
"""
__all__ = [
'validate_signatures',
'validate_schema'
]
def validate_signatures(xml):
""" Validate internal Document Signatures. Public Key are provided by them, so no need for
anything else than the XML itself.
:param `etree.Element` xml: Element to the rootnode of the document.
:return: [tuple(URI, True | False), ...]
"""
<|code_end|>
. Write the next line using the current file imports:
from io import BytesIO
from copy import deepcopy
from lxml import etree
from .schemas import resolve_schema
from .helpers import (
prepend_dtd,
extract_signodes,
extract_signode_certificate,
extract_signode_reference,
)
import tempfile
import xmlsec
and context from other files:
# Path: src/sii/lib/schemas.py
# def resolve_schema(xml):
# """ Resolves the schema based on the root node's tag
#
# :param `etree.Element` tag_name: The root node of the XML in question.
#
# :return: A string containing the path to the correponding schema for validation.
# """
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# try:
# path = SCHEMA_FILES[root.tag]
# except KeyError as exc:
# raise KeyError("Could not find schema for root tag '{0}'".format(root.tag)) from exc
# else:
# return path
#
# Path: src/sii/lib/helpers.py
# def prepend_dtd(xml):
# """ Prepends a DTD providing a definition of a to a non-standard xml:id pointer. Necessary for
# signature and signature verification.
#
# :param `etree.Element` xml: XML tree to prepend the DTD to.
#
# :param str sig_tag: Tag name to contain the URI.
# :param str uri_attr: Attribute name to contain the URI.
#
# :return: An `etree.Element` with the now DTD contextualized XML.
# """
# root = None
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# tag = re.sub('\{.*\}', '', root.tag)
# preamble = DTD_PREAMBLE.format(root=tag)
#
# buff = io.BytesIO()
# buff.write(bytes(preamble, 'utf8'))
# buff.write(etree.tostring(xml, pretty_print=True, method='xml'))
# buff.seek(0)
#
# tree = etree.parse(buff)
# root = tree.getroot()
#
# return root
#
# def extract_signodes(xml):
# """ Extracts all <ds:Signature> nodes from given XML.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: list of `etree.Element` of the <ds:Signature>'s
# """
# signodes = xml.xpath('//ds:Signature', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
# return signodes
#
# def extract_signode_certificate(signode):
# """ Extract the x509 Certificate Information from a <ds:Signature>.
#
# Raises exception if it does not find any <X509Certificate> information in the <Signature>.
#
# :param `etree.Element` signode: Root node of the document.
#
# :return: UTF8 encoded string containing the base64 encoded PEM certificate in it.
# """
# cert_node = signode.find('.//{http://www.w3.org/2000/09/xmldsig#}X509Certificate')
# cert_text = ''
#
# if cert_node is None:
# raise ValueError("Could not find x509 certificate on this signode")
# else:
# cert_text = cert_node.text
#
# buff = '-----BEGIN CERTIFICATE-----\n'
# buff += cert_text.strip('\n')
# buff += '\n-----END CERTIFICATE-----\n'
#
# return buff
#
# def extract_signode_reference(signode):
# """ Extracts the <ds:Reference> of a <ds:Signature> node.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: `etree.Element` of the <ds:Reference>
# """
# refs = signode.xpath('.//ds:Reference', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
#
# if len(refs) != 1:
# raise ValueError("Could not find x509 reference on this signode")
# else:
# return refs[0]
, which may include functions, classes, or code. Output only the next line. | xml = prepend_dtd(xml) |
Predict the next line after this snippet: <|code_start|>""" SII Document Signature Verification Process Functions
"""
__all__ = [
'validate_signatures',
'validate_schema'
]
def validate_signatures(xml):
""" Validate internal Document Signatures. Public Key are provided by them, so no need for
anything else than the XML itself.
:param `etree.Element` xml: Element to the rootnode of the document.
:return: [tuple(URI, True | False), ...]
"""
xml = prepend_dtd(xml)
<|code_end|>
using the current file's imports:
from io import BytesIO
from copy import deepcopy
from lxml import etree
from .schemas import resolve_schema
from .helpers import (
prepend_dtd,
extract_signodes,
extract_signode_certificate,
extract_signode_reference,
)
import tempfile
import xmlsec
and any relevant context from other files:
# Path: src/sii/lib/schemas.py
# def resolve_schema(xml):
# """ Resolves the schema based on the root node's tag
#
# :param `etree.Element` tag_name: The root node of the XML in question.
#
# :return: A string containing the path to the correponding schema for validation.
# """
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# try:
# path = SCHEMA_FILES[root.tag]
# except KeyError as exc:
# raise KeyError("Could not find schema for root tag '{0}'".format(root.tag)) from exc
# else:
# return path
#
# Path: src/sii/lib/helpers.py
# def prepend_dtd(xml):
# """ Prepends a DTD providing a definition of a to a non-standard xml:id pointer. Necessary for
# signature and signature verification.
#
# :param `etree.Element` xml: XML tree to prepend the DTD to.
#
# :param str sig_tag: Tag name to contain the URI.
# :param str uri_attr: Attribute name to contain the URI.
#
# :return: An `etree.Element` with the now DTD contextualized XML.
# """
# root = None
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# tag = re.sub('\{.*\}', '', root.tag)
# preamble = DTD_PREAMBLE.format(root=tag)
#
# buff = io.BytesIO()
# buff.write(bytes(preamble, 'utf8'))
# buff.write(etree.tostring(xml, pretty_print=True, method='xml'))
# buff.seek(0)
#
# tree = etree.parse(buff)
# root = tree.getroot()
#
# return root
#
# def extract_signodes(xml):
# """ Extracts all <ds:Signature> nodes from given XML.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: list of `etree.Element` of the <ds:Signature>'s
# """
# signodes = xml.xpath('//ds:Signature', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
# return signodes
#
# def extract_signode_certificate(signode):
# """ Extract the x509 Certificate Information from a <ds:Signature>.
#
# Raises exception if it does not find any <X509Certificate> information in the <Signature>.
#
# :param `etree.Element` signode: Root node of the document.
#
# :return: UTF8 encoded string containing the base64 encoded PEM certificate in it.
# """
# cert_node = signode.find('.//{http://www.w3.org/2000/09/xmldsig#}X509Certificate')
# cert_text = ''
#
# if cert_node is None:
# raise ValueError("Could not find x509 certificate on this signode")
# else:
# cert_text = cert_node.text
#
# buff = '-----BEGIN CERTIFICATE-----\n'
# buff += cert_text.strip('\n')
# buff += '\n-----END CERTIFICATE-----\n'
#
# return buff
#
# def extract_signode_reference(signode):
# """ Extracts the <ds:Reference> of a <ds:Signature> node.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: `etree.Element` of the <ds:Reference>
# """
# refs = signode.xpath('.//ds:Reference', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
#
# if len(refs) != 1:
# raise ValueError("Could not find x509 reference on this signode")
# else:
# return refs[0]
. Output only the next line. | signodes = extract_signodes(xml) |
Predict the next line after this snippet: <|code_start|>""" SII Document Signature Verification Process Functions
"""
__all__ = [
'validate_signatures',
'validate_schema'
]
def validate_signatures(xml):
""" Validate internal Document Signatures. Public Key are provided by them, so no need for
anything else than the XML itself.
:param `etree.Element` xml: Element to the rootnode of the document.
:return: [tuple(URI, True | False), ...]
"""
xml = prepend_dtd(xml)
signodes = extract_signodes(xml)
results = []
for signode in signodes:
<|code_end|>
using the current file's imports:
from io import BytesIO
from copy import deepcopy
from lxml import etree
from .schemas import resolve_schema
from .helpers import (
prepend_dtd,
extract_signodes,
extract_signode_certificate,
extract_signode_reference,
)
import tempfile
import xmlsec
and any relevant context from other files:
# Path: src/sii/lib/schemas.py
# def resolve_schema(xml):
# """ Resolves the schema based on the root node's tag
#
# :param `etree.Element` tag_name: The root node of the XML in question.
#
# :return: A string containing the path to the correponding schema for validation.
# """
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# try:
# path = SCHEMA_FILES[root.tag]
# except KeyError as exc:
# raise KeyError("Could not find schema for root tag '{0}'".format(root.tag)) from exc
# else:
# return path
#
# Path: src/sii/lib/helpers.py
# def prepend_dtd(xml):
# """ Prepends a DTD providing a definition of a to a non-standard xml:id pointer. Necessary for
# signature and signature verification.
#
# :param `etree.Element` xml: XML tree to prepend the DTD to.
#
# :param str sig_tag: Tag name to contain the URI.
# :param str uri_attr: Attribute name to contain the URI.
#
# :return: An `etree.Element` with the now DTD contextualized XML.
# """
# root = None
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# tag = re.sub('\{.*\}', '', root.tag)
# preamble = DTD_PREAMBLE.format(root=tag)
#
# buff = io.BytesIO()
# buff.write(bytes(preamble, 'utf8'))
# buff.write(etree.tostring(xml, pretty_print=True, method='xml'))
# buff.seek(0)
#
# tree = etree.parse(buff)
# root = tree.getroot()
#
# return root
#
# def extract_signodes(xml):
# """ Extracts all <ds:Signature> nodes from given XML.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: list of `etree.Element` of the <ds:Signature>'s
# """
# signodes = xml.xpath('//ds:Signature', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
# return signodes
#
# def extract_signode_certificate(signode):
# """ Extract the x509 Certificate Information from a <ds:Signature>.
#
# Raises exception if it does not find any <X509Certificate> information in the <Signature>.
#
# :param `etree.Element` signode: Root node of the document.
#
# :return: UTF8 encoded string containing the base64 encoded PEM certificate in it.
# """
# cert_node = signode.find('.//{http://www.w3.org/2000/09/xmldsig#}X509Certificate')
# cert_text = ''
#
# if cert_node is None:
# raise ValueError("Could not find x509 certificate on this signode")
# else:
# cert_text = cert_node.text
#
# buff = '-----BEGIN CERTIFICATE-----\n'
# buff += cert_text.strip('\n')
# buff += '\n-----END CERTIFICATE-----\n'
#
# return buff
#
# def extract_signode_reference(signode):
# """ Extracts the <ds:Reference> of a <ds:Signature> node.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: `etree.Element` of the <ds:Reference>
# """
# refs = signode.xpath('.//ds:Reference', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
#
# if len(refs) != 1:
# raise ValueError("Could not find x509 reference on this signode")
# else:
# return refs[0]
. Output only the next line. | cert = extract_signode_certificate(signode) |
Next line prediction: <|code_start|>""" SII Document Signature Verification Process Functions
"""
__all__ = [
'validate_signatures',
'validate_schema'
]
def validate_signatures(xml):
""" Validate internal Document Signatures. Public Key are provided by them, so no need for
anything else than the XML itself.
:param `etree.Element` xml: Element to the rootnode of the document.
:return: [tuple(URI, True | False), ...]
"""
xml = prepend_dtd(xml)
signodes = extract_signodes(xml)
results = []
for signode in signodes:
cert = extract_signode_certificate(signode)
<|code_end|>
. Use current file imports:
(from io import BytesIO
from copy import deepcopy
from lxml import etree
from .schemas import resolve_schema
from .helpers import (
prepend_dtd,
extract_signodes,
extract_signode_certificate,
extract_signode_reference,
)
import tempfile
import xmlsec)
and context including class names, function names, or small code snippets from other files:
# Path: src/sii/lib/schemas.py
# def resolve_schema(xml):
# """ Resolves the schema based on the root node's tag
#
# :param `etree.Element` tag_name: The root node of the XML in question.
#
# :return: A string containing the path to the correponding schema for validation.
# """
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# try:
# path = SCHEMA_FILES[root.tag]
# except KeyError as exc:
# raise KeyError("Could not find schema for root tag '{0}'".format(root.tag)) from exc
# else:
# return path
#
# Path: src/sii/lib/helpers.py
# def prepend_dtd(xml):
# """ Prepends a DTD providing a definition of a to a non-standard xml:id pointer. Necessary for
# signature and signature verification.
#
# :param `etree.Element` xml: XML tree to prepend the DTD to.
#
# :param str sig_tag: Tag name to contain the URI.
# :param str uri_attr: Attribute name to contain the URI.
#
# :return: An `etree.Element` with the now DTD contextualized XML.
# """
# root = None
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# tag = re.sub('\{.*\}', '', root.tag)
# preamble = DTD_PREAMBLE.format(root=tag)
#
# buff = io.BytesIO()
# buff.write(bytes(preamble, 'utf8'))
# buff.write(etree.tostring(xml, pretty_print=True, method='xml'))
# buff.seek(0)
#
# tree = etree.parse(buff)
# root = tree.getroot()
#
# return root
#
# def extract_signodes(xml):
# """ Extracts all <ds:Signature> nodes from given XML.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: list of `etree.Element` of the <ds:Signature>'s
# """
# signodes = xml.xpath('//ds:Signature', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
# return signodes
#
# def extract_signode_certificate(signode):
# """ Extract the x509 Certificate Information from a <ds:Signature>.
#
# Raises exception if it does not find any <X509Certificate> information in the <Signature>.
#
# :param `etree.Element` signode: Root node of the document.
#
# :return: UTF8 encoded string containing the base64 encoded PEM certificate in it.
# """
# cert_node = signode.find('.//{http://www.w3.org/2000/09/xmldsig#}X509Certificate')
# cert_text = ''
#
# if cert_node is None:
# raise ValueError("Could not find x509 certificate on this signode")
# else:
# cert_text = cert_node.text
#
# buff = '-----BEGIN CERTIFICATE-----\n'
# buff += cert_text.strip('\n')
# buff += '\n-----END CERTIFICATE-----\n'
#
# return buff
#
# def extract_signode_reference(signode):
# """ Extracts the <ds:Reference> of a <ds:Signature> node.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: `etree.Element` of the <ds:Reference>
# """
# refs = signode.xpath('.//ds:Reference', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
#
# if len(refs) != 1:
# raise ValueError("Could not find x509 reference on this signode")
# else:
# return refs[0]
. Output only the next line. | ref = extract_signode_reference(signode) |
Given the code snippet: <|code_start|> 46 : "FACTURA DE COMPRA ELECTRÓNICA",
50 : "GUÍA DE DESPACHO.",
52 : "GUÍA DE DESPACHO ELECTRÓNICA",
55 : "NOTA DE DÉBITO",
56 : "NOTA DE DÉBITO ELECTRÓNICA",
60 : "NOTA DE CRÉDITO",
61 : "NOTA DE CRÉDITO ELECTRÓNICA",
103 : "LIQUIDACIÓN",
110 : "FACTURA DE EXPORTACIÓN ELECTRÓNICA",
111 : "NOTA DE DÉBITO DE EXPORTACIÓN ELECTRÓNICA",
112 : "NOTA DE CRÉDITO DE EXPORTACIÓN ELECTRÓNICA",
801 : "ORDEN DE COMPRA",
802 : "NOTA DE PEDIDO",
803 : "CONTRATO",
804 : "RESOLUCIÓN",
805 : "PROCESO CHILECOMPRA",
806 : "FICHA CHILECOMPRA",
807 : "DUS",
808 : "B/L (CONOCIMIENTO DE EMBARQUE)",
809 : "AWB (AIR WILL BILL)",
810 : "MIC/DTA",
811 : "CARTA DE PORTE",
812 : "RESOLUCIÓN DEL SNA DONDE CALIFICA SERVICIOS DE EXPORTACIÓN",
813 : "PASAPORTE",
814 : "CERTIFICADO DE DEPÓSITO BOLSA PROD. CHILE.",
815 : "VALE DE PRENDA BOLSA PROD. CHILE",
'SET': "SET"
}
<|code_end|>
, generate the next line using the imports in this file:
from .TemplateElement import TemplateElement
and context (functions, classes, or occasionally code) from other files:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
#
# @property
# def resources(self):
# """ Requires the return of a list of `Resource` object providing
# the filename the template is going to expect, and the data that
# should be inside of it.
#
# In case of none, return an empty list.
# """
# return []
#
# @property
# def carta(self) -> str:
# """ Create TeX Template for printable medium: "US Letter"
# """
# raise NotImplementedError
#
# @property
# def oficio(self) -> str:
# """ Create TeX Template for printable medium: "American Foolscap"
# """
# raise NotImplementedError
#
# @property
# def thermal80mm(self) -> str:
# """ Create TeX Template for printable medium: "Thermal endless 80mm width"
# """
# raise NotImplementedError
. Output only the next line. | class SectionReferences(TemplateElement): |
Based on the snippet: <|code_start|>""" Totals Section of the Document
Contains:
* Discount
* Net Amount
* Tax exempt Amount
* Tax
* (optional) Other Tax Types
* Total
"""
SPECIAL_TAX = {
19: ('IAH', 12, '+'),
15: ('RTT', 19, '-'),
33: ('IRM', 8, '-'),
331: ('IRM', 19, '-'),
34: ('IRT', 4, '-'),
39: ('IRPPA', 19, '-')
}
<|code_end|>
, predict the immediate next line with the help of imports:
from .TemplateElement import TemplateElement
and context (classes, functions, sometimes code) from other files:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
#
# @property
# def resources(self):
# """ Requires the return of a list of `Resource` object providing
# the filename the template is going to expect, and the data that
# should be inside of it.
#
# In case of none, return an empty list.
# """
# return []
#
# @property
# def carta(self) -> str:
# """ Create TeX Template for printable medium: "US Letter"
# """
# raise NotImplementedError
#
# @property
# def oficio(self) -> str:
# """ Create TeX Template for printable medium: "American Foolscap"
# """
# raise NotImplementedError
#
# @property
# def thermal80mm(self) -> str:
# """ Create TeX Template for printable medium: "Thermal endless 80mm width"
# """
# raise NotImplementedError
. Output only the next line. | class SectionTotals(TemplateElement): |
Given the code snippet: <|code_start|>""" Payments Section of the Document
Contains:
* Payment Mode/Type
* Amount
* Descriptor (Payment Detail / further Information / Description)
"""
<|code_end|>
, generate the next line using the imports in this file:
from .TemplateElement import TemplateElement
and context (functions, classes, or occasionally code) from other files:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
#
# @property
# def resources(self):
# """ Requires the return of a list of `Resource` object providing
# the filename the template is going to expect, and the data that
# should be inside of it.
#
# In case of none, return an empty list.
# """
# return []
#
# @property
# def carta(self) -> str:
# """ Create TeX Template for printable medium: "US Letter"
# """
# raise NotImplementedError
#
# @property
# def oficio(self) -> str:
# """ Create TeX Template for printable medium: "American Foolscap"
# """
# raise NotImplementedError
#
# @property
# def thermal80mm(self) -> str:
# """ Create TeX Template for printable medium: "Thermal endless 80mm width"
# """
# raise NotImplementedError
. Output only the next line. | class SectionPayments(TemplateElement): |
Given snippet: <|code_start|> \\begin{mdframed}[style=emitter]
{
\\tabulinesep=_1.0mm^1.0mm
\\vspace{1mm}
\\begin{tabu}{X[-1r] X[-1l] X[-1r] X[-1l]}
\\rowfont{\\footnotesize}
\\everyrow{\\rowfont{\\footnotesize}}
\\textbf{SEÑOR(ES):} & %s &
\\textbf{R.U.T.:} & %s \\\\
\\textbf{DIRECCION:} & %s &
\\textbf{COMUNA:} & %s \\\\
\\textbf{GIRO:} & %s &
\\textbf{CIUDAD:} & %s \\\\
\\hline
\\textbf{VENDEDOR:} & %s &
\\textbf{PATENTE:} & %s \\\\
\\textbf{N\\textdegree PEDIDO:} & %s &
& \\\\
\\end{tabu}
}
\\end{mdframed}
"""
def __init__(self, emission_date, expiration_date,
receivername, receiverrut, receiveraddress,
receivercomune, receiveractivity, receivercity='',
emittersalesman='', ordernumber='', licenceplate=''):
self._emission_date = emission_date
self._expiration_date = expiration_date
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .helpers import escape_tex
from .TemplateElement import TemplateElement
and context:
# Path: src/sii/lib/printing/helpers.py
# def escape_tex(string):
# chars = '%|\$|&'
#
# return re.sub('(?<!\\\\)({0})'.format(chars), r'\\\1', string)
#
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
#
# @property
# def resources(self):
# """ Requires the return of a list of `Resource` object providing
# the filename the template is going to expect, and the data that
# should be inside of it.
#
# In case of none, return an empty list.
# """
# return []
#
# @property
# def carta(self) -> str:
# """ Create TeX Template for printable medium: "US Letter"
# """
# raise NotImplementedError
#
# @property
# def oficio(self) -> str:
# """ Create TeX Template for printable medium: "American Foolscap"
# """
# raise NotImplementedError
#
# @property
# def thermal80mm(self) -> str:
# """ Create TeX Template for printable medium: "Thermal endless 80mm width"
# """
# raise NotImplementedError
which might include code, classes, or functions. Output only the next line. | self._receivername = escape_tex(receivername) |
Next line prediction: <|code_start|>""" Receiver Section in the Document
Contains:
* Emission Date
* Expiration Date
* Receiver Name
* Receiver RUT
* Receiver Activity (Economic Role)
* Receiver Address
* Receiver Comune
* Receiver City
* (optional)(carta/oficio only) Emitter Salesman
* (optional)(carta/oficio only) Order Number
* (optional)(carta/oficio only) Licence Plate
"""
<|code_end|>
. Use current file imports:
(from .helpers import escape_tex
from .TemplateElement import TemplateElement)
and context including class names, function names, or small code snippets from other files:
# Path: src/sii/lib/printing/helpers.py
# def escape_tex(string):
# chars = '%|\$|&'
#
# return re.sub('(?<!\\\\)({0})'.format(chars), r'\\\1', string)
#
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
#
# @property
# def resources(self):
# """ Requires the return of a list of `Resource` object providing
# the filename the template is going to expect, and the data that
# should be inside of it.
#
# In case of none, return an empty list.
# """
# return []
#
# @property
# def carta(self) -> str:
# """ Create TeX Template for printable medium: "US Letter"
# """
# raise NotImplementedError
#
# @property
# def oficio(self) -> str:
# """ Create TeX Template for printable medium: "American Foolscap"
# """
# raise NotImplementedError
#
# @property
# def thermal80mm(self) -> str:
# """ Create TeX Template for printable medium: "Thermal endless 80mm width"
# """
# raise NotImplementedError
. Output only the next line. | class SectionReceiver(TemplateElement): |
Predict the next line for this snippet: <|code_start|>""" Company Pool to resolve Company objects from (coming from YAML)
For a Template example look into files/companies.yml at the root of the repository. Such a file can
hold more than one instances of company metadata inside, allowing for multiple companies being
handled transparently by the same library or client/server application.
"""
class CompanyPool(object):
def __init__(self, yml):
self._companies = {}
for rut, data in yml.items():
<|code_end|>
with the help of current file imports:
import io
import yaml
from sii.lib.lib import fileio
from .Company import Company
from .Branch import Branch
and context from other files:
# Path: src/sii/lib/types/Company.py
# class Company(object):
#
# def __init__(self, data):
# self.__dict__.update(data)
# self.branches = [Branch(b) for b in self.branches]
#
# def __getattr__(self, key):
# if key.startswith('__'):
# return super().__getattr__(key)
#
# if key in self.__dict__:
# return super().__getattr__(key)
# else:
# raise RuntimeError("Expected and did not find <{0}> in company YAML.".format(key))
#
# Path: src/sii/lib/types/Branch.py
# class Branch(object):
#
# def __init__(self, yml):
# self.__dict__.update(yml)
#
# def __getattr__(self, key):
# if key.startswith('__'):
# return super().__getattr__(key)
#
# if key in self.__dict__:
# return super().__getattr__(key)
# else:
# raise RuntimeError("Expected and did not find <{0}> in branch section of YAML.".format(key))
, which may contain function names, class names, or code. Output only the next line. | self._companies[rut] = Company(data) |
Given the code snippet: <|code_start|> 'small', 'footnotesize',
self._build_headers(),
self._build_rows(),
self._build_disclaimer()
)
@property
def thermal80mm(self):
return self.__doc__ % (
'scriptsize', 'scriptsize',
self._build_headers(),
self._build_rows(),
self._build_disclaimer()
)
def _build_tablecols(self):
cols = []
cols.append('' if self._table_margins else '@{}')
for coldef in self._colsettings:
setstr = 'X['
setstr += {True: '', False: '-1'}[coldef['expand']]
setstr += {'left': 'l', 'center': 'c', 'right': 'r'}[coldef['align']]
setstr += ']'
cols.append(setstr)
cols.append('' if self._table_margins else '@{}')
return ' '.join(cols)
def _build_headers(self):
<|code_end|>
, generate the next line using the imports in this file:
from .helpers import escape_tex
from .TemplateElement import TemplateElement
and context (functions, classes, or occasionally code) from other files:
# Path: src/sii/lib/printing/helpers.py
# def escape_tex(string):
# chars = '%|\$|&'
#
# return re.sub('(?<!\\\\)({0})'.format(chars), r'\\\1', string)
#
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
#
# @property
# def resources(self):
# """ Requires the return of a list of `Resource` object providing
# the filename the template is going to expect, and the data that
# should be inside of it.
#
# In case of none, return an empty list.
# """
# return []
#
# @property
# def carta(self) -> str:
# """ Create TeX Template for printable medium: "US Letter"
# """
# raise NotImplementedError
#
# @property
# def oficio(self) -> str:
# """ Create TeX Template for printable medium: "American Foolscap"
# """
# raise NotImplementedError
#
# @property
# def thermal80mm(self) -> str:
# """ Create TeX Template for printable medium: "Thermal endless 80mm width"
# """
# raise NotImplementedError
. Output only the next line. | cols = ['\\textbf{%s}' % escape_tex(col['name']) for col in self._colsettings] |
Predict the next line after this snippet: <|code_start|>""" Items Section of the Document
Contains:
* Header Tags
* Item Rows
"""
GUIA_DESPACHO_TYPES = {
1: "OPERACIÓN CONSTITUYE VENTA",
2: "VENTA POR EFECTUAR",
3: "CONSIGNACION",
4: "ENTREGA GRATUITA",
5: "TRASLADO INTERNO",
6: "OTRO TRASLADOS NO VENTA",
7: "GUÍA DE DEVOLUCIÓN",
8: "TRASLADO PARA EXPORTACIÓN. (NO VENTA)",
9: "VENTA PARA EXPORTACIÓN"
}
<|code_end|>
using the current file's imports:
from .helpers import escape_tex
from .TemplateElement import TemplateElement
and any relevant context from other files:
# Path: src/sii/lib/printing/helpers.py
# def escape_tex(string):
# chars = '%|\$|&'
#
# return re.sub('(?<!\\\\)({0})'.format(chars), r'\\\1', string)
#
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
#
# @property
# def resources(self):
# """ Requires the return of a list of `Resource` object providing
# the filename the template is going to expect, and the data that
# should be inside of it.
#
# In case of none, return an empty list.
# """
# return []
#
# @property
# def carta(self) -> str:
# """ Create TeX Template for printable medium: "US Letter"
# """
# raise NotImplementedError
#
# @property
# def oficio(self) -> str:
# """ Create TeX Template for printable medium: "American Foolscap"
# """
# raise NotImplementedError
#
# @property
# def thermal80mm(self) -> str:
# """ Create TeX Template for printable medium: "Thermal endless 80mm width"
# """
# raise NotImplementedError
. Output only the next line. | class SectionItems(TemplateElement): |
Continue the code snippet: <|code_start|>""" SII Document Signing Process Functions
"""
__all__ = [
'sign_document',
'sign_document_all',
'build_template'
]
def sign_document(xml, key_path, cert_path):
""" Signs topmost XML <ds:Signature> node under the document root node.
:param `etree.Element` xml: The XML to be signed.
:param str key_path: Path to PEM key file.
:param str cert_path: Path to PEM certificate file.
:return: `etree.Element` to the signed document. Should be the same with the provided xml param.
"""
# HACK inject a DTD preamble in to direct non-standard xml:id
# resolution for <Reference URI = "#XXXX">.
<|code_end|>
. Use current file imports:
import xmlsec
from .helpers import prepend_dtd, extract_signode, extract_signodes
and context (classes, functions, or code) from other files:
# Path: src/sii/lib/helpers.py
# def prepend_dtd(xml):
# """ Prepends a DTD providing a definition of a to a non-standard xml:id pointer. Necessary for
# signature and signature verification.
#
# :param `etree.Element` xml: XML tree to prepend the DTD to.
#
# :param str sig_tag: Tag name to contain the URI.
# :param str uri_attr: Attribute name to contain the URI.
#
# :return: An `etree.Element` with the now DTD contextualized XML.
# """
# root = None
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# tag = re.sub('\{.*\}', '', root.tag)
# preamble = DTD_PREAMBLE.format(root=tag)
#
# buff = io.BytesIO()
# buff.write(bytes(preamble, 'utf8'))
# buff.write(etree.tostring(xml, pretty_print=True, method='xml'))
# buff.seek(0)
#
# tree = etree.parse(buff)
# root = tree.getroot()
#
# return root
#
# def extract_signode(xml):
# """ Extracts the <ds:Signature> node from right under the root node in an XML document. If none
# is found there, an RuntimeException gets risen.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: `etree.Element` of the <ds:Signature>
# """
# signode = xml.find('{http://www.w3.org/2000/09/xmldsig#}Signature')
#
# if signode is None:
# raise ValueError("Did not find a '{http://www.w3.org/2000/09/xmldsig#}Signature' under the root node")
#
# return signode
#
# def extract_signodes(xml):
# """ Extracts all <ds:Signature> nodes from given XML.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: list of `etree.Element` of the <ds:Signature>'s
# """
# signodes = xml.xpath('//ds:Signature', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
# return signodes
. Output only the next line. | xml = prepend_dtd(xml) |
Based on the snippet: <|code_start|>""" SII Document Signing Process Functions
"""
__all__ = [
'sign_document',
'sign_document_all',
'build_template'
]
def sign_document(xml, key_path, cert_path):
""" Signs topmost XML <ds:Signature> node under the document root node.
:param `etree.Element` xml: The XML to be signed.
:param str key_path: Path to PEM key file.
:param str cert_path: Path to PEM certificate file.
:return: `etree.Element` to the signed document. Should be the same with the provided xml param.
"""
# HACK inject a DTD preamble in to direct non-standard xml:id
# resolution for <Reference URI = "#XXXX">.
xml = prepend_dtd(xml)
<|code_end|>
, predict the immediate next line with the help of imports:
import xmlsec
from .helpers import prepend_dtd, extract_signode, extract_signodes
and context (classes, functions, sometimes code) from other files:
# Path: src/sii/lib/helpers.py
# def prepend_dtd(xml):
# """ Prepends a DTD providing a definition of a to a non-standard xml:id pointer. Necessary for
# signature and signature verification.
#
# :param `etree.Element` xml: XML tree to prepend the DTD to.
#
# :param str sig_tag: Tag name to contain the URI.
# :param str uri_attr: Attribute name to contain the URI.
#
# :return: An `etree.Element` with the now DTD contextualized XML.
# """
# root = None
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# tag = re.sub('\{.*\}', '', root.tag)
# preamble = DTD_PREAMBLE.format(root=tag)
#
# buff = io.BytesIO()
# buff.write(bytes(preamble, 'utf8'))
# buff.write(etree.tostring(xml, pretty_print=True, method='xml'))
# buff.seek(0)
#
# tree = etree.parse(buff)
# root = tree.getroot()
#
# return root
#
# def extract_signode(xml):
# """ Extracts the <ds:Signature> node from right under the root node in an XML document. If none
# is found there, an RuntimeException gets risen.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: `etree.Element` of the <ds:Signature>
# """
# signode = xml.find('{http://www.w3.org/2000/09/xmldsig#}Signature')
#
# if signode is None:
# raise ValueError("Did not find a '{http://www.w3.org/2000/09/xmldsig#}Signature' under the root node")
#
# return signode
#
# def extract_signodes(xml):
# """ Extracts all <ds:Signature> nodes from given XML.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: list of `etree.Element` of the <ds:Signature>'s
# """
# signodes = xml.xpath('//ds:Signature', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
# return signodes
. Output only the next line. | signode = extract_signode(xml) |
Given the following code snippet before the placeholder: <|code_start|> # Load Private Key and Public Certificate
key = xmlsec.Key.from_file(key_path, xmlsec.KeyFormat.PEM)
key.load_cert_from_file(cert_path, xmlsec.KeyFormat.PEM)
# Create Crypto Context and load in Key/Cert
ctx = xmlsec.SignatureContext()
ctx.key = key
ctx.sign(signode)
return xml
def sign_document_all(xml, key_path, cert_path):
""" Signs all XML's <ds:Signature> nodes under the document root node.
:param `etree.Element` xml: The XML to be signed.
:param str key_path: Path to PEM key file.
:param str cert_path: Path to PEM certificate file.
:return: `etree.Element` to the signed document. Should be the same with the provided xml param.
TODO: make sure we get all <ds:Signature> nodes in depth first order, otherwise we would break envolving
signatures. Its not that it is not currently working, it is just without guaranteed order.
"""
# HACK inject a DTD preamble in to direct non-standard xml:id
# resolution for <Reference URI = "#XXXX">.
xml = prepend_dtd(xml)
<|code_end|>
, predict the next line using imports from the current file:
import xmlsec
from .helpers import prepend_dtd, extract_signode, extract_signodes
and context including class names, function names, and sometimes code from other files:
# Path: src/sii/lib/helpers.py
# def prepend_dtd(xml):
# """ Prepends a DTD providing a definition of a to a non-standard xml:id pointer. Necessary for
# signature and signature verification.
#
# :param `etree.Element` xml: XML tree to prepend the DTD to.
#
# :param str sig_tag: Tag name to contain the URI.
# :param str uri_attr: Attribute name to contain the URI.
#
# :return: An `etree.Element` with the now DTD contextualized XML.
# """
# root = None
# if hasattr(xml, 'getroot'):
# root = xml.getroot()
# else:
# root = xml.getroottree().getroot()
#
# tag = re.sub('\{.*\}', '', root.tag)
# preamble = DTD_PREAMBLE.format(root=tag)
#
# buff = io.BytesIO()
# buff.write(bytes(preamble, 'utf8'))
# buff.write(etree.tostring(xml, pretty_print=True, method='xml'))
# buff.seek(0)
#
# tree = etree.parse(buff)
# root = tree.getroot()
#
# return root
#
# def extract_signode(xml):
# """ Extracts the <ds:Signature> node from right under the root node in an XML document. If none
# is found there, an RuntimeException gets risen.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: `etree.Element` of the <ds:Signature>
# """
# signode = xml.find('{http://www.w3.org/2000/09/xmldsig#}Signature')
#
# if signode is None:
# raise ValueError("Did not find a '{http://www.w3.org/2000/09/xmldsig#}Signature' under the root node")
#
# return signode
#
# def extract_signodes(xml):
# """ Extracts all <ds:Signature> nodes from given XML.
#
# :param `etree.Element` xml: Root node of the document.
#
# :return: list of `etree.Element` of the <ds:Signature>'s
# """
# signodes = xml.xpath('//ds:Signature', namespaces={'ds': 'http://www.w3.org/2000/09/xmldsig#'})
# return signodes
. Output only the next line. | for signode in extract_signodes(xml): |
Based on the snippet: <|code_start|>""" Static Company Data (coming from YAML)
For a Template example look into files/companies.yml at the root of the repository.
"""
class Company(object):
def __init__(self, data):
self.__dict__.update(data)
<|code_end|>
, predict the immediate next line with the help of imports:
from .Branch import Branch
and context (classes, functions, sometimes code) from other files:
# Path: src/sii/lib/types/Branch.py
# class Branch(object):
#
# def __init__(self, yml):
# self.__dict__.update(yml)
#
# def __getattr__(self, key):
# if key.startswith('__'):
# return super().__getattr__(key)
#
# if key in self.__dict__:
# return super().__getattr__(key)
# else:
# raise RuntimeError("Expected and did not find <{0}> in branch section of YAML.".format(key))
. Output only the next line. | self.branches = [Branch(b) for b in self.branches] |
Predict the next line after this snippet: <|code_start|>
# Add the <ds:Reference/> node to the signature template.
ref = xmlsec.template.add_reference(signode, digest_method=xmlsec.Transform.SHA1)
# Add the enveloped transform descriptor.
xmlsec.template.add_transform(ref, transform=xmlsec.Transform.ENVELOPED)
# Add Key Value Info and x509 Data
key_info = xmlsec.template.ensure_key_info(signode)
xmlsec.template.add_key_value(key_info)
xmlsec.template.add_x509_data(key_info)
# Load Key and Certificate
key = xmlsec.Key.from_file(self.key_path, xmlsec.KeyFormat.PEM)
key.load_cert_from_file(self.cert_path, xmlsec.KeyFormat.PEM)
# Create Crypto Context and sign Signature Node
ctx = xmlsec.SignatureContext()
ctx.key = key
ctx.sign(signode)
return root
def _fetch_token(self, request_etree):
request_xml = etree.tostring(
request_etree,
method='xml',
encoding='unicode'
)
<|code_end|>
using the current file's imports:
import re
import xmlsec
from suds.client import Client
from lxml import etree
from .helpers import with_retry
and any relevant context from other files:
# Path: src/sii/lib/ptcl/helpers.py
# def with_retry(func, count=RETRIES_MAX, ival=RETRIES_SLEEP):
# retries = 0
# while retries < count:
# try:
# return func()
# except Exception as exc:
# code, msg = exc.args[0]
#
# if code == 503:
# retries += 1
# time.sleep(ival)
# continue
# else:
# raise
. Output only the next line. | return with_retry(lambda: self.sii_soap.service.getToken(request_xml)) |
Based on the snippet: <|code_start|>
class CAFPool(object):
def __init__(self, dirpath):
self._path = os.path.abspath(os.path.expanduser(dirpath))
self._fnames = [fname for fname in os.listdir(self._path) if os.path.splitext(fname)[-1] == ".xml"]
self._fpaths = [os.path.join(self._path, fname) for fname in self._fnames]
self._trees = [_read_caf(fname) for fname in self._fpaths]
self._cafs = [CAF(tree) for tree in self._trees]
self._idx_company = collections.defaultdict(lambda: list())
for caf in self._cafs:
rut = read_rut(caf.company_rut)
self._idx_company[rut].append(caf)
def resolve(self, rut, dte_type, dte_id):
""" Returns CAF if available for given <company, dte_type, dte_id> information. """
cafs = self._idx_company[rut]
typed = [caf for caf in cafs if caf.dte_type == dte_type]
for caf in typed:
if caf.dte_id_from <= dte_id <= caf.dte_id_until:
return caf
raise RuntimeError(
"Could not find CAF for document <company={0}, type={1}, id={2}>".format(rut, dte_type, dte_id)
)
def resolve_document(self, dte):
""" Returns CAF if available for given DTE inner <Documento>. """
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import collections
from lxml import etree
from ..lib import xml
from .CAF import CAF
and context (classes, functions, sometimes code) from other files:
# Path: src/sii/lib/lib/xml.py
# XML_DECL = lambda enc: b'<?xml version="1.0" encoding="' + bytes(enc, enc) + b'"?>'
# class XML(object):
# def __init__(self, node=None, name=None, text=None, namespaces=None):
# def __repr__(self):
# def __str__(self):
# def __int__(self):
# def __float__(self):
# def __iter__(self):
# def __setattr__(self, name, value):
# def __getitem__(self, key):
# def __setitem__(self, key, value):
# def __remove__(self):
# def __name__(self):
# def __children__(self):
# def __siblings__(self):
# def __generation__(self):
# def _etree(self):
# def _xml(self):
# def _str(self):
# def _int(self):
# def _float(self):
# def _number(self):
# def _list(self):
# def _has(self, name):
# def create_xml(name, value=None, namespaces=None):
# def read_xml(path):
# def load_xml(xml_string):
# def wrap_xml(xml_etree):
# def dump_etree(xml_node):
# def dump_xml(xml_node, **kwargs):
# def print_xml(xml, file=sys.stdout.buffer, end='\n', encoding='UTF-8'):
# def write_xml(xml, fpath, end='\n', encoding='UTF-8', append=False):
#
# Path: src/sii/lib/types/CAF.py
# class CAF(object):
#
# def __init__(self, caf_xml):
# if isinstance(caf_xml, str):
# self._root = etree.fromstring(caf_xml)
# elif isinstance(caf_xml, etree._Element):
# self._root = caf_xml
# else:
# raise ValueError("Expected XML string or etree.Element as argument")
#
# def __str__(self):
# return etree.tostring(self._root, encoding='unicode')
#
# @property
# def xml(self):
# return self._root
#
# @property
# def company_rut(self):
# rut = self._ctrld_xpath(
# '//RE/text()',
# "Could not parse company RUT in CAF:\n{0}".format(str(self))
# )
# return rut
#
# @property
# def dte_type(self):
# typ = self._ctrld_xpath(
# '//TD/text()',
# "Could not parse document type in CAF:\n{0}".format(str(self))
# )
# return int(typ)
#
# @property
# def dte_id_from(self):
# id_from = self._ctrld_xpath(
# '//RNG/D/text()',
# "Could not parse range <from> in CAF:\n{0}".format(str(self))
# )
# return int(id_from)
#
# @property
# def dte_id_until(self):
# id_until = self._ctrld_xpath(
# '//RNG/H/text()',
# "Could not parse range <until> in CAF:\n{0}".format(str(self))
# )
# return int(id_until)
#
# @property
# def private_key(self):
# return self._ctrld_xpath(
# '//RSASK/text()',
# "Could not parse private key in CAF:\n{0}".format(str(self))
# )
#
# @property
# def public_key(self):
# return self._ctrld_xpath(
# '//RSAPUBK/text()',
# "Could not parse public key in CAF:\n{0}".format(str(self))
# )
#
# def _ctrld_xpath(self, xpath, failmsg):
# values = self._root.xpath(xpath)
#
# if not values:
# raise RuntimeError(failmsg)
# elif len(values) > 1:
# raise RuntimeError("Found more than one values matching "
# "\"{0}\" in:\n{1}".format(xpath, self.xml))
# else:
# return values[0]
. Output only the next line. | dte = xml.wrap_xml(dte) |
Next line prediction: <|code_start|>""" Wrapper around CAF File.
This is currently only a proxy to an internal object from cns.lib.sii.
"""
__all__ = [
"CAFPool"
]
read_rut = lambda raw: int(raw.split('-')[0])
class CAFPool(object):
def __init__(self, dirpath):
self._path = os.path.abspath(os.path.expanduser(dirpath))
self._fnames = [fname for fname in os.listdir(self._path) if os.path.splitext(fname)[-1] == ".xml"]
self._fpaths = [os.path.join(self._path, fname) for fname in self._fnames]
self._trees = [_read_caf(fname) for fname in self._fpaths]
<|code_end|>
. Use current file imports:
(import os
import collections
from lxml import etree
from ..lib import xml
from .CAF import CAF)
and context including class names, function names, or small code snippets from other files:
# Path: src/sii/lib/lib/xml.py
# XML_DECL = lambda enc: b'<?xml version="1.0" encoding="' + bytes(enc, enc) + b'"?>'
# class XML(object):
# def __init__(self, node=None, name=None, text=None, namespaces=None):
# def __repr__(self):
# def __str__(self):
# def __int__(self):
# def __float__(self):
# def __iter__(self):
# def __setattr__(self, name, value):
# def __getitem__(self, key):
# def __setitem__(self, key, value):
# def __remove__(self):
# def __name__(self):
# def __children__(self):
# def __siblings__(self):
# def __generation__(self):
# def _etree(self):
# def _xml(self):
# def _str(self):
# def _int(self):
# def _float(self):
# def _number(self):
# def _list(self):
# def _has(self, name):
# def create_xml(name, value=None, namespaces=None):
# def read_xml(path):
# def load_xml(xml_string):
# def wrap_xml(xml_etree):
# def dump_etree(xml_node):
# def dump_xml(xml_node, **kwargs):
# def print_xml(xml, file=sys.stdout.buffer, end='\n', encoding='UTF-8'):
# def write_xml(xml, fpath, end='\n', encoding='UTF-8', append=False):
#
# Path: src/sii/lib/types/CAF.py
# class CAF(object):
#
# def __init__(self, caf_xml):
# if isinstance(caf_xml, str):
# self._root = etree.fromstring(caf_xml)
# elif isinstance(caf_xml, etree._Element):
# self._root = caf_xml
# else:
# raise ValueError("Expected XML string or etree.Element as argument")
#
# def __str__(self):
# return etree.tostring(self._root, encoding='unicode')
#
# @property
# def xml(self):
# return self._root
#
# @property
# def company_rut(self):
# rut = self._ctrld_xpath(
# '//RE/text()',
# "Could not parse company RUT in CAF:\n{0}".format(str(self))
# )
# return rut
#
# @property
# def dte_type(self):
# typ = self._ctrld_xpath(
# '//TD/text()',
# "Could not parse document type in CAF:\n{0}".format(str(self))
# )
# return int(typ)
#
# @property
# def dte_id_from(self):
# id_from = self._ctrld_xpath(
# '//RNG/D/text()',
# "Could not parse range <from> in CAF:\n{0}".format(str(self))
# )
# return int(id_from)
#
# @property
# def dte_id_until(self):
# id_until = self._ctrld_xpath(
# '//RNG/H/text()',
# "Could not parse range <until> in CAF:\n{0}".format(str(self))
# )
# return int(id_until)
#
# @property
# def private_key(self):
# return self._ctrld_xpath(
# '//RSASK/text()',
# "Could not parse private key in CAF:\n{0}".format(str(self))
# )
#
# @property
# def public_key(self):
# return self._ctrld_xpath(
# '//RSAPUBK/text()',
# "Could not parse public key in CAF:\n{0}".format(str(self))
# )
#
# def _ctrld_xpath(self, xpath, failmsg):
# values = self._root.xpath(xpath)
#
# if not values:
# raise RuntimeError(failmsg)
# elif len(values) > 1:
# raise RuntimeError("Found more than one values matching "
# "\"{0}\" in:\n{1}".format(xpath, self.xml))
# else:
# return values[0]
. Output only the next line. | self._cafs = [CAF(tree) for tree in self._trees] |
Next line prediction: <|code_start|>""" SII Document Patch.
Contains:
* RUT
* Document Type Name
* Document Serial Number
* SII Branch
* (thermal*mm only)(optional) Logo (path to EPS)
"""
DOC_TYPE_STRINGS = {
33: "FACTURA\\break ELECTRÓNICA",
34: "FACTURA\\break NO AFECTA O EXENTA\\break ELECTRÓNICA",
52: "GUÍA DE DESPACHO\\break ELECTRÓNICA",
56: "NOTA DE DÉBITO\\break ELECTRÓNICA",
61: "NOTA DE CRÉDITO\\break ELECTRÓNICA",
46: "FACTURA DE COMPRA\\break ELECTRÓNICA",
43: "LIQUIDACIÓN FACTURA\\break ELECTRÓNICA",
110: "FACTURA\\break DE EXPORTACIÓN\\break ELECTRÓNICA",
111: "NOTA DE DÉBITO\\break DE EXPORTACIÓN\\break ELECTRÓNICA",
112: "NOTA DE CRÉDITO\\break DE EXPORTACIÓN\\break ELECTRÓNICA"
}
<|code_end|>
. Use current file imports:
(from .TemplateElement import TemplateElement, Resource)
and context including class names, function names, or small code snippets from other files:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
# def resources(self):
# def carta(self) -> str:
# def oficio(self) -> str:
# def thermal80mm(self) -> str:
. Output only the next line. | class SectionSiiPatch(TemplateElement): |
Predict the next line for this snippet: <|code_start|> \\begin{center}
\\large{\\textbf{R.U.T.: %s}}\\break
\\newline
\\large{\\textbf{%s}}\\break
\\newline
\\large{\\textbf{N\\textdegree\\ %s}}
\\end{center}
\\end{mdframed}
\\vspace{0.5em}
\\large{\\textbf{S.I.I. - %s}}
%s
\\end{center}
"""
def __init__(self, rut, dte_type, dte_serial, sii_branch, logo_path=''):
self._rut = rut
self._dte_type = dte_type
self._dte_type_str = DOC_TYPE_STRINGS[dte_type]
self._dte_serial = dte_serial
self._sii_branch = sii_branch
self._logo_path = logo_path
if self._logo_path:
with open(self._logo_path, 'r') as fh:
self._logo_data = fh.read()
@property
def resources(self):
ress = []
if self._logo_path:
<|code_end|>
with the help of current file imports:
from .TemplateElement import TemplateElement, Resource
and context from other files:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
# def resources(self):
# def carta(self) -> str:
# def oficio(self) -> str:
# def thermal80mm(self) -> str:
, which may contain function names, class names, or code. Output only the next line. | ress.append(Resource('logo.eps', self._logo_data)) |
Given snippet: <|code_start|>""" Emitter Section of the Document
Contains:
* Emitter Name (full Version)
* Emitter Activity (Economic Role)
* Emitter HQ Address String
* Emitter emitting Branch String
* (carta/oficio only) Logo (Optional) [takes the path to the logo EPS]
* (thermal*mm only) Emitter Name (short Version)
* (thermal*mm only) Emitter Salesman
* (thermal*mm only) Order Number
* (thermal*mm only) Licence Plate
Comes in two flavours:
* Emitter (emitting company is the same as the one printing the document)
* Provider (emitting company is a provider for the one printing the document)
"""
__all__ = [
'SectionEmitter',
'SectionEmitterProvider'
]
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os.path as path
from .TemplateElement import TemplateElement, Resource
and context:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
# def resources(self):
# def carta(self) -> str:
# def oficio(self) -> str:
# def thermal80mm(self) -> str:
which might include code, classes, or functions. Output only the next line. | class SectionEmitter(TemplateElement): |
Given snippet: <|code_start|> """
def __init__(self, emitter_name_long, emitter_name_short,
emitter_activity,
emitter_hq_addr, emitter_branch_addr, emitter_phone,
order_number='', emitter_salesman='', licence_plate='',
logo_path=''):
self._emitter_name_long = emitter_name_long
self._emitter_name_short = emitter_name_short
self._emitter_activity = emitter_activity
self._emitter_hq_addr = emitter_hq_addr
self._emitter_branch_addr = emitter_branch_addr
self._emitter_phone = emitter_phone
# carta/oficio specifics
self._logo_path = logo_path
if self._logo_path:
with open(self._logo_path, 'rb') as fh:
self._logo_data = fh.read()
# thermal*mm specifics
self._order_number = order_number
self._licence_plate = licence_plate
self._emitter_salesman = emitter_salesman
@property
def resources(self):
ress = []
if self._logo_path:
_, ext = path.splitext(self._logo_path)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os.path as path
from .TemplateElement import TemplateElement, Resource
and context:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
# def resources(self):
# def carta(self) -> str:
# def oficio(self) -> str:
# def thermal80mm(self) -> str:
which might include code, classes, or functions. Output only the next line. | ress.append(Resource('logo' + ext, self._logo_data)) |
Here is a snippet: <|code_start|> raise NotImplementedError
@property
def executable(self):
""" Path to the Executable. """
raise NotImplementedError
def check(self):
""" Check if the Executable can be found and run. This is the point we have to
fail if the program can not be found and/or is presumably not installed on the system.
"""
raise NotImplementedError
def call(self):
""" Call the executable to do something. Here the parameters and behaviours are
implementation specific.
NOTE: This function should default to hiding stdout and stderr from the callable.
"""
raise NotImplementedError
class Ghostscript(SystemCall):
@property
def name(self):
return 'ghostscript'
@property
def executable(self):
<|code_end|>
. Write the next line using the current file imports:
import os
import re
import tempfile
import subprocess
from .shell import which, cd
and context from other files:
# Path: src/sii/lib/lib/shell.py
# def which(program, fail=True):
# """ Sort of replicates the `which` utility.
# """
# is_exe = lambda fp: os.path.isfile(fp) and os.access(fp, os.X_OK)
# locations = [os.path.join(path, program) for path in os.environ["PATH"].split(os.pathsep)]
# found = [loc for loc in locations if is_exe(loc)]
#
# if not found:
# if not fail:
# return False
# else:
# raise RuntimeError("Did not find program: <{0}>".format(program))
# elif len(found) > 1:
# if not fail:
# return False
# else:
# raise RuntimeError("Found more than one instance of the program:\n"
# "{0}".format('\n'.join(found)))
# else:
# return found[0]
#
# class cd(object):
# """ Directory Context Switcher.
#
# Switches directory within the guards, switching back when leaving them.
#
# [NOT THREAD SAFE]
# If multiple threads switch around it looses its way back to the original
# working directory. A possible way would be to take into consideration in
# which thread it is currently being called.
# """
# def __init__(self, dir):
# self.dir = dir
# self.olddir = None
#
# def __enter__(self):
# self._olddir = os.getcwd()
# os.chdir(self.dir)
#
# def __exit__(self, etype, evalue, etraceback):
# if any([etype, evalue, etraceback]):
# traceback.print_exception(etype, evalue, etraceback)
# os.chdir(self._olddir)
, which may include functions, classes, or code. Output only the next line. | return which(self.name) |
Given the code snippet: <|code_start|>""" Disclaimer Section of the Document (you might want to subclass this one)
Contains:
* Company Name
* Company Rut
* Disclaimer of the Company (subclass if you want to change this)
"""
__all__ = [
'SectionDisclaimer',
'SectionDisclaimerDummy'
]
<|code_end|>
, generate the next line using the imports in this file:
from .TemplateElement import TemplateElement
and context (functions, classes, or occasionally code) from other files:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
#
# @property
# def resources(self):
# """ Requires the return of a list of `Resource` object providing
# the filename the template is going to expect, and the data that
# should be inside of it.
#
# In case of none, return an empty list.
# """
# return []
#
# @property
# def carta(self) -> str:
# """ Create TeX Template for printable medium: "US Letter"
# """
# raise NotImplementedError
#
# @property
# def oficio(self) -> str:
# """ Create TeX Template for printable medium: "American Foolscap"
# """
# raise NotImplementedError
#
# @property
# def thermal80mm(self) -> str:
# """ Create TeX Template for printable medium: "Thermal endless 80mm width"
# """
# raise NotImplementedError
. Output only the next line. | class SectionDisclaimer(TemplateElement): |
Predict the next line after this snippet: <|code_start|>""" Creation and management of SII Digital Stamp Utilities
"""
def build_digital_stamp(doc_xml, caf_xml):
""" Builds a digital stamp digest from a DTE.
:param `etree.Element` doc_xml: DTE Document node.
:param `etree.Element` caf_xml: Codigo autorizacion de folios XML.
:return: `etree.Element` of the 'TED' (Timbre Electronico Digital?) node.
"""
<|code_end|>
using the current file's imports:
import re
import copy
import base64
import datetime as dt
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA as SHA1
from Crypto.PublicKey import RSA
from .lib import xml
and any relevant context from other files:
# Path: src/sii/lib/lib/xml.py
# XML_DECL = lambda enc: b'<?xml version="1.0" encoding="' + bytes(enc, enc) + b'"?>'
# class XML(object):
# def __init__(self, node=None, name=None, text=None, namespaces=None):
# def __repr__(self):
# def __str__(self):
# def __int__(self):
# def __float__(self):
# def __iter__(self):
# def __setattr__(self, name, value):
# def __getitem__(self, key):
# def __setitem__(self, key, value):
# def __remove__(self):
# def __name__(self):
# def __children__(self):
# def __siblings__(self):
# def __generation__(self):
# def _etree(self):
# def _xml(self):
# def _str(self):
# def _int(self):
# def _float(self):
# def _number(self):
# def _list(self):
# def _has(self, name):
# def create_xml(name, value=None, namespaces=None):
# def read_xml(path):
# def load_xml(xml_string):
# def wrap_xml(xml_etree):
# def dump_etree(xml_node):
# def dump_xml(xml_node, **kwargs):
# def print_xml(xml, file=sys.stdout.buffer, end='\n', encoding='UTF-8'):
# def write_xml(xml, fpath, end='\n', encoding='UTF-8', append=False):
. Output only the next line. | caf = xml.wrap_xml(caf_xml) |
Based on the snippet: <|code_start|>""" Barcode Section of the Document
Contains:
* Barcode (PDF417)
* Resolution Number
* Resolution Date
"""
<|code_end|>
, predict the immediate next line with the help of imports:
from .TemplateElement import TemplateElement, Resource
from .barcode import PDF417
and context (classes, functions, sometimes code) from other files:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
# def resources(self):
# def carta(self) -> str:
# def oficio(self) -> str:
# def thermal80mm(self) -> str:
#
# Path: src/sii/lib/printing/barcode/PDF417.py
# class PDF417(Barcode):
# """0 0 moveto <{hexdata}> ({options}) /pdf417 /uk.co.terryburton.bwipp findresource exec"""
# # % 0 -10 rmoveto (PDF417) show
#
# def __init__(self, data, rows=None, columns=None):
# self.data = data
# self.data_hex = binascii.hexlify(data.encode('ISO-8859-1')).decode('utf8')
# self.rows = rows
# self.columns = columns
#
# @property
# def ps(self):
# options = []
#
# if self.rows:
# options.append('rows={0}'.format(self.rows))
#
# if self.columns:
# options.append('columns={0}'.format(self.columns))
#
# # We append the pdf417 call onto the read in library and return
# ps_cmd = '\n\n'
# ps_cmd += self.__doc__.format(
# hexdata = self.data_hex,
# options = ','.join(options)
# )
#
# return self.__lib__ + ps_cmd
#
# @property
# def eps(self):
# return self._eps()
#
# @property
# def eps_filepath(self):
# return self._eps(return_path=True)
#
# def _eps(self, return_path=False):
# tmp = tempfile.TemporaryDirectory()
# ps_fname = os.path.join(tmp.name, 'barcode.ps')
# eps_fname = os.path.join(tmp.name, 'barcode.eps')
# result = None
#
# with open(ps_fname, 'w') as fh:
# fh.write(self.ps)
#
# converter = syscall.Ps2Eps()
# converter.call(ps_fname)
#
# if return_path is True:
# return eps_fname
# else:
# with open(eps_fname, 'rb') as fh:
# result = fh.read()
#
# tmp.cleanup()
# return result
. Output only the next line. | class SectionBarcode(TemplateElement): |
Using the snippet: <|code_start|>Contains:
* Barcode (PDF417)
* Resolution Number
* Resolution Date
"""
class SectionBarcode(TemplateElement):
"""
%% -----------------------------------------------------------------
%% SECTION - Barcode
%% -----------------------------------------------------------------
\\begin{center}
\\includegraphics[width=%s\\textwidth]{barcode.eps} \\\\
\\scriptsize{
Timbre Electrónico SII \\\\
Res. %s del %s - Verifique documento: www.sii.cl
}
\\end{center}
"""
def __init__(self, data, resolution_number, resolution_datestr):
self._data = data
self._res_number = resolution_number
self._res_datestr = resolution_datestr
self._barcode = None
@property
def resources(self):
ress = []
<|code_end|>
, determine the next line of code. You have imports:
from .TemplateElement import TemplateElement, Resource
from .barcode import PDF417
and context (class names, function names, or code) available:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
# def resources(self):
# def carta(self) -> str:
# def oficio(self) -> str:
# def thermal80mm(self) -> str:
#
# Path: src/sii/lib/printing/barcode/PDF417.py
# class PDF417(Barcode):
# """0 0 moveto <{hexdata}> ({options}) /pdf417 /uk.co.terryburton.bwipp findresource exec"""
# # % 0 -10 rmoveto (PDF417) show
#
# def __init__(self, data, rows=None, columns=None):
# self.data = data
# self.data_hex = binascii.hexlify(data.encode('ISO-8859-1')).decode('utf8')
# self.rows = rows
# self.columns = columns
#
# @property
# def ps(self):
# options = []
#
# if self.rows:
# options.append('rows={0}'.format(self.rows))
#
# if self.columns:
# options.append('columns={0}'.format(self.columns))
#
# # We append the pdf417 call onto the read in library and return
# ps_cmd = '\n\n'
# ps_cmd += self.__doc__.format(
# hexdata = self.data_hex,
# options = ','.join(options)
# )
#
# return self.__lib__ + ps_cmd
#
# @property
# def eps(self):
# return self._eps()
#
# @property
# def eps_filepath(self):
# return self._eps(return_path=True)
#
# def _eps(self, return_path=False):
# tmp = tempfile.TemporaryDirectory()
# ps_fname = os.path.join(tmp.name, 'barcode.ps')
# eps_fname = os.path.join(tmp.name, 'barcode.eps')
# result = None
#
# with open(ps_fname, 'w') as fh:
# fh.write(self.ps)
#
# converter = syscall.Ps2Eps()
# converter.call(ps_fname)
#
# if return_path is True:
# return eps_fname
# else:
# with open(eps_fname, 'rb') as fh:
# result = fh.read()
#
# tmp.cleanup()
# return result
. Output only the next line. | ress.append(Resource('barcode.eps', self._eps)) |
Given snippet: <|code_start|> @property
def carta(self):
tex = self.__doc__ % (
0.9,
self._res_number,
self._res_datestr
)
return tex
@property
def oficio(self):
tex = self.__doc__ % (
0.9,
self._res_number,
self._res_datestr
)
return tex
@property
def thermal80mm(self):
tex = self.__doc__ % (
1.0,
self._res_number,
self._res_datestr
)
return tex
@property
def _eps(self):
if not self._barcode:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .TemplateElement import TemplateElement, Resource
from .barcode import PDF417
and context:
# Path: src/sii/lib/printing/TemplateElement.py
# class TemplateElement(object):
# def resources(self):
# def carta(self) -> str:
# def oficio(self) -> str:
# def thermal80mm(self) -> str:
#
# Path: src/sii/lib/printing/barcode/PDF417.py
# class PDF417(Barcode):
# """0 0 moveto <{hexdata}> ({options}) /pdf417 /uk.co.terryburton.bwipp findresource exec"""
# # % 0 -10 rmoveto (PDF417) show
#
# def __init__(self, data, rows=None, columns=None):
# self.data = data
# self.data_hex = binascii.hexlify(data.encode('ISO-8859-1')).decode('utf8')
# self.rows = rows
# self.columns = columns
#
# @property
# def ps(self):
# options = []
#
# if self.rows:
# options.append('rows={0}'.format(self.rows))
#
# if self.columns:
# options.append('columns={0}'.format(self.columns))
#
# # We append the pdf417 call onto the read in library and return
# ps_cmd = '\n\n'
# ps_cmd += self.__doc__.format(
# hexdata = self.data_hex,
# options = ','.join(options)
# )
#
# return self.__lib__ + ps_cmd
#
# @property
# def eps(self):
# return self._eps()
#
# @property
# def eps_filepath(self):
# return self._eps(return_path=True)
#
# def _eps(self, return_path=False):
# tmp = tempfile.TemporaryDirectory()
# ps_fname = os.path.join(tmp.name, 'barcode.ps')
# eps_fname = os.path.join(tmp.name, 'barcode.eps')
# result = None
#
# with open(ps_fname, 'w') as fh:
# fh.write(self.ps)
#
# converter = syscall.Ps2Eps()
# converter.call(ps_fname)
#
# if return_path is True:
# return eps_fname
# else:
# with open(eps_fname, 'rb') as fh:
# result = fh.read()
#
# tmp.cleanup()
# return result
which might include code, classes, or functions. Output only the next line. | pdf417 = PDF417(self._data) |
Given the code snippet: <|code_start|>""" SII WebService Authentication Seed.
"""
__all__ = [
'Seed'
]
class Seed(object):
SEED_OK = 0
SEED_LINE_ERR = -1
SEED_RETURN_ERR = -2
def __init__(self, sii_host="https://palena.sii.cl/DTEWS/CrSeed.jws?wsdl"):
self.sii_host = sii_host
self.sii_soap = Client(self.sii_host)
self._status = None
self._seed = None
self._error = None
self._seed_xml = self._fetch_seed()
self._seed_etree = self._prepare_seed_xml(self._seed_xml)
self._seed_values = self._parse_seed_xml(self._seed_etree)
def _fetch_seed(self):
<|code_end|>
, generate the next line using the imports in this file:
import re
from suds.client import Client
from lxml import etree
from .helpers import with_retry
and context (functions, classes, or occasionally code) from other files:
# Path: src/sii/lib/ptcl/helpers.py
# def with_retry(func, count=RETRIES_MAX, ival=RETRIES_SLEEP):
# retries = 0
# while retries < count:
# try:
# return func()
# except Exception as exc:
# code, msg = exc.args[0]
#
# if code == 503:
# retries += 1
# time.sleep(ival)
# continue
# else:
# raise
. Output only the next line. | return with_retry(lambda: self.sii_soap.service.getSeed()) |
Continue the code snippet: <|code_start|># module. Defaults to `os.getcwd()`.
# depends_files -- list of str, paths to Fortran files that should be
# checked for changes when compiling the final module.
# **kwargs -- Any configuration options relevant to this
# compilation passed as keyword arguments,
# see `help(fmodpy)` for more information, or
# run again with 'verbose=True' to see options.
#
# KEYWORD OPTIONS (some important ones, more in 'fmodpy.configure'):
# autocompile -- bool, whether or not automatic compilation of
# dependancies should be attempted.
# rebuild -- bool, True if you want to rebuild even though the
# `input_fortran_file` has not been modified
# since the last compilation of the module.
# wrap -- bool, True if the wrapper code should be generated
# even if wrapper code already exists in `build_dir`,
# use False when manually modifying generated wrappers.
# show_warnings -- bool, if miscellaneous warnings should be printed.
#
def fimport(input_fortran_file, name=None, build_dir=None,
output_dir=None, depends_files=None, **kwargs):
# Import parsing functions.
# Import "os" for os operations and "importlib" for importing a module.
# Configure this runtime of fmodpy
pre_config = load_config() # <- gets the configuration dictionary
if (len(kwargs) > 0): load_config(**kwargs) # <- assigns given configuration
# Import some locally used settings.
# Print the configuration (when verbose).
<|code_end|>
. Use current file imports:
from fmodpy.config import fmodpy_print as print
from fmodpy.parsing import after_dot, before_dot, legal_module_name
from fmodpy.config import run, load_config, \
FORT_WRAPPER_EXT, PYTHON_WRAPPER_EXT, PY_EXT
from fmodpy.config import wrap, rebuild, autocompile, \
f_compiler, f_compiler_args, delete_destination
from fmodpy.exceptions import NameError
from fmodpy.parsing import simplify_fortran_file, after_dot
from fmodpy.parsing.file import Fortran
from fmodpy.exceptions import ParseError
from fmodpy.config import end_is_named
from fmodpy.parsing import after_dot
from fmodpy.config import run, f_compiler, f_compiler_args, \
GET_SIZE_PROG_FILE, GET_SIZE_EXEC_FILE
from fmodpy.exceptions import CompileError
from tempfile import TemporaryDirectory
from fmodpy.config import home_directory, config_file
from builtins import print
from fmodpy.config import load_config
import os, sys, shutil, importlib
import os
import os
import os
import os, sys, pkgutil, importlib
import os, time, builtins
import fmodpy
and context (classes, functions, or code) from other files:
# Path: fmodpy/config.py
# def fmodpy_print(*args, **kwargs):
# # Skip all print statements if verbosity is off.
# global verbose
# if (not verbose): return
# # Set the log file.
# global log_file
# if (log_file == os.devnull):
# import sys
# log_file = sys.stdout
# # Add information about where the bit is printed from, if turned on.
# global debug_line_numbers
# if debug_line_numbers:
# import inspect
# # Add the calling file and line to the print statement
# calling_function = inspect.stack()[1]
# calling_file = calling_function.filename
# calling_file = os.path.basename(calling_file)
# calling_line = calling_function.lineno
# args += (f'({calling_file} line {calling_line})',)
# # For all print operations, force a "flush" output.
# kwargs["file"] = log_file
# kwargs["flush"] = True
# print(*args, **kwargs)
. Output only the next line. | print() |
Given the code snippet: <|code_start|># Making this module accessible by being called directly from command line.
# Import all of the fmodpy module
if len(sys.argv) < 2:
help(fmodpy)
exit()
else:
file_path = os.path.abspath(sys.argv[1])
# Pretty error handling when this file is executed directly
def custom_excepthook(exc_type, value, tb):
l = ''.join(traceback.format_exception(exc_type, value, tb))
print(l)
sys.excepthook = custom_excepthook
# Read command line arguments after the path to the source file.
command_line_args = {}
for arg in sys.argv[2:]:
if ("=" not in arg):
raise(UnrecognizedConfiguration(
f"Command line argument {str([arg])[1:-1]} must be formatted"+
" as '<setting>=<value>', but no '=' found."))
first_equals = arg.index('=')
setting, value = arg[:first_equals], arg[first_equals+1:]
command_line_args[setting] = value
# Call "fimport" providing the given arguments as configurations.
<|code_end|>
, generate the next line using the imports in this file:
import os, sys, traceback
import fmodpy
from .fmodpy import __doc__, fimport, configure
from fmodpy.exceptions import UnrecognizedConfiguration
and context (functions, classes, or occasionally code) from other files:
# Path: fmodpy/fmodpy.py
# def fimport(input_fortran_file, name=None, build_dir=None,
# output_dir=None, depends_files=None, **kwargs):
# def make_wrapper(source_file, build_dir, module_name):
# def autocompile_files(build_dir, target_file=None):
# def prepare_build_directory(source_dir, build_dir):
# def should_rebuild_module(source_path, module_name, module_directory):
# def configure(*to_delete, **to_save):
. Output only the next line. | fimport(file_path, **command_line_args) |
Given the code snippet: <|code_start|> for attr in dir(self):
# Skip hidden attributes.
if (attr[:1] == "_"): continue
# TODO: Come up with better general way to handle these special cases.
if (attr in {"c_type", "c_type_array", "py_type"}): continue
value = getattr(self, attr)
# Skip executable attributes.
if (hasattr(value, "__call__")): continue
# If this attribute has a ".copy" function, then use it.
if (hasattr(value, "copy") and hasattr(value.copy, "__call__")):
value = value.copy()
# Set the attribute in the new argument.
setattr(arg, attr, value)
return arg
# Default initialization, process standard Fortran specifications.
# Expects to be given list of strings that comes from a declaration
# line, but with the ":: name1, ..." stripped off the end.
def __init__(self, line, parent=None):
if (len(line) == 0): raise(NotImplementedError)
# Make sure this line matches the expected type.
if (line.pop(0) != self.type): raise(NotImplementedError)
# Set the parent.
self.parent = parent
# If the line is empty now, then we're done (use defaults).
if (len(line) == 0): return
# Parse the remainder of the declaration line.
#
# Get the KIND declaration, if it was given.
<|code_end|>
, generate the next line using the imports in this file:
from . import pop_group
import re # <- use regular expressions to capture SIZE(...) syntax.
import warnings
and context (functions, classes, or occasionally code) from other files:
# Path: fmodpy/parsing/util.py
# def pop_group(list_str, open_with="(", close_with=")"):
# group = []
# # Get the first element of the string (open the group, if matched).
# if ((len(list_str) > 0) and (list_str[0] == open_with)):
# list_str.pop(0)
# num_open = 1
# else: num_open = 0
# # Search until the started group is closed.
# while (num_open > 0):
# # TOOD: Might need to raise parsing error when this happens.
# if (len(list_str) == 0): raise(NotImplementedError)
# next_value = list_str.pop(0)
# if (next_value == open_with): num_open += 1
# elif (next_value == close_with): num_open -= 1
# if (num_open != 0): group.append(next_value)
# # Return the captured portion and the remaining.
# return group, list_str
. Output only the next line. | group, line = pop_group(line) |
Using the snippet: <|code_start|> continue
# Parse the contained objects out of the file.
for (parser, name) in self.can_contain:
pre_length = len(list_of_lines)
instances = parser(list_of_lines, comments, self)
length = pre_length - len(list_of_lines)
self.lines += length
# If instances were found, we have a match, break.
if (len(instances) > 0):
for inst in instances:
getattr(self,name).append( inst )
comments = ""
break
# If any parsing was done otherwise, then it was a match, break.
elif (length > 0): break
else:
# Look for things that will be parsed, but ignored.
for parser in self.will_ignore:
pre_length = len(list_of_lines)
instances = parser(list_of_lines, comments, self)
length = len(list_of_lines) - pre_length
self.lines += length
if ((len(instances) > 0) or (length > 0)): break
else:
# This is an unknown block of code.
if self.allowed_unknown:
comments = ""
# This is an un-identifiable line, it belongs ???
print(f" skipping line '{list_of_lines.pop(0)}'")
else:
<|code_end|>
, determine the next line of code. You have imports:
from . import class_name
from fmodpy.config import fmodpy_print as print
from fmodpy.config import end_is_named
from fmodpy.exceptions import ParseError
from fmodpy.exceptions import ParseError
from . import class_name
from fmodpy.exceptions import ParseError
from fmodpy.exceptions import ParseError
from fmodpy.parsing.util import wrap_long_lines
from fmodpy.config import run, f_compiler, \
GET_SIZE_PROG_FILE, GET_SIZE_EXEC_FILE
from fmodpy.exceptions import CompileError
import os
and context (class names, function names, or code) available:
# Path: fmodpy/parsing/util.py
# def class_name(cls): return str(type(cls)).split(".")[-1].split("'")[0]
. Output only the next line. | raise(ParseError(f"\n\nEncountered unrecognized line while parsing {class_name(self)}:\n\n {str([list_of_lines.pop(0)])[1:-1]}\n\n")) |
Predict the next line for this snippet: <|code_start|>
class MethodMap(object):
def __init__(self, decorator=Identity):
super(MethodMap, self).__init__()
self._map = {}
self._decorate = decorator
def registering(self, key):
return functools.partial(self.register, key)
def register(self, key, value):
self._map[key] = self._decorate(value)
return value
def __get__(self, instance, owner):
return Binder(self._map, instance)
_NOTHING = object()
class Binder(object):
def __init__(self, map, instance):
super(Binder, self).__init__()
self._map = map
self._instance = instance
def get(self, key, default=None):
if key not in self._map:
return default
function = self._map[key]
if isinstance(function, staticmethod):
<|code_end|>
with the help of current file imports:
import functools
import platform
from .functors import Identity
from .python_compat import get_underlying_function
from .python_compat import create_bound_method
and context from other files:
# Path: infi/pyutils/functors/identity.py
# class _Identity(Functor):
# def __call__(self, obj):
# def __repr__(self):
#
# Path: infi/pyutils/python_compat.py
# def get_underlying_function(f):
# if _IS_BELOW_PYTHON_2_7 and (isinstance(f, classmethod) or isinstance(f, staticmethod)):
# return _get_underlying_classmethod_function(f)
# return f.__func__
#
# Path: infi/pyutils/python_compat.py
# def create_bound_method(func, self):
# return types.MethodType(func, self, type(self))
, which may contain function names, class names, or code. Output only the next line. | return get_underlying_function(function) |
Here is a snippet: <|code_start|> def __init__(self, decorator=Identity):
super(MethodMap, self).__init__()
self._map = {}
self._decorate = decorator
def registering(self, key):
return functools.partial(self.register, key)
def register(self, key, value):
self._map[key] = self._decorate(value)
return value
def __get__(self, instance, owner):
return Binder(self._map, instance)
_NOTHING = object()
class Binder(object):
def __init__(self, map, instance):
super(Binder, self).__init__()
self._map = map
self._instance = instance
def get(self, key, default=None):
if key not in self._map:
return default
function = self._map[key]
if isinstance(function, staticmethod):
return get_underlying_function(function)
if isinstance(function, classmethod):
returned_self = self._instance
function = get_underlying_function(function)
else:
returned_self = self._instance.__class__
<|code_end|>
. Write the next line using the current file imports:
import functools
import platform
from .functors import Identity
from .python_compat import get_underlying_function
from .python_compat import create_bound_method
and context from other files:
# Path: infi/pyutils/functors/identity.py
# class _Identity(Functor):
# def __call__(self, obj):
# def __repr__(self):
#
# Path: infi/pyutils/python_compat.py
# def get_underlying_function(f):
# if _IS_BELOW_PYTHON_2_7 and (isinstance(f, classmethod) or isinstance(f, staticmethod)):
# return _get_underlying_classmethod_function(f)
# return f.__func__
#
# Path: infi/pyutils/python_compat.py
# def create_bound_method(func, self):
# return types.MethodType(func, self, type(self))
, which may include functions, classes, or code. Output only the next line. | return create_bound_method( |
Next line prediction: <|code_start|>
class OriginalObject(object):
def __init__(self):
super(OriginalObject, self).__init__()
self.public_attr = 54321
self._private_attr = 12345
def method(self):
return 666
@classmethod
def classmethod(cls):
return 777
@staticmethod
def staticmethod():
return 888
def __repr__(self):
raise NotImplementedError() # pragma: no cover
def __str__(self):
raise NotImplementedError() # pragma: no cover
class ReprifyTest(TestCase):
def setUp(self):
super(ReprifyTest, self).setUp()
self.original_object = OriginalObject()
self.REPR = "some repr here"
self.STR = "some str here"
<|code_end|>
. Use current file imports:
(from .test_utils import TestCase
from infi.pyutils import Reprify)
and context including class names, function names, or small code snippets from other files:
# Path: infi/pyutils/misc.py
# class Reprify(object):
# def __init__(self, original, str=None, repr=None):
# super(Reprify, self).__init__()
# self._strify__original = original
# if repr is None:
# repr = str
# if str is None:
# str = repr
# self._strify__str = str
# self._strify__repr = repr
# def __getattribute__(self, attr):
# if attr.startswith('_strify__'):
# return super(Reprify, self).__getattribute__(attr)
# return getattr(self._strify__original, attr)
# def __repr__(self):
# if self._strify__repr is not None:
# return self._strify__repr
# return repr(self._strify__original)
# def __str__(self):
# if self._strify__str is not None:
# return self._strify__str
# return str(self._strify__originalx)
. Output only the next line. | self.obj = Reprify(self.original_object, str=self.STR, repr=self.REPR) |
Using the snippet: <|code_start|>
def listdir_patch(path):
return [path]
TEST_PATH = "__infi_test"
class EnumTestCase(TestCase):
def test__patch_context(self):
<|code_end|>
, determine the next line of code. You have imports:
import os
from infi.pyutils.patch import patch
from .test_utils import TestCase
and context (class names, function names, or code) available:
# Path: infi/pyutils/patch.py
# class patch(object):
# """ context manager for patching """
# def __init__(self, module, name, replacement):
# self._module = module
# self._name = name
# self._replacement = replacement
#
# def __enter__(self):
# monkey_patch(self._module, self._name, self._replacement)
#
# def __exit__ (self, exc_type, exc_value, exc_tb):
# unmonkey_patch(self._module, self._name)
. Output only the next line. | with patch(os, "listdir", listdir_patch): |
Based on the snippet: <|code_start|> def get_header(self, name):
...
class UserAuth(object):
def is_authenticated(self):
auth_header = self.get_header("Authorization")
...
Now, to add the mixin to the object we'll do::
request = HttpRequest(...)
# Find out that there's a need for authentication
...
install_mixin(request, UserAuth)
"""
__all__ = [ "install_mixin", "install_mixin_if" ]
def install_mixin_if(obj, mixin, condition):
"""
Same as install_mixin, but installs the mixin only if *condition* evaluates to truth.
"""
if not condition:
return
install_mixin(obj, mixin)
def install_mixin(obj, mixin):
obj.__class__ = _replace_class(type(obj), mixin)
<|code_end|>
, predict the immediate next line with the help of imports:
from .lazy import cached_function
and context (classes, functions, sometimes code) from other files:
# Path: infi/pyutils/lazy.py
# def cached_function(func):
# """Decorator that caches a function's return value each time it is called.
# If called later with the same arguments, the cached value is returned, and
# not re-evaluated.
# """
# @wraps(func)
# def callee(*args, **kwargs):
# key = _get_function_cache_entry(args, kwargs)
# try:
# value = func._cache[key]
# except (KeyError, AttributeError):
# value = func(*args, **kwargs)
# if not hasattr(func, '_cache'):
# setattr(func, '_cache', {})
# func._cache[key] = value
# return value
#
# callee._cache = func._cache = dict()
# callee.__cached_method__ = True
# return callee
. Output only the next line. | @cached_function |
Based on the snippet: <|code_start|> def get_self_arg_name(self):
if self.is_bound_method() and len(self._args) > 0:
return self._args[0].name
return None
def get_arg_names(self):
return (arg.name for arg in self.get_args())
def get_required_arg_names(self):
return set(arg.name for arg in self.get_args() if not arg.has_default())
def has_variable_args(self):
return self._varargs_name is not None
def has_variable_kwargs(self):
return self._kwargs_name is not None
def get_normalized_args(self, args, kwargs):
returned = {}
self._update_normalized_positional_args(returned, args)
self._update_normalized_kwargs(returned, kwargs)
self._check_missing_arguments(returned)
self._check_unknown_arguments(returned)
return returned
def _update_normalized_positional_args(self, returned, args):
argument_names = list(self.get_arg_names())
argument_names.extend(range(len(args) - self.get_num_args()))
for arg_name, given_arg in zip(argument_names, args):
returned[arg_name] = given_arg
def _update_normalized_kwargs(self, returned, kwargs):
for arg_name, arg in iteritems(kwargs):
if not isinstance(arg_name, basestring):
raise InvalidKeywordArgument("Invalid keyword argument %r" % (arg_name,))
if arg_name in returned:
<|code_end|>
, predict the immediate next line with the help of imports:
from types import MethodType
from .exceptions import (
SignatureException,
InvalidKeywordArgument,
UnknownArguments,
MissingArguments,
)
from numbers import Number
import copy
import inspect
import itertools
import platform
and context (classes, functions, sometimes code) from other files:
# Path: infi/pyutils/exceptions.py
# class SignatureException(ReflectionException):
# pass
#
# class InvalidKeywordArgument(ReflectionException):
# pass
#
# class UnknownArguments(SignatureException):
# pass
#
# class MissingArguments(SignatureException):
# pass
. Output only the next line. | raise SignatureException("%s is given more than once to %s" % (arg_name, self.func_name)) |
Predict the next line after this snippet: <|code_start|> returned = max(0, returned - 1)
return returned
def get_self_arg_name(self):
if self.is_bound_method() and len(self._args) > 0:
return self._args[0].name
return None
def get_arg_names(self):
return (arg.name for arg in self.get_args())
def get_required_arg_names(self):
return set(arg.name for arg in self.get_args() if not arg.has_default())
def has_variable_args(self):
return self._varargs_name is not None
def has_variable_kwargs(self):
return self._kwargs_name is not None
def get_normalized_args(self, args, kwargs):
returned = {}
self._update_normalized_positional_args(returned, args)
self._update_normalized_kwargs(returned, kwargs)
self._check_missing_arguments(returned)
self._check_unknown_arguments(returned)
return returned
def _update_normalized_positional_args(self, returned, args):
argument_names = list(self.get_arg_names())
argument_names.extend(range(len(args) - self.get_num_args()))
for arg_name, given_arg in zip(argument_names, args):
returned[arg_name] = given_arg
def _update_normalized_kwargs(self, returned, kwargs):
for arg_name, arg in iteritems(kwargs):
if not isinstance(arg_name, basestring):
<|code_end|>
using the current file's imports:
from types import MethodType
from .exceptions import (
SignatureException,
InvalidKeywordArgument,
UnknownArguments,
MissingArguments,
)
from numbers import Number
import copy
import inspect
import itertools
import platform
and any relevant context from other files:
# Path: infi/pyutils/exceptions.py
# class SignatureException(ReflectionException):
# pass
#
# class InvalidKeywordArgument(ReflectionException):
# pass
#
# class UnknownArguments(SignatureException):
# pass
#
# class MissingArguments(SignatureException):
# pass
. Output only the next line. | raise InvalidKeywordArgument("Invalid keyword argument %r" % (arg_name,)) |
Based on the snippet: <|code_start|> def get_normalized_args(self, args, kwargs):
returned = {}
self._update_normalized_positional_args(returned, args)
self._update_normalized_kwargs(returned, kwargs)
self._check_missing_arguments(returned)
self._check_unknown_arguments(returned)
return returned
def _update_normalized_positional_args(self, returned, args):
argument_names = list(self.get_arg_names())
argument_names.extend(range(len(args) - self.get_num_args()))
for arg_name, given_arg in zip(argument_names, args):
returned[arg_name] = given_arg
def _update_normalized_kwargs(self, returned, kwargs):
for arg_name, arg in iteritems(kwargs):
if not isinstance(arg_name, basestring):
raise InvalidKeywordArgument("Invalid keyword argument %r" % (arg_name,))
if arg_name in returned:
raise SignatureException("%s is given more than once to %s" % (arg_name, self.func_name))
returned[arg_name] = arg
def _check_missing_arguments(self, args_dict):
required_arguments = self.get_required_arg_names()
missing_arguments = required_arguments - set(args_dict)
if missing_arguments:
raise MissingArguments("The following arguments were not specified: %s" % ",".join(map(repr, missing_arguments)))
def _check_unknown_arguments(self, args_dict):
positional_arg_count = len([arg_name for arg_name in args_dict if isinstance(arg_name, Number)])
num_args = self.get_num_args()
if positional_arg_count and not self.has_variable_args():
<|code_end|>
, predict the immediate next line with the help of imports:
from types import MethodType
from .exceptions import (
SignatureException,
InvalidKeywordArgument,
UnknownArguments,
MissingArguments,
)
from numbers import Number
import copy
import inspect
import itertools
import platform
and context (classes, functions, sometimes code) from other files:
# Path: infi/pyutils/exceptions.py
# class SignatureException(ReflectionException):
# pass
#
# class InvalidKeywordArgument(ReflectionException):
# pass
#
# class UnknownArguments(SignatureException):
# pass
#
# class MissingArguments(SignatureException):
# pass
. Output only the next line. | raise UnknownArguments("%s receives %s positional arguments (%s specified)" % (self.func_name, num_args, num_args + positional_arg_count)) |
Next line prediction: <|code_start|> return set(arg.name for arg in self.get_args() if not arg.has_default())
def has_variable_args(self):
return self._varargs_name is not None
def has_variable_kwargs(self):
return self._kwargs_name is not None
def get_normalized_args(self, args, kwargs):
returned = {}
self._update_normalized_positional_args(returned, args)
self._update_normalized_kwargs(returned, kwargs)
self._check_missing_arguments(returned)
self._check_unknown_arguments(returned)
return returned
def _update_normalized_positional_args(self, returned, args):
argument_names = list(self.get_arg_names())
argument_names.extend(range(len(args) - self.get_num_args()))
for arg_name, given_arg in zip(argument_names, args):
returned[arg_name] = given_arg
def _update_normalized_kwargs(self, returned, kwargs):
for arg_name, arg in iteritems(kwargs):
if not isinstance(arg_name, basestring):
raise InvalidKeywordArgument("Invalid keyword argument %r" % (arg_name,))
if arg_name in returned:
raise SignatureException("%s is given more than once to %s" % (arg_name, self.func_name))
returned[arg_name] = arg
def _check_missing_arguments(self, args_dict):
required_arguments = self.get_required_arg_names()
missing_arguments = required_arguments - set(args_dict)
if missing_arguments:
<|code_end|>
. Use current file imports:
(from types import MethodType
from .exceptions import (
SignatureException,
InvalidKeywordArgument,
UnknownArguments,
MissingArguments,
)
from numbers import Number
import copy
import inspect
import itertools
import platform)
and context including class names, function names, or small code snippets from other files:
# Path: infi/pyutils/exceptions.py
# class SignatureException(ReflectionException):
# pass
#
# class InvalidKeywordArgument(ReflectionException):
# pass
#
# class UnknownArguments(SignatureException):
# pass
#
# class MissingArguments(SignatureException):
# pass
. Output only the next line. | raise MissingArguments("The following arguments were not specified: %s" % ",".join(map(repr, missing_arguments))) |
Using the snippet: <|code_start|> iterator = enumerate(itertools.chain(iter(collection), [_NOTHING]))
prefetched = _NOTHING
while True:
index, element = next(iterator) if prefetched is _NOTHING else prefetched
if element is _NOTHING:
break
prefetched = next(iterator)
yield Iteration(
counter0 = index,
counter1 = index + 1,
first = (index == 0),
last = prefetched[1] is _NOTHING
), element
class Iteration(object):
def __init__(self, counter0, counter1, first, last):
super(Iteration, self).__init__()
self.counter0 = counter0
self.counter1 = counter1
self.first = first
self.last = last
def renumerate(seq):
"""Like enumerate(), only in reverse order. Useful for filtering a list in place"""
if isinstance(seq, list) or isinstance(seq, tuple):
return _renumerate_lazy(seq)
return _renumerate_strict(seq)
def _renumerate_lazy(seq):
<|code_end|>
, determine the next line of code. You have imports:
import itertools
from .python_compat import xrange
and context (class names, function names, or code) available:
# Path: infi/pyutils/python_compat.py
# _PYTHON_VERSION = platform.python_version()
# _IS_PYTHON_3 = _PYTHON_VERSION >= '3'
# _IS_BELOW_PYTHON_2_7 = _PYTHON_VERSION < '2.7'
# def get_underlying_function(f):
# def _get_underlying_classmethod_function(f):
# def create_bound_method(func, self):
# class TemporaryClass(object):
. Output only the next line. | for index in xrange(len(seq)-1, -1, -1): |
Here is a snippet: <|code_start|>
# no named tuples for python 2.5 compliance...
class ExpectedArg(object):
def __init__(self, name, has_default, default=None):
self.name = name
self.has_default = has_default
self.default = default
class SignatureTest(TestCase):
def _assert_argument_names(self, sig, names):
self.assertEquals([arg.name for arg in sig._args], names)
def _test_function_signature(self,
func,
expected_signature,
has_varargs=False,
has_varkwargs=False
):
<|code_end|>
. Write the next line using the current file imports:
import time
from .test_utils import TestCase
from infi.pyutils.function_signature import FunctionSignature
from infi.pyutils.exceptions import SignatureException, InvalidKeywordArgument, UnknownArguments, MissingArguments
and context from other files:
# Path: infi/pyutils/function_signature.py
# class FunctionSignature(object):
# def __init__(self, func):
# super(FunctionSignature, self).__init__()
# self.func = func
# self.func_name = func.__name__
# self._build_arguments()
# def is_bound_method(self):
# return is_bound_method(self.func)
# def is_class_method(self):
# return is_class_method(self.func)
# def _iter_args_and_defaults(self, args, defaults):
# defaults = [] if defaults is None else defaults
# filled_defaults = itertools.chain(itertools.repeat(_NO_DEFAULT, len(args) - len(defaults)), defaults)
# return izip(args, filled_defaults)
#
# def _build_arguments(self):
# self._args = []
# try:
# args, varargs_name, kwargs_name, defaults = inspect.getargspec(self.func)
# except TypeError:
# args = []
# varargs_name = 'args'
# kwargs_name = 'kwargs'
# defaults = []
# for arg_name, default in self._iter_args_and_defaults(args, defaults):
# self._args.append(Argument(arg_name, default))
# self._varargs_name = varargs_name
# self._kwargs_name = kwargs_name
# def get_args(self):
# return itertools.islice(self._args, 1 if self.is_bound_method() else 0, None)
# def get_num_args(self):
# returned = len(self._args)
# if self.is_bound_method():
# returned = max(0, returned - 1)
# return returned
# def get_self_arg_name(self):
# if self.is_bound_method() and len(self._args) > 0:
# return self._args[0].name
# return None
# def get_arg_names(self):
# return (arg.name for arg in self.get_args())
# def get_required_arg_names(self):
# return set(arg.name for arg in self.get_args() if not arg.has_default())
# def has_variable_args(self):
# return self._varargs_name is not None
# def has_variable_kwargs(self):
# return self._kwargs_name is not None
# def get_normalized_args(self, args, kwargs):
# returned = {}
# self._update_normalized_positional_args(returned, args)
# self._update_normalized_kwargs(returned, kwargs)
# self._check_missing_arguments(returned)
# self._check_unknown_arguments(returned)
# return returned
# def _update_normalized_positional_args(self, returned, args):
# argument_names = list(self.get_arg_names())
# argument_names.extend(range(len(args) - self.get_num_args()))
# for arg_name, given_arg in zip(argument_names, args):
# returned[arg_name] = given_arg
#
# def _update_normalized_kwargs(self, returned, kwargs):
# for arg_name, arg in iteritems(kwargs):
# if not isinstance(arg_name, basestring):
# raise InvalidKeywordArgument("Invalid keyword argument %r" % (arg_name,))
# if arg_name in returned:
# raise SignatureException("%s is given more than once to %s" % (arg_name, self.func_name))
# returned[arg_name] = arg
#
# def _check_missing_arguments(self, args_dict):
# required_arguments = self.get_required_arg_names()
# missing_arguments = required_arguments - set(args_dict)
# if missing_arguments:
# raise MissingArguments("The following arguments were not specified: %s" % ",".join(map(repr, missing_arguments)))
# def _check_unknown_arguments(self, args_dict):
# positional_arg_count = len([arg_name for arg_name in args_dict if isinstance(arg_name, Number)])
# num_args = self.get_num_args()
# if positional_arg_count and not self.has_variable_args():
# raise UnknownArguments("%s receives %s positional arguments (%s specified)" % (self.func_name, num_args, num_args + positional_arg_count))
# unknown = set(arg for arg in args_dict if not isinstance(arg, Number)) - set(self.get_arg_names())
# if unknown and not self.has_variable_kwargs():
# raise UnknownArguments("%s received unknown argument(s): %s" % (self.func_name, ",".join(unknown)))
#
# Path: infi/pyutils/exceptions.py
# class SignatureException(ReflectionException):
# pass
#
# class InvalidKeywordArgument(ReflectionException):
# pass
#
# class UnknownArguments(SignatureException):
# pass
#
# class MissingArguments(SignatureException):
# pass
, which may include functions, classes, or code. Output only the next line. | sig = FunctionSignature(func) |
Given the code snippet: <|code_start|> kwargs.pop([e.name for e in expected_signature if not e.has_default][0])
with self.assertRaises(MissingArguments):
signature.get_normalized_args((), kwargs)
def _test_unknown_arguments(self, signature, expected_signature):
if not signature.has_variable_kwargs():
too_many = dict((x[0], x[1]) for x in expected_signature)
too_many['blalblakjfdlkj'] = 2
with self.assertRaises(UnknownArguments):
signature.get_normalized_args((), too_many)
if not signature.has_variable_args():
kwargs = dict((x[0], x[1]) for x in expected_signature)
args = [1, 2, 3, 4]
with self.assertRaises(UnknownArguments):
signature.get_normalized_args(args, kwargs)
def test__simple_functions(self):
def f(a, b, c):
pass
self._test_function_signature(f,
[('a', False),
('b', False),
('c', False)])
def test__get_normalized_kwargs(self):
def f(a, b, c):
raise NotImplementedError()
sig = FunctionSignature(f)
self.assertEquals(
sig.get_normalized_args((1,2,3), {}),
dict(a=1, b=2, c=3)
)
<|code_end|>
, generate the next line using the imports in this file:
import time
from .test_utils import TestCase
from infi.pyutils.function_signature import FunctionSignature
from infi.pyutils.exceptions import SignatureException, InvalidKeywordArgument, UnknownArguments, MissingArguments
and context (functions, classes, or occasionally code) from other files:
# Path: infi/pyutils/function_signature.py
# class FunctionSignature(object):
# def __init__(self, func):
# super(FunctionSignature, self).__init__()
# self.func = func
# self.func_name = func.__name__
# self._build_arguments()
# def is_bound_method(self):
# return is_bound_method(self.func)
# def is_class_method(self):
# return is_class_method(self.func)
# def _iter_args_and_defaults(self, args, defaults):
# defaults = [] if defaults is None else defaults
# filled_defaults = itertools.chain(itertools.repeat(_NO_DEFAULT, len(args) - len(defaults)), defaults)
# return izip(args, filled_defaults)
#
# def _build_arguments(self):
# self._args = []
# try:
# args, varargs_name, kwargs_name, defaults = inspect.getargspec(self.func)
# except TypeError:
# args = []
# varargs_name = 'args'
# kwargs_name = 'kwargs'
# defaults = []
# for arg_name, default in self._iter_args_and_defaults(args, defaults):
# self._args.append(Argument(arg_name, default))
# self._varargs_name = varargs_name
# self._kwargs_name = kwargs_name
# def get_args(self):
# return itertools.islice(self._args, 1 if self.is_bound_method() else 0, None)
# def get_num_args(self):
# returned = len(self._args)
# if self.is_bound_method():
# returned = max(0, returned - 1)
# return returned
# def get_self_arg_name(self):
# if self.is_bound_method() and len(self._args) > 0:
# return self._args[0].name
# return None
# def get_arg_names(self):
# return (arg.name for arg in self.get_args())
# def get_required_arg_names(self):
# return set(arg.name for arg in self.get_args() if not arg.has_default())
# def has_variable_args(self):
# return self._varargs_name is not None
# def has_variable_kwargs(self):
# return self._kwargs_name is not None
# def get_normalized_args(self, args, kwargs):
# returned = {}
# self._update_normalized_positional_args(returned, args)
# self._update_normalized_kwargs(returned, kwargs)
# self._check_missing_arguments(returned)
# self._check_unknown_arguments(returned)
# return returned
# def _update_normalized_positional_args(self, returned, args):
# argument_names = list(self.get_arg_names())
# argument_names.extend(range(len(args) - self.get_num_args()))
# for arg_name, given_arg in zip(argument_names, args):
# returned[arg_name] = given_arg
#
# def _update_normalized_kwargs(self, returned, kwargs):
# for arg_name, arg in iteritems(kwargs):
# if not isinstance(arg_name, basestring):
# raise InvalidKeywordArgument("Invalid keyword argument %r" % (arg_name,))
# if arg_name in returned:
# raise SignatureException("%s is given more than once to %s" % (arg_name, self.func_name))
# returned[arg_name] = arg
#
# def _check_missing_arguments(self, args_dict):
# required_arguments = self.get_required_arg_names()
# missing_arguments = required_arguments - set(args_dict)
# if missing_arguments:
# raise MissingArguments("The following arguments were not specified: %s" % ",".join(map(repr, missing_arguments)))
# def _check_unknown_arguments(self, args_dict):
# positional_arg_count = len([arg_name for arg_name in args_dict if isinstance(arg_name, Number)])
# num_args = self.get_num_args()
# if positional_arg_count and not self.has_variable_args():
# raise UnknownArguments("%s receives %s positional arguments (%s specified)" % (self.func_name, num_args, num_args + positional_arg_count))
# unknown = set(arg for arg in args_dict if not isinstance(arg, Number)) - set(self.get_arg_names())
# if unknown and not self.has_variable_kwargs():
# raise UnknownArguments("%s received unknown argument(s): %s" % (self.func_name, ",".join(unknown)))
#
# Path: infi/pyutils/exceptions.py
# class SignatureException(ReflectionException):
# pass
#
# class InvalidKeywordArgument(ReflectionException):
# pass
#
# class UnknownArguments(SignatureException):
# pass
#
# class MissingArguments(SignatureException):
# pass
. Output only the next line. | with self.assertRaises(SignatureException): |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.