Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Continue the code snippet: <|code_start|> continue
for l, v in matches:
# Check the arguments
arguments = v.split(',')
# Check the number of provided arguments
if len(arguments) < distributions_arguments[dist]:
Global._print(eq)
Global._error('The distribution ' + dist + ' requires ' + str(distributions_arguments[dist]) + 'parameters')
elif len(arguments) > distributions_arguments[dist]:
Global._print(eq)
Global._error('Too many parameters provided to the distribution ' + dist)
# Process the arguments
processed_arguments = ""
for idx in range(len(arguments)):
try:
arg = float(arguments[idx])
except: # A global parameter
if arguments[idx].strip() in description['global']:
arg = arguments[idx].strip() + "%(global_index)s"
dependencies.append(arguments[idx].strip())
else:
Global._error(arguments[idx] + ' is not a global parameter of the neuron/synapse. It can not be used as an argument to the random distribution ' + dist + '(' + v + ')')
processed_arguments += str(arg)
if idx != len(arguments)-1: # not the last one
processed_arguments += ', '
<|code_end|>
. Use current file imports:
import ANNarchy.core.Global as Global
import re
import sympy
from ANNarchy.core.Random import available_distributions, distributions_arguments, distributions_equivalents
from ANNarchy.parser.Equation import Equation
from ANNarchy.parser.Function import FunctionParser
from ANNarchy.parser.StringManipulation import *
from ANNarchy.parser.ITE import *
from inspect import getmembers
and context (classes, functions, or code) from other files:
# Path: ANNarchy/core/Random.py
# class RandomDistribution(object):
# class Uniform(RandomDistribution):
# class DiscreteUniform(RandomDistribution):
# class Normal(RandomDistribution):
# class LogNormal(RandomDistribution):
# class Exponential(RandomDistribution):
# class Gamma(RandomDistribution):
# def get_values(self, shape):
# def get_list_values(self, size):
# def get_value(self):
# def keywords(self):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, min, max):
# def get_values(self, shape):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, min, max):
# def get_values(self, shape):
# def latex(self):
# def __init__(self, mu, sigma, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, mu, sigma, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
# def get_cpp_args(self):
# def __init__(self, Lambda, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
# def __init__(self, alpha, beta=1.0, seed=-1, min=None, max=None):
# def get_values(self, shape):
# def latex(self):
. Output only the next line. | definition = distributions_equivalents[dist] + '(' + processed_arguments + ')' |
Using the snippet: <|code_start|>#
# ANNarchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
class CUDAProfile(ProfileGenerator):
def __init__(self, annarchy_dir, net_id):
ProfileGenerator.__init__(self, annarchy_dir, net_id)
def generate(self):
"""
Generate Profiling class code, called from Generator instance.
"""
# Generate header for profiling
with open(self.annarchy_dir+'/generate/net'+str(self._net_id)+'/Profiling.h', 'w') as ofile:
ofile.write(self._generate_header())
def generate_body_dict(self):
"""
Creates a dictionary, contain profile code snippets.
"""
body_dict = {
<|code_end|>
, determine the next line of code. You have imports:
from ANNarchy.core import Global
from .ProfileGenerator import ProfileGenerator
from .ProfileTemplate import cuda_profile_template, cuda_profile_header
and context (class names, function names, or code) available:
# Path: ANNarchy/generator/Profile/ProfileTemplate.py
. Output only the next line. | 'prof_include': cuda_profile_template['include'], |
Predict the next line for this snippet: <|code_start|>
def annotate_spike_gather(self, pop, code):
"""
annotate the update neuron code
"""
prof_begin = cuda_profile_template['spike_gather']['before'] % {'id': pop.id, 'name': pop.name}
prof_end = cuda_profile_template['spike_gather']['after'] % {'id': pop.id, 'name': pop.name}
prof_code = """
// first run, measuring average time
%(prof_begin)s
%(code)s
%(prof_end)s
""" % {'code': code,
'prof_begin': prof_begin,
'prof_end': prof_end
}
return prof_code
def _generate_header(self):
"""
generate Profiling.h
"""
config_xml = """
_out_file << " <config>" << std::endl;
_out_file << " <paradigm>%(paradigm)s</paradigm>" << std::endl;
_out_file << " </config>" << std::endl;
""" % {'paradigm': Global.config["paradigm"]}
config = Global.config["paradigm"]
<|code_end|>
with the help of current file imports:
from ANNarchy.core import Global
from .ProfileGenerator import ProfileGenerator
from .ProfileTemplate import cuda_profile_template, cuda_profile_header
and context from other files:
# Path: ANNarchy/generator/Profile/ProfileTemplate.py
, which may contain function names, class names, or code. Output only the next line. | return cuda_profile_header % { |
Next line prediction: <|code_start|> >>> subpop.set( {'tau' : 20, 'r'= np.random.rand(subpop.size) } )
.. warning::
If you modify the value of a global parameter, this will be the case for ALL neurons of the population, not only the subset.
"""
def _set_single(name, rank, value):
if not self.population.initialized:
if not name in self.population.neuron_type.description['local']:
Global._error('can not set the value of a global attribute from a PopulationView.')
return
if isinstance(self.population.init[name], np.ndarray):
if len(self.population.geometry) == 1:
self.population.init[name][rank] = value
else: # Need to access the coordinates
coords = self.population.coordinates_from_rank(rank)
self.population.init[name][coords] = value
else:
val = self.population.init[name]
data = val * np.ones(self.population.size)
data[rank] = value
self.population.init[name] = data.reshape(self.population.geometry)
else:
ctype = self.population._get_attribute_cpp_type(name)
self.population.cyInstance.set_local_attribute(name, rank, value, ctype)
for val_key in value.keys():
if hasattr(self.population, val_key):
# Check the value
<|code_end|>
. Use current file imports:
(from ANNarchy.core import Global as Global
from .Random import RandomDistribution
from ANNarchy.core.Neuron import IndividualNeuron
import numpy as np)
and context including class names, function names, or small code snippets from other files:
# Path: ANNarchy/core/Random.py
# class RandomDistribution(object):
# """
# BaseClass for random distributions.
# """
#
# def get_values(self, shape):
# """
# Returns a np.ndarray with the given shape
# """
# Global._error('instantiated base class RandomDistribution is not allowed.')
# return np.array([0.0])
#
# def get_list_values(self, size):
# """
# Returns a list of the given size.
# """
# return list(self.get_values(size))
#
# def get_value(self):
# """
# Returns a single float value.
# """
# return self.get_values(1)[0]
#
# def keywords(self):
# return available_distributions
#
# def latex(self):
# return '?'
#
# def get_cpp_args(self):
# raise NotImplementedError
. Output only the next line. | if isinstance(value[val_key], RandomDistribution): # Make sure it is generated only once |
Next line prediction: <|code_start|># but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
class PAPIProfile(ProfileGenerator):
"""
Extent the generated code by profiling annotations.
"""
def __init__(self, annarchy_dir, net_id):
ProfileGenerator.__init__(self, annarchy_dir, net_id)
def generate(self):
"""
Generate Profiling class code, called from Generator instance.
"""
# Generate header for profiling
with open(self.annarchy_dir+'/generate/net'+str(self._net_id)+'/Profiling.h', 'w') as ofile:
ofile.write(self._generate_header())
def generate_body_dict(self):
"""
Creates a dictionary, contain profile code snippets.
"""
body_dict = {
<|code_end|>
. Use current file imports:
(from ANNarchy.core import Global
from .ProfileGenerator import ProfileGenerator
from .ProfileTemplate import papi_profile_template, papi_profile_header)
and context including class names, function names, or small code snippets from other files:
# Path: ANNarchy/generator/Profile/ProfileTemplate.py
. Output only the next line. | 'prof_include': papi_profile_template['include'], |
Given the code snippet: <|code_start|> annotate the update neuron code
"""
prof_begin = papi_profile_template['update_neuron']['before'] % {'name': pop.name}
prof_end = papi_profile_template['update_neuron']['after'] % {'name': pop.name}
prof_code = """
// first run, measuring average time
%(prof_begin)s
%(code)s
%(prof_end)s
""" % {'code': code,
'prof_begin': prof_begin,
'prof_end': prof_end
}
return prof_code
def _generate_header(self):
"""
generate Profiling.h
"""
config_xml = """
_out_file << " <config>" << std::endl;
_out_file << " <paradigm>%(paradigm)s</paradigm>" << std::endl;
_out_file << " <num_threads>%(num_threads)s</num_threads>" << std::endl;
_out_file << " </config>" << std::endl;
""" % {
'paradigm': Global.config["paradigm"],
'num_threads': Global.config["num_threads"]
}
config = Global.config["paradigm"] + '_' + str(Global.config["num_threads"]) + 'threads'
<|code_end|>
, generate the next line using the imports in this file:
from ANNarchy.core import Global
from .ProfileGenerator import ProfileGenerator
from .ProfileTemplate import papi_profile_template, papi_profile_header
and context (functions, classes, or occasionally code) from other files:
# Path: ANNarchy/generator/Profile/ProfileTemplate.py
. Output only the next line. | return papi_profile_header % { |
Given snippet: <|code_start|> form_header_value, form_parameters = sanic.headers.parse_content_header(
form_line[idx:]
)
if form_header_field == 'content-disposition':
field_name = form_parameters.get('name')
file_name = form_parameters.get('filename')
# non-ASCII filenames in RFC2231, "filename*" format
if file_name is None and form_parameters.get('filename*'):
encoding, _, value = email.utils.decode_rfc2231(
form_parameters['filename*']
)
file_name = urllib.parse.unquote(value, encoding=encoding)
elif form_header_field == 'content-type':
content_type = form_header_value
if field_name != 'file':
logger.error('Field file missing from request')
return sanic.response.json(
{'message': 'Field "file" missing from request'}, 400)
file_content = form_part[line_index:-4]
content_disposition = request.headers.get('x-content-disposition')
sample = self.sample_factory.make_sample(
file_content, file_name,
content_type, content_disposition)
try:
await self.db_con.analysis_add(sample)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import asyncio
import email.utils
import logging
import urllib.parse
import sanic
import sanic.headers
import sanic.response
from peekaboo.db import PeekabooDatabaseError
and context:
# Path: peekaboo/db.py
# DB_SCHEMA_VERSION = 9
# class InFlightSample(Base):
# class SampleInfo(Base):
# class PeekabooDatabase:
# def __str__(self):
# def __str__(self):
# def __init__(self, db_url, instance_id=0,
# stale_in_flight_threshold=15*60,
# log_level=logging.WARNING,
# async_driver=None):
# def was_transient_error(self, error, attempt, action):
# async def analysis_add(self, sample):
# def analysis_update(self, sample):
# def analysis_journal_fetch_journal(self, sample):
# async def analysis_retrieve(self, job_id):
# def mark_sample_in_flight(self, sample, instance_id=None, start_time=None):
# def clear_sample_in_flight(self, sample, instance_id=None):
# def clear_in_flight_samples(self, instance_id=None):
# def clear_stale_in_flight_samples(self):
# def clear_statement(statement_class):
# def drop(self):
which might include code, classes, or functions. Output only the next line. | except PeekabooDatabaseError as dberr: |
Predict the next line after this snippet: <|code_start|># #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or (at #
# your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, but #
# WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
""" Interface to Cortex. """
logger = logging.getLogger(__name__)
class tlp(enum.Enum):
WHITE = 0
GREEN = 1
AMBER = 2
RED = 3
<|code_end|>
using the current file's imports:
import datetime
import http.cookiejar
import logging
import os
import threading
import enum
import cortex4py.api
import cortex4py.exceptions
import requests.sessions
import schema
import urllib3.util.retry
from peekaboo.exceptions import PeekabooException
and any relevant context from other files:
# Path: peekaboo/exceptions.py
# class PeekabooException(Exception):
# """ General exception class for all custom exception classes of Peekaboo. """
# pass
. Output only the next line. | class CortexSubmitFailedException(PeekabooException): |
Using the snippet: <|code_start|># General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
""" The configuration for the main program as well as the ruleset. Handles
defaults as well as reading a configuration file. """
logger = logging.getLogger(__name__)
class PeekabooConfigParser( # pylint: disable=too-many-ancestors
configparser.ConfigParser):
""" A config parser that gives error feedback if a required file does not
exist or cannot be opened. """
LOG_LEVEL = object()
OCTAL = object()
RELIST = object()
IRELIST = object()
def __init__(self, config_file):
super().__init__()
try:
self.read_file(open(config_file))
except IOError as ioerror:
<|code_end|>
, determine the next line of code. You have imports:
import re
import sys
import logging
import configparser
from peekaboo.exceptions import PeekabooConfigException
from peekaboo.toolbox.cortex import tlp
and context (class names, function names, or code) available:
# Path: peekaboo/exceptions.py
# class PeekabooConfigException(PeekabooException):
# pass
#
# Path: peekaboo/toolbox/cortex.py
# class tlp(enum.Enum):
# WHITE = 0
# GREEN = 1
# AMBER = 2
# RED = 3
. Output only the next line. | raise PeekabooConfigException( |
Continue the code snippet: <|code_start|> if not compiled_res:
compiled_res = fallback
self.relists[section][option] = compiled_res
return compiled_res
def get_log_level(self, section, option, raw=False, vars=None,
fallback=None):
""" Get the log level from the configuration file and parse the string
into a logging loglevel such as logging.CRITICAL. Raises config
exception if the log level is unknown. Options identical to get(). """
levels = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG
}
level = self.get(section, option, raw=raw, vars=vars, fallback=None)
if level is None:
return fallback
if level not in levels:
raise PeekabooConfigException('Unknown log level %s' % level)
return levels[level]
def gettlp(self, section, option, raw=False, vars=None, fallback=None):
levels = {
<|code_end|>
. Use current file imports:
import re
import sys
import logging
import configparser
from peekaboo.exceptions import PeekabooConfigException
from peekaboo.toolbox.cortex import tlp
and context (classes, functions, or code) from other files:
# Path: peekaboo/exceptions.py
# class PeekabooConfigException(PeekabooException):
# pass
#
# Path: peekaboo/toolbox/cortex.py
# class tlp(enum.Enum):
# WHITE = 0
# GREEN = 1
# AMBER = 2
# RED = 3
. Output only the next line. | 'red': tlp.RED, |
Predict the next line after this snippet: <|code_start|> """
def __init__(self, samples=50):
"""Initialize the Block Buffer.
Parameters
----------
samples : int, optional
the desired block length in samples
"""
self.samples = samples
self.dat = None
def append(self, dat):
"""Append data to the Block Buffer.
This method accumulates the incoming data.
Parameters
----------
dat : Data
continuous Data object
"""
if self.dat is None:
self.dat = dat.copy()
elif not dat:
pass
else:
<|code_end|>
using the current file's imports:
import copy
import logging
import numpy as np
from wyrm.processing import append_cnt
and any relevant context from other files:
# Path: wyrm/processing.py
# def append_cnt(dat, dat2, timeaxis=-2, extra=None):
# """Append two continuous data objects.
#
# This method uses :func:`append` to append to continuous data
# objects. It also takes care that the resulting continuous will have
# a correct ``.axes[timeaxis]``. For that it uses the ``.fs``
# attribute and the length of the data to recalculate the timeaxis.
#
# If both ``dat`` and ``dat2`` have the ``markers`` attribute, the
# markers will be treated properly (i.e. by moving the markers of
# ``dat2`` by ``dat`` milliseconds to the right.
#
# Parameters
# ----------
# dat, dat2 : Data
# timeaxis : int, optional
# extra: list of strings, optional
#
# Returns
# -------
# dat : Data
# the resulting combination of ``dat`` and ``dat2``
#
# Raises
# ------
# AssertionError
# if at least one of the ``Data`` parameters has not the ``.fs``
# attribute or if the ``.fs`` attributes are not equal.
#
# See Also
# --------
# append, append_epo
#
# Examples
# --------
#
# >>> cnt.axis[0]
# [0, 1, 2]
# >>> cnt2.axis[0]
# [0, 1, 2]
# >>> cnt.fs
# 1000
# >>> cnt = append_cnt(cnt, cnt2)
# >>> cnt.axis[0]
# [0, 1, 2, 3, 4, 5]
#
# """
# assert hasattr(dat, 'fs') and hasattr(dat2, 'fs')
# assert dat.fs == dat2.fs
# cnt = append(dat, dat2, axis=timeaxis, extra=extra)
# if hasattr(dat, 'markers') and hasattr(dat2, 'markers'):
# # move the markers from dat2 to the right by dat-milliseconds
# ms = dat.data.shape[timeaxis] / dat.fs * 1000
# markers1 = dat.markers[:]
# markers2 = [[x[0]+ms, x[1]] for x in dat2.markers]
# markers1.extend(markers2)
# cnt.markers = markers1
# # fix the timeaxis from 0, 1, 2, 0, 1, 2 -> 0, 1, 2, 3, 4, 5
# ms = cnt.data.shape[timeaxis] / cnt.fs * 1000
# cnt.axes[timeaxis] = np.linspace(0, ms, cnt.data.shape[timeaxis], endpoint=False)
# return cnt
. Output only the next line. | self.dat = append_cnt(self.dat, dat) |
Continue the code snippet: <|code_start|>from __future__ import division
class TestLFilterZi(unittest.TestCase):
COEFFS = 10
def setUp(self):
self.b, self.a = np.ones(self.COEFFS), np.ones(self.COEFFS)
def test_lfilter_1d(self):
"""Output has the correct shape for n=1."""
<|code_end|>
. Use current file imports:
import unittest
import numpy as np
from wyrm.processing import lfilter_zi
and context (classes, functions, or code) from other files:
# Path: wyrm/processing.py
# def lfilter_zi(b, a, n=1):
# """Compute an initial state ``zi`` for the :func:`lfilter` function.
#
# When ``n == 1`` (default), this method mainly delegates the call to
# :func:`scipy.signal.lfilter_zi` and returns the result ``zi``. If
# ``n > 1``, ``zi`` is repeated ``n`` times. This is useful if you
# want to filter n-dimensional data like multi channel EEG.
#
# Parameters
# ----------
# b, a : 1-d array
# The IIR filter coefficients
# n : int, optional
# The desired width of the output vector. If ``n == 1`` the output
# is simply the 1d zi vector. For ``n > 1``, the zi vector is
# repeated ``n`` times.
#
# Returns
# -------
# zi : n-d array
# The initial state of the filter.
#
# See Also
# --------
# :func:`lfilter`, :func:`scipy.signal.lfilter_zi`
#
# Examples
# --------
#
# >>> # pre-calculate the filter coefficients and the initial filter
# >>> # state
# >>> b, a = signal.butter(butter_ord, [f_low / fn, f_high / fn], btype='band')
# >>> zi = proc.lfilter_zi(b, a, len(CHANNELS))
# >>> while 1:
# ... data, markers = amp.get_data()
# ... # convert incoming data into ``Data`` object
# ... cnt = Data(data, ...)
# ... # filter the data, note how filter now also returns the
# ... # filter state which we feed back into the next call of
# ... # ``filter``
# ... cnt, zi = lfilter(cnt, b, a, zi=zi)
# ... ...
#
# """
# zi = signal.lfilter_zi(b, a)
# if n > 1:
# zi = np.tile(zi, (n, 1)).T
# return zi
. Output only the next line. | zi = lfilter_zi(self.b, self.a) |
Given the following code snippet before the placeholder: <|code_start|>from __future__ import division
CHANNELS = 5
SAMPLES = 3
FS = 10
class TestConvertMushuData(unittest.TestCase):
def setUp(self):
xv, yv = np.meshgrid(list(range(CHANNELS)), list(range(SAMPLES)))
self.data = xv * 10 + yv
self.channels = ['ch %i' % i for i in range(CHANNELS)]
self.time = np.linspace(0, SAMPLES / FS * 1000, SAMPLES, endpoint=False)
self.markers = [[-10, 'a'], [0, 'b'], [1e100000, 'c']]
def test_convert_mushu_data(self):
"""Convert mushu data."""
<|code_end|>
, predict the next line using imports from the current file:
import unittest
import numpy as np
from wyrm.io import convert_mushu_data
and context including class names, function names, and sometimes code from other files:
# Path: wyrm/io.py
# def convert_mushu_data(data, markers, fs, channels):
# """Convert mushu data into wyrm's ``Data`` format.
#
# This convenience method creates a continuous ``Data`` object from
# the parameters given. The timeaxis always starts from zero and its
# values are calculated from the sampling frequency ``fs`` and the
# length of ``data``. The ``names`` and ``units`` attributes are
# filled with default vaules.
#
# Parameters
# ----------
# data : 2d array
# an 2 dimensional numpy array with the axes: (time, channel)
# markers : list of tuples: (float, str)
# a list of markers. Each element is a tuple of timestamp and
# string. The timestamp is the time in ms relative to the onset of
# the block of data. Note that negative values are *allowed* as
# well as values bigger than the length of the block of data
# returned. That is to be interpreted as a marker from the last
# block and a marker for a future block respectively.
# fs : float
# the sampling frequency, this number is used to calculate the
# timeaxis for the data
# channels : list or 1d array of strings
# the channel names
#
# Returns
# -------
# cnt : continuous ``Data`` object
#
# Examples
# --------
#
# Assuming that ``amp`` is an Amplifier instance from ``libmushu``,
# already configured but not started yet:
#
# >>> amp_fs = amp.get_sampling_frequency()
# >>> amp_channels = amp.get_channels()
# >>> amp.start()
# >>> while True:
# ... data, markers = amp.get_data()
# ... cnt = convert_mushu_data(data, markers, amp_fs, amp_channels)
# ... # some more code
# >>> amp.stop()
#
# References
# ----------
# https://github.com/venthur/mushu
#
# """
# time_axis = np.linspace(0, 1000 * data.shape[0] / fs, data.shape[0], endpoint=False)
# chan_axis = channels[:]
# axes = [time_axis, chan_axis]
# names = ['time', 'channel']
# units = ['uV', '#']
# cnt = Data(data=data.copy(), axes=axes, names=names, units=units)
# cnt.markers = markers[:]
# cnt.fs = fs
# return cnt
. Output only the next line. | cnt = convert_mushu_data(self.data, self.markers, FS, self.channels) |
Given snippet: <|code_start|>
class TestLoadMushuData(unittest.TestCase):
def setUp(self):
with open('foo.meta', 'w') as fh:
json.dump({'Sampling Frequency' : 1,
'Channels' : ['ch1', 'ch2'],
'Amp': 'Dummy Amp'}, fh)
with open('foo.marker', 'w') as fh:
fh.write('0.0 Marker 0\n')
fh.write('500.0 Marker 500\n')
fh.write('666.666 Marker 666.666\n')
with open('foo.eeg', 'wb') as fh:
for i in (0, 1, 33, 66, .1, .2):
fh.write(struct.pack('f', i))
def tearDown(self):
for fname in 'foo.meta', 'foo.eeg', 'foo.markers':
try:
os.remove(fname)
except:
pass
def test_incomplete_fileset(self):
"""Must raise an error if not all files are available."""
os.remove('foo.marker')
with self.assertRaises(AssertionError):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import os
import json
import struct
import numpy as np
from wyrm.io import load_mushu_data
and context:
# Path: wyrm/io.py
# def load_mushu_data(meta):
# """Load saved EEG data in Mushu's format.
#
# This method loads saved data in Mushu's format and returns a
# continuous ``Data`` object.
#
# Parameters
# ----------
# meta : str
# Path to `.meta` file. A Mushu recording consists of three
# different files: `.eeg`, `.marker`, and `.meta`.
#
# Returns
# -------
# dat : Data
# Continuous Data object
#
# Examples
# --------
#
# >>> dat = load_mushu_data('testrecording.meta')
#
# """
# # reverse and replace and reverse again to replace only the last
# # (occurrence of .meta)
# datafile = meta[::-1].replace('atem.', 'gee.', 1)[::-1]
# markerfile = meta[::-1].replace('atem.', 'rekram.', 1)[::-1]
# assert path.exists(meta) and path.exists(datafile) and path.exists(markerfile)
# # load meta data
# with open(meta, 'r') as fh:
# metadata = json.load(fh)
# fs = metadata['Sampling Frequency']
# channels = np.array(metadata['Channels'])
# # load eeg data
# data = np.fromfile(datafile, np.float32)
# data = data.reshape((-1, len(channels)))
# # load markers
# markers = []
# with open(markerfile, 'r') as fh:
# for line in fh:
# ts, m = line.split(' ', 1)
# markers.append([float(ts), str(m).strip()])
# # construct Data
# duration = len(data) * 1000 / fs
# axes = [np.linspace(0, duration, len(data), endpoint=False), channels]
# names = ['time', 'channels']
# units = ['ms', '#']
# dat = Data(data=data, axes=axes, names=names, units=units)
# dat.fs = fs
# dat.markers = markers
# return dat
which might include code, classes, or functions. Output only the next line. | load_mushu_data('foo.meta') |
Here is a snippet: <|code_start|> template_yaml = load_template(template)
if template_yaml and "ContextClass" in template_yaml:
context = load_class(template_yaml["ContextClass"])()
context.add_context_arguments(parser)
argcomplete.autocomplete(parser)
args, _ = parser.parse_known_args()
if args.template:
template_yaml = load_template(args.template)
if "ContextClass" in template_yaml:
context = load_class(template_yaml["ContextClass"])()
template_yaml.pop("ContextClass", None)
parser = argparse.ArgumentParser(description=context.__doc__)
_add_default_params(parser)
context.add_context_arguments(parser)
else:
parser = argparse.ArgumentParser(description=create_stack.__doc__)
_add_default_params(parser)
else:
parser = argparse.ArgumentParser(description=create_stack.__doc__)
_add_default_params(parser)
args = parser.parse_args()
context.resolve_parameters(args)
context.set_template(template_yaml)
if context.write(yes=args.yes):
subprocess.check_call(["ndt", "print-create-instructions",
context.component_name, context.stack_name])
return
def load_template(template):
<|code_end|>
. Write the next line using the current file imports:
from builtins import zip
from builtins import input
from builtins import str
from builtins import object
from copy import deepcopy
from collections import OrderedDict
from subprocess import Popen, PIPE
from argcomplete.completers import ChoicesCompleter
from awscli.customizations.configure.writer import ConfigFileWriter
from n_utils.ndt import find_include, find_all_includes
from n_utils.aws_infra_util import yaml_load, yaml_save
from n_utils.cf_utils import has_output_selector, select_stacks
import os
import random
import re
import shutil
import subprocess
import stat
import sys
import argparse
import argcomplete
import boto3
import ipaddr
and context from other files:
# Path: n_utils/ndt.py
# def find_include(basefile):
# if os.path.isfile(basefile):
# return basefile
# for search_dir in include_dirs:
# if os.path.isfile(search_dir + basefile):
# return search_dir + basefile
# return None
#
# def find_all_includes(pattern):
# ret = []
# dirs = list(include_dirs)
# dirs.insert(0, "./")
# for search_dir in dirs:
# for next_match in glob.glob(search_dir + pattern):
# ret.append(next_match)
# return ret
#
# Path: n_utils/aws_infra_util.py
# def yaml_load(stream):
# for name in INTRISINC_FUNCS:
# yaml.add_multi_constructor(name, INTRISINC_FUNCS[name], Loader=yaml.SafeLoader)
#
# class OrderedLoader(yaml.SafeLoader):
# pass
#
# def construct_mapping(loader, node):
# loader.flatten_mapping(node)
# return OrderedDict(loader.construct_pairs(node))
# OrderedLoader.add_constructor(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# construct_mapping)
#
# return yaml.load(stream, OrderedLoader)
#
# def yaml_save(data):
# class OrderedDumper(yaml.SafeDumper):
# pass
#
# def _dict_representer(dumper, data):
# return dumper.represent_mapping(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# list(data.items()))
#
# OrderedDumper.add_representer(OrderedDict, _dict_representer)
# return yaml.dump(data, None, OrderedDumper, default_flow_style=False)
#
# Path: n_utils/cf_utils.py
# def has_output_selector(stack, outputname, mapper):
# if 'Outputs' not in stack:
# return False
# for output in stack['Outputs']:
# if output['OutputKey'] == outputname:
# return mapper(stack)
# return False
#
# def select_stacks(selector):
# ret = []
# paginator = boto3.client('cloudformation').get_paginator('describe_stacks')
# for page in paginator.paginate():
# for stack in page.get('Stacks'):
# selected = selector(stack)
# if selected:
# ret.append(selected)
# return ret
, which may include functions, classes, or code. Output only the next line. | file_name = find_include("creatable-templates/" + template + ".yaml") |
Based on the snippet: <|code_start|> else:
parser = argparse.ArgumentParser(description=create_stack.__doc__)
_add_default_params(parser)
args = parser.parse_args()
context.resolve_parameters(args)
context.set_template(template_yaml)
if context.write(yes=args.yes):
subprocess.check_call(["ndt", "print-create-instructions",
context.component_name, context.stack_name])
return
def load_template(template):
file_name = find_include("creatable-templates/" + template + ".yaml")
if file_name:
return yaml_load(open(file_name))
else:
return None
def load_class(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def list_templates():
ret = []
<|code_end|>
, predict the immediate next line with the help of imports:
from builtins import zip
from builtins import input
from builtins import str
from builtins import object
from copy import deepcopy
from collections import OrderedDict
from subprocess import Popen, PIPE
from argcomplete.completers import ChoicesCompleter
from awscli.customizations.configure.writer import ConfigFileWriter
from n_utils.ndt import find_include, find_all_includes
from n_utils.aws_infra_util import yaml_load, yaml_save
from n_utils.cf_utils import has_output_selector, select_stacks
import os
import random
import re
import shutil
import subprocess
import stat
import sys
import argparse
import argcomplete
import boto3
import ipaddr
and context (classes, functions, sometimes code) from other files:
# Path: n_utils/ndt.py
# def find_include(basefile):
# if os.path.isfile(basefile):
# return basefile
# for search_dir in include_dirs:
# if os.path.isfile(search_dir + basefile):
# return search_dir + basefile
# return None
#
# def find_all_includes(pattern):
# ret = []
# dirs = list(include_dirs)
# dirs.insert(0, "./")
# for search_dir in dirs:
# for next_match in glob.glob(search_dir + pattern):
# ret.append(next_match)
# return ret
#
# Path: n_utils/aws_infra_util.py
# def yaml_load(stream):
# for name in INTRISINC_FUNCS:
# yaml.add_multi_constructor(name, INTRISINC_FUNCS[name], Loader=yaml.SafeLoader)
#
# class OrderedLoader(yaml.SafeLoader):
# pass
#
# def construct_mapping(loader, node):
# loader.flatten_mapping(node)
# return OrderedDict(loader.construct_pairs(node))
# OrderedLoader.add_constructor(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# construct_mapping)
#
# return yaml.load(stream, OrderedLoader)
#
# def yaml_save(data):
# class OrderedDumper(yaml.SafeDumper):
# pass
#
# def _dict_representer(dumper, data):
# return dumper.represent_mapping(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# list(data.items()))
#
# OrderedDumper.add_representer(OrderedDict, _dict_representer)
# return yaml.dump(data, None, OrderedDumper, default_flow_style=False)
#
# Path: n_utils/cf_utils.py
# def has_output_selector(stack, outputname, mapper):
# if 'Outputs' not in stack:
# return False
# for output in stack['Outputs']:
# if output['OutputKey'] == outputname:
# return mapper(stack)
# return False
#
# def select_stacks(selector):
# ret = []
# paginator = boto3.client('cloudformation').get_paginator('describe_stacks')
# for page in paginator.paginate():
# for stack in page.get('Stacks'):
# selected = selector(stack)
# if selected:
# ret.append(selected)
# return ret
. Output only the next line. | files = find_all_includes("creatable-templates/*.yaml") |
Predict the next line after this snippet: <|code_start|> context = load_class(template_yaml["ContextClass"])()
context.add_context_arguments(parser)
argcomplete.autocomplete(parser)
args, _ = parser.parse_known_args()
if args.template:
template_yaml = load_template(args.template)
if "ContextClass" in template_yaml:
context = load_class(template_yaml["ContextClass"])()
template_yaml.pop("ContextClass", None)
parser = argparse.ArgumentParser(description=context.__doc__)
_add_default_params(parser)
context.add_context_arguments(parser)
else:
parser = argparse.ArgumentParser(description=create_stack.__doc__)
_add_default_params(parser)
else:
parser = argparse.ArgumentParser(description=create_stack.__doc__)
_add_default_params(parser)
args = parser.parse_args()
context.resolve_parameters(args)
context.set_template(template_yaml)
if context.write(yes=args.yes):
subprocess.check_call(["ndt", "print-create-instructions",
context.component_name, context.stack_name])
return
def load_template(template):
file_name = find_include("creatable-templates/" + template + ".yaml")
if file_name:
<|code_end|>
using the current file's imports:
from builtins import zip
from builtins import input
from builtins import str
from builtins import object
from copy import deepcopy
from collections import OrderedDict
from subprocess import Popen, PIPE
from argcomplete.completers import ChoicesCompleter
from awscli.customizations.configure.writer import ConfigFileWriter
from n_utils.ndt import find_include, find_all_includes
from n_utils.aws_infra_util import yaml_load, yaml_save
from n_utils.cf_utils import has_output_selector, select_stacks
import os
import random
import re
import shutil
import subprocess
import stat
import sys
import argparse
import argcomplete
import boto3
import ipaddr
and any relevant context from other files:
# Path: n_utils/ndt.py
# def find_include(basefile):
# if os.path.isfile(basefile):
# return basefile
# for search_dir in include_dirs:
# if os.path.isfile(search_dir + basefile):
# return search_dir + basefile
# return None
#
# def find_all_includes(pattern):
# ret = []
# dirs = list(include_dirs)
# dirs.insert(0, "./")
# for search_dir in dirs:
# for next_match in glob.glob(search_dir + pattern):
# ret.append(next_match)
# return ret
#
# Path: n_utils/aws_infra_util.py
# def yaml_load(stream):
# for name in INTRISINC_FUNCS:
# yaml.add_multi_constructor(name, INTRISINC_FUNCS[name], Loader=yaml.SafeLoader)
#
# class OrderedLoader(yaml.SafeLoader):
# pass
#
# def construct_mapping(loader, node):
# loader.flatten_mapping(node)
# return OrderedDict(loader.construct_pairs(node))
# OrderedLoader.add_constructor(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# construct_mapping)
#
# return yaml.load(stream, OrderedLoader)
#
# def yaml_save(data):
# class OrderedDumper(yaml.SafeDumper):
# pass
#
# def _dict_representer(dumper, data):
# return dumper.represent_mapping(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# list(data.items()))
#
# OrderedDumper.add_representer(OrderedDict, _dict_representer)
# return yaml.dump(data, None, OrderedDumper, default_flow_style=False)
#
# Path: n_utils/cf_utils.py
# def has_output_selector(stack, outputname, mapper):
# if 'Outputs' not in stack:
# return False
# for output in stack['Outputs']:
# if output['OutputKey'] == outputname:
# return mapper(stack)
# return False
#
# def select_stacks(selector):
# ret = []
# paginator = boto3.client('cloudformation').get_paginator('describe_stacks')
# for page in paginator.paginate():
# for stack in page.get('Stacks'):
# selected = selector(stack)
# if selected:
# ret.append(selected)
# return ret
. Output only the next line. | return yaml_load(open(file_name)) |
Predict the next line after this snippet: <|code_start|> "paramEip",
myself.elastic_ip))
def write(self, yes=False):
if "Files" in self.template:
for entry in self.template["Files"]:
for source, dest in list(entry.items()):
dest = self.component_name + os.sep + dest % self.__dict__
dest = os.path.normpath(dest)
dest_dir = os.path.normpath(os.path.dirname(dest))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
source_file = find_include(source)
shutil.copy2(source_file, dest)
self.template.pop("Files", None)
stack_dir = os.path.join(".", self.component_name, "stack-" + self.stack_name)
if not os.path.exists(stack_dir):
os.makedirs(stack_dir)
if self.branch_mode == BRANCH_MODES.SINGLE_STACK:
file_name = "infra.properties"
stack_props = os.path.join(stack_dir, file_name)
if not os.path.exists(stack_props):
with open(stack_props, 'w') as stack_props_file:
stack_props_file.write("STACK_NAME=$ORIG_STACK_NAME\n")
stack_template = os.path.join(stack_dir, "template.yaml")
if os.path.exists(stack_template) and not yes:
answer = eval(input("Overwrite " + self.stack_name + " stack? (n): "))
if not answer or not answer.lower() == "y":
return False
with open(stack_template, "w") as stack_file:
<|code_end|>
using the current file's imports:
from builtins import zip
from builtins import input
from builtins import str
from builtins import object
from copy import deepcopy
from collections import OrderedDict
from subprocess import Popen, PIPE
from argcomplete.completers import ChoicesCompleter
from awscli.customizations.configure.writer import ConfigFileWriter
from n_utils.ndt import find_include, find_all_includes
from n_utils.aws_infra_util import yaml_load, yaml_save
from n_utils.cf_utils import has_output_selector, select_stacks
import os
import random
import re
import shutil
import subprocess
import stat
import sys
import argparse
import argcomplete
import boto3
import ipaddr
and any relevant context from other files:
# Path: n_utils/ndt.py
# def find_include(basefile):
# if os.path.isfile(basefile):
# return basefile
# for search_dir in include_dirs:
# if os.path.isfile(search_dir + basefile):
# return search_dir + basefile
# return None
#
# def find_all_includes(pattern):
# ret = []
# dirs = list(include_dirs)
# dirs.insert(0, "./")
# for search_dir in dirs:
# for next_match in glob.glob(search_dir + pattern):
# ret.append(next_match)
# return ret
#
# Path: n_utils/aws_infra_util.py
# def yaml_load(stream):
# for name in INTRISINC_FUNCS:
# yaml.add_multi_constructor(name, INTRISINC_FUNCS[name], Loader=yaml.SafeLoader)
#
# class OrderedLoader(yaml.SafeLoader):
# pass
#
# def construct_mapping(loader, node):
# loader.flatten_mapping(node)
# return OrderedDict(loader.construct_pairs(node))
# OrderedLoader.add_constructor(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# construct_mapping)
#
# return yaml.load(stream, OrderedLoader)
#
# def yaml_save(data):
# class OrderedDumper(yaml.SafeDumper):
# pass
#
# def _dict_representer(dumper, data):
# return dumper.represent_mapping(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# list(data.items()))
#
# OrderedDumper.add_representer(OrderedDict, _dict_representer)
# return yaml.dump(data, None, OrderedDumper, default_flow_style=False)
#
# Path: n_utils/cf_utils.py
# def has_output_selector(stack, outputname, mapper):
# if 'Outputs' not in stack:
# return False
# for output in stack['Outputs']:
# if output['OutputKey'] == outputname:
# return mapper(stack)
# return False
#
# def select_stacks(selector):
# ret = []
# paginator = boto3.client('cloudformation').get_paginator('describe_stacks')
# for page in paginator.paginate():
# for stack in page.get('Stacks'):
# selected = selector(stack)
# if selected:
# ret.append(selected)
# return ret
. Output only the next line. | stack_file.write(yaml_save(self.template)) |
Given the following code snippet before the placeholder: <|code_start|> return "10." + str(random.randint(0, 255)) + ".0.0"
def set_template(self, template):
common_yaml = yaml_load(open(find_include("creatable-templates/network/common.yaml")))
self.template, self.common_yaml = \
_get_network_yaml(self.stack_name, self.vpc_cidr, self.subnet_prefixlen,
self.subnet_base, template, common_yaml)
def write(self, yes=False):
ContextClassBase.write(self, yes=yes)
if not os.path.exists("common"):
os.makedirs("common")
common_out = os.path.join("common", "network.yaml")
with open(common_out, 'w') as c_out:
c_out.write(yaml_save(self.common_yaml))
return True
class BakeryRoles(ContextClassBase):
""" Creates roles necessary for baking images and deploying stacks
"""
def __init__(self):
ContextClassBase.__init__(self, ['network_stack', 'vault_stack'])
self._ask_network_stack()
self._ask_vault_stack()
def _ask_network_stack(self):
self.network_stack = "Network stack ({0}):\n"
<|code_end|>
, predict the next line using imports from the current file:
from builtins import zip
from builtins import input
from builtins import str
from builtins import object
from copy import deepcopy
from collections import OrderedDict
from subprocess import Popen, PIPE
from argcomplete.completers import ChoicesCompleter
from awscli.customizations.configure.writer import ConfigFileWriter
from n_utils.ndt import find_include, find_all_includes
from n_utils.aws_infra_util import yaml_load, yaml_save
from n_utils.cf_utils import has_output_selector, select_stacks
import os
import random
import re
import shutil
import subprocess
import stat
import sys
import argparse
import argcomplete
import boto3
import ipaddr
and context including class names, function names, and sometimes code from other files:
# Path: n_utils/ndt.py
# def find_include(basefile):
# if os.path.isfile(basefile):
# return basefile
# for search_dir in include_dirs:
# if os.path.isfile(search_dir + basefile):
# return search_dir + basefile
# return None
#
# def find_all_includes(pattern):
# ret = []
# dirs = list(include_dirs)
# dirs.insert(0, "./")
# for search_dir in dirs:
# for next_match in glob.glob(search_dir + pattern):
# ret.append(next_match)
# return ret
#
# Path: n_utils/aws_infra_util.py
# def yaml_load(stream):
# for name in INTRISINC_FUNCS:
# yaml.add_multi_constructor(name, INTRISINC_FUNCS[name], Loader=yaml.SafeLoader)
#
# class OrderedLoader(yaml.SafeLoader):
# pass
#
# def construct_mapping(loader, node):
# loader.flatten_mapping(node)
# return OrderedDict(loader.construct_pairs(node))
# OrderedLoader.add_constructor(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# construct_mapping)
#
# return yaml.load(stream, OrderedLoader)
#
# def yaml_save(data):
# class OrderedDumper(yaml.SafeDumper):
# pass
#
# def _dict_representer(dumper, data):
# return dumper.represent_mapping(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# list(data.items()))
#
# OrderedDumper.add_representer(OrderedDict, _dict_representer)
# return yaml.dump(data, None, OrderedDumper, default_flow_style=False)
#
# Path: n_utils/cf_utils.py
# def has_output_selector(stack, outputname, mapper):
# if 'Outputs' not in stack:
# return False
# for output in stack['Outputs']:
# if output['OutputKey'] == outputname:
# return mapper(stack)
# return False
#
# def select_stacks(selector):
# ret = []
# paginator = boto3.client('cloudformation').get_paginator('describe_stacks')
# for page in paginator.paginate():
# for stack in page.get('Stacks'):
# selected = selector(stack)
# if selected:
# ret.append(selected)
# return ret
. Output only the next line. | def network_sel(stack): return has_output_selector(stack, "VPC", |
Given snippet: <|code_start|> def set_template(self, template):
common_yaml = yaml_load(open(find_include("creatable-templates/network/common.yaml")))
self.template, self.common_yaml = \
_get_network_yaml(self.stack_name, self.vpc_cidr, self.subnet_prefixlen,
self.subnet_base, template, common_yaml)
def write(self, yes=False):
ContextClassBase.write(self, yes=yes)
if not os.path.exists("common"):
os.makedirs("common")
common_out = os.path.join("common", "network.yaml")
with open(common_out, 'w') as c_out:
c_out.write(yaml_save(self.common_yaml))
return True
class BakeryRoles(ContextClassBase):
""" Creates roles necessary for baking images and deploying stacks
"""
def __init__(self):
ContextClassBase.__init__(self, ['network_stack', 'vault_stack'])
self._ask_network_stack()
self._ask_vault_stack()
def _ask_network_stack(self):
self.network_stack = "Network stack ({0}):\n"
def network_sel(stack): return has_output_selector(stack, "VPC",
lambda stack: stack['StackName'])
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from builtins import zip
from builtins import input
from builtins import str
from builtins import object
from copy import deepcopy
from collections import OrderedDict
from subprocess import Popen, PIPE
from argcomplete.completers import ChoicesCompleter
from awscli.customizations.configure.writer import ConfigFileWriter
from n_utils.ndt import find_include, find_all_includes
from n_utils.aws_infra_util import yaml_load, yaml_save
from n_utils.cf_utils import has_output_selector, select_stacks
import os
import random
import re
import shutil
import subprocess
import stat
import sys
import argparse
import argcomplete
import boto3
import ipaddr
and context:
# Path: n_utils/ndt.py
# def find_include(basefile):
# if os.path.isfile(basefile):
# return basefile
# for search_dir in include_dirs:
# if os.path.isfile(search_dir + basefile):
# return search_dir + basefile
# return None
#
# def find_all_includes(pattern):
# ret = []
# dirs = list(include_dirs)
# dirs.insert(0, "./")
# for search_dir in dirs:
# for next_match in glob.glob(search_dir + pattern):
# ret.append(next_match)
# return ret
#
# Path: n_utils/aws_infra_util.py
# def yaml_load(stream):
# for name in INTRISINC_FUNCS:
# yaml.add_multi_constructor(name, INTRISINC_FUNCS[name], Loader=yaml.SafeLoader)
#
# class OrderedLoader(yaml.SafeLoader):
# pass
#
# def construct_mapping(loader, node):
# loader.flatten_mapping(node)
# return OrderedDict(loader.construct_pairs(node))
# OrderedLoader.add_constructor(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# construct_mapping)
#
# return yaml.load(stream, OrderedLoader)
#
# def yaml_save(data):
# class OrderedDumper(yaml.SafeDumper):
# pass
#
# def _dict_representer(dumper, data):
# return dumper.represent_mapping(
# yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
# list(data.items()))
#
# OrderedDumper.add_representer(OrderedDict, _dict_representer)
# return yaml.dump(data, None, OrderedDumper, default_flow_style=False)
#
# Path: n_utils/cf_utils.py
# def has_output_selector(stack, outputname, mapper):
# if 'Outputs' not in stack:
# return False
# for output in stack['Outputs']:
# if output['OutputKey'] == outputname:
# return mapper(stack)
# return False
#
# def select_stacks(selector):
# ret = []
# paginator = boto3.client('cloudformation').get_paginator('describe_stacks')
# for page in paginator.paginate():
# for stack in page.get('Stacks'):
# selected = selector(stack)
# if selected:
# ret.append(selected)
# return ret
which might include code, classes, or functions. Output only the next line. | self.network_stacks = select_stacks(network_sel) |
Using the snippet: <|code_start|>from __future__ import print_function
def load_project_env():
""" Print parameters set by git config variables to setup project environment with region and aws credentials
"""
proc = subprocess.Popen(["git", "config", "--list", "--local"], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = proc.communicate()
if proc.returncode:
return
vars = {}
for line in out[0].decode(locale.getpreferredencoding()).split("\n"):
if line:
next = line.split("=", 1)
vars[next[0]] = next[1]
do_print = False
ret = ""
if "ndt.profile.azure" in vars:
<|code_end|>
, determine the next line of code. You have imports:
from os import linesep
from os.path import expanduser, join, exists
from sys import argv
from n_utils.profile_util import enable_profile
import locale
import subprocess
and context (class names, function names, or code) available:
# Path: n_utils/profile_util.py
# def enable_profile(profile_type, profile):
# profile = re.sub("[^a-zA-Z0-9_-]", "_", profile)
# safe_profile = re.sub("[^A-Z0-9]", "_", profile.upper())
# if profile_type == "iam":
# _print_profile_switch(profile)
# elif profile_type == "azure":
# _print_profile_switch(profile)
# if "AWS_SESSION_EXPIRATION_EPOC_" + safe_profile in os.environ:
# expiry = int(os.environ["AWS_SESSION_EXPIRATION_EPOC_" + safe_profile])
# else:
# expiry = _epoc_secs(parse(read_profile_expiry(profile)).replace(tzinfo=tzutc()))
# if expiry < _epoc_secs(datetime.now(tzutc())):
# if "AWS_SESSION_EXPIRATION_EPOC_" + safe_profile in os.environ:
# print("unset AWS_SESSION_EXPIRATION_EPOC_" + safe_profile + ";")
# print("aws-azure-login --profile " + profile + " --no-prompt")
# elif "AWS_SESSION_EXPIRATION_EPOC_" + safe_profile not in os.environ:
# print_profile_expiry(profile)
# elif profile_type == "ndt":
# if "AWS_SESSION_EXPIRATION_EPOC_" + safe_profile in os.environ:
# expiry = int(os.environ["AWS_SESSION_EXPIRATION_EPOC_" + safe_profile])
# else:
# expiry = _epoc_secs(parse(read_profile_expiry(profile)).replace(tzinfo=tzutc()))
# if expiry < _epoc_secs(datetime.now(tzutc())):
# if "AWS_SESSION_EXPIRATION_EPOC_" + safe_profile in os.environ:
# print("unset AWS_SESSION_EXPIRATION_EPOC_" + safe_profile + ";")
# profile_data = get_profile(profile)
# if "ndt_origin_profile" not in profile_data:
# return
# origin_profile = profile_data["ndt_origin_profile"]
# origin_profile_data = get_profile(origin_profile)
# if "azure_tenant_id" in origin_profile_data:
# origin_type = "azure"
# else:
# origin_type = "iam"
# enable_profile(origin_type, origin_profile)
#
# command = ["ndt", "assume-role"]
# if "ndt_mfa_token" in profile_data:
# command.append("-t")
# command.append(profile_data["ndt_mfa_token"])
# if "ndt_default_duration_hours" in profile_data:
# command.append("-d")
# duration = str(int(profile_data["ndt_default_duration_hours"]) * 60)
# command.append(duration)
# command.append("-p")
# command.append(profile)
# command.append(profile_data["ndt_role_arn"])
# print(" ".join(command))
# elif "AWS_SESSION_EXPIRATION_EPOC_" + safe_profile not in os.environ:
# print_profile_expiry(profile)
# _print_profile_switch(profile)
. Output only the next line. | enable_profile("azure", vars["ndt.profile.azure"]) |
Next line prediction: <|code_start|> 'DistributionList': {
'Marker': 'string',
'NextMarker': 'string',
'MaxItems': 123,
'IsTruncated': False,
'Quantity': 123,
'Items': [
{
'Id': 'id1',
'ARN': 'arn1',
'Status': 'status1',
'LastModifiedTime': datetime(2015, 1, 1),
'DomainName': 'string',
'Comment': 'comment1',
},
{
'Id': 'id2',
'ARN': 'arn2',
'Status': 'status2',
'LastModifiedTime': datetime(2016, 1, 1),
'DomainName': 'string',
'Comment': 'comment2',
}
]
}
}
def test_distributions(mocker, paginator):
paginator.paginate.return_value = [DISTRIBUTION]
<|code_end|>
. Use current file imports:
(from datetime import datetime
from n_utils.cloudfront_utils import distributions, distribution_comments)
and context including class names, function names, or small code snippets from other files:
# Path: n_utils/cloudfront_utils.py
# def distributions():
# pages = boto3.client("cloudfront").get_paginator('list_distributions')
# print(pages.paginate())
# for page in pages.paginate():
# print(page)
# distribution_list = page.get('DistributionList')
# for distribution in distribution_list['Items']:
# yield distribution['Id']
#
# def distribution_comments():
# pages = boto3.client("cloudfront").get_paginator('list_distributions')
# for page in pages.paginate():
# distribution_list = page.get('DistributionList')
# for distribution in distribution_list['Items']:
# yield distribution['Comment']
. Output only the next line. | assert list(distributions()) == ['id1', 'id2'] |
Given the code snippet: <|code_start|> 'Quantity': 123,
'Items': [
{
'Id': 'id1',
'ARN': 'arn1',
'Status': 'status1',
'LastModifiedTime': datetime(2015, 1, 1),
'DomainName': 'string',
'Comment': 'comment1',
},
{
'Id': 'id2',
'ARN': 'arn2',
'Status': 'status2',
'LastModifiedTime': datetime(2016, 1, 1),
'DomainName': 'string',
'Comment': 'comment2',
}
]
}
}
def test_distributions(mocker, paginator):
paginator.paginate.return_value = [DISTRIBUTION]
assert list(distributions()) == ['id1', 'id2']
def test_distribution_comments(mocker, paginator):
paginator.paginate.return_value = [DISTRIBUTION]
<|code_end|>
, generate the next line using the imports in this file:
from datetime import datetime
from n_utils.cloudfront_utils import distributions, distribution_comments
and context (functions, classes, or occasionally code) from other files:
# Path: n_utils/cloudfront_utils.py
# def distributions():
# pages = boto3.client("cloudfront").get_paginator('list_distributions')
# print(pages.paginate())
# for page in pages.paginate():
# print(page)
# distribution_list = page.get('DistributionList')
# for distribution in distribution_list['Items']:
# yield distribution['Id']
#
# def distribution_comments():
# pages = boto3.client("cloudfront").get_paginator('list_distributions')
# for page in pages.paginate():
# distribution_list = page.get('DistributionList')
# for distribution in distribution_list['Items']:
# yield distribution['Comment']
. Output only the next line. | assert list(distribution_comments()) == ['comment1', 'comment2'] |
Using the snippet: <|code_start|>#!/usr/bin/env python
# Copyright 2017 Nitor Creations Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from __future__ import print_function
def mfa_add_token(args):
""" Adds or overwrites an MFA token to be used with role assumption.
Tokens will be saved in a .ndt subdirectory in the user's home directory. """
ndt_dir = get_ndt_dir()
if not os.path.exists(ndt_dir):
os.makedirs(ndt_dir)
data = {
'token_name': args.token_name,
'token_arn': args.token_arn,
<|code_end|>
, determine the next line of code. You have imports:
from builtins import str
from builtins import object
from os import walk
from .yuuuu3332111i1l1i import IiII1IiiIiI1, I11iIi1I
from Cryptodome . Cipher import AES
from Cryptodome . Util import Counter
from Cryptodome . Hash import SHA256
import os
import yaml
import pyotp
import json
import base64
import pyqrcode
and context (class names, function names, or code) available:
# Path: n_utils/yuuuu3332111i1l1i.py
# def IiII1IiiIiI1(_______d_1l1__):
# iIiiiI1IiI1I1 = Counter . new(128, initial_value=1337)
# o0OoOoOO00 = AES . new(I11i(), AES . MODE_CTR, counter=iIiiiI1IiI1I1)
# return base64 . b64encode(o0OoOoOO00 . encrypt(_______d_1l1__))
# if 64 - 64:
# OOooo000oo0 . i1 * ii1IiI1i % IIIiiIIii
#
# def I11iIi1I(_______d_1l1__):
# iIiiiI1IiI1I1 = Counter . new(128, initial_value=1337)
# o0OoOoOO00 = AES . new(I11i(), AES . MODE_CTR, counter=iIiiiI1IiI1I1)
# return o0OoOoOO00 . decrypt(base64 . b64decode(_______d_1l1__))
# if 48 - 48:
# iII111i % IiII + I1Ii111 / ooOoO0o * o00O0oo
. Output only the next line. | 'token_secret': "enc--" + str(IiII1IiiIiI1(args.token_secret)) |
Next line prediction: <|code_start|>
def mfa_read_token(token_name):
""" Reads a previously added MFA token file and returns its data. """
data = None
with open(get_ndt_dir() + '/mfa_' + token_name, 'r') as infile:
try:
data = yaml.load(infile)
except yaml.YAMLError as exc:
print(exc)
if data:
if not data['token_secret'].startswith("enc--"):
data['force'] = True
mfa_add_token(Struct(**data))
return mfa_read_token(token_name)
return data
def get_ndt_dir():
""" Gets cross platform ndt directory path. Makes sure it exists. """
ndt_dir = os.path.expanduser("~/.ndt")
if not os.path.exists(ndt_dir):
os.makedirs(ndt_dir)
return ndt_dir
def mfa_generate_code(token_name):
""" Generates an MFA code with the specified token. """
token = mfa_read_token(token_name)
if token['token_secret'].startswith("enc--"):
<|code_end|>
. Use current file imports:
(from builtins import str
from builtins import object
from os import walk
from .yuuuu3332111i1l1i import IiII1IiiIiI1, I11iIi1I
from Cryptodome . Cipher import AES
from Cryptodome . Util import Counter
from Cryptodome . Hash import SHA256
import os
import yaml
import pyotp
import json
import base64
import pyqrcode)
and context including class names, function names, or small code snippets from other files:
# Path: n_utils/yuuuu3332111i1l1i.py
# def IiII1IiiIiI1(_______d_1l1__):
# iIiiiI1IiI1I1 = Counter . new(128, initial_value=1337)
# o0OoOoOO00 = AES . new(I11i(), AES . MODE_CTR, counter=iIiiiI1IiI1I1)
# return base64 . b64encode(o0OoOoOO00 . encrypt(_______d_1l1__))
# if 64 - 64:
# OOooo000oo0 . i1 * ii1IiI1i % IIIiiIIii
#
# def I11iIi1I(_______d_1l1__):
# iIiiiI1IiI1I1 = Counter . new(128, initial_value=1337)
# o0OoOoOO00 = AES . new(I11i(), AES . MODE_CTR, counter=iIiiiI1IiI1I1)
# return o0OoOoOO00 . decrypt(base64 . b64decode(_______d_1l1__))
# if 48 - 48:
# iII111i % IiII + I1Ii111 / ooOoO0o * o00O0oo
. Output only the next line. | secret = I11iIi1I(token['token_secret'][5:]) |
Using the snippet: <|code_start|> allocation_id = address_data['Addresses'][0]['AllocationId']
print("Allocating " + allocation_id + " on " + info.instance_id())
ec2 = boto3.client('ec2')
ec2.associate_address(InstanceId=info.instance_id(),
AllocationId=allocation_id,
AllowReassociation=True)
def init():
info = InstanceInfo()
return str(info)
def get_userdata(outfile):
response = get_retry(USER_DATA_URL)
if outfile == "-":
print(response.text)
else:
with open(outfile, 'w') as outf:
outf.write(response.text)
def id_generator(size=10, chars=string.ascii_uppercase + string.digits +
string.ascii_lowercase):
return ''.join(random.choice(chars) for _ in range(size))
def assume_role(role_arn, mfa_token_name=None, duration_minutes=60):
sts = boto3.client("sts")
if mfa_token_name:
<|code_end|>
, determine the next line of code. You have imports:
from builtins import str
from builtins import range
from builtins import object
from collections import deque, OrderedDict
from os.path import expanduser
from threading import Event, Lock, Thread
from operator import itemgetter
from botocore.exceptions import ClientError, EndpointConnectionError
from requests.exceptions import ConnectionError
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from n_vault import Vault
from n_utils.mfa_utils import mfa_read_token, mfa_generate_code
from time import time as now
import io
import json
import os
import random
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import tempfile
import six
import boto3
import requests
import socket
import errno
import wmi
and context (class names, function names, or code) available:
# Path: n_utils/mfa_utils.py
# def mfa_read_token(token_name):
# """ Reads a previously added MFA token file and returns its data. """
# data = None
# with open(get_ndt_dir() + '/mfa_' + token_name, 'r') as infile:
# try:
# data = yaml.load(infile)
# except yaml.YAMLError as exc:
# print(exc)
# if data:
# if not data['token_secret'].startswith("enc--"):
# data['force'] = True
# mfa_add_token(Struct(**data))
# return mfa_read_token(token_name)
# return data
#
# def mfa_generate_code(token_name):
# """ Generates an MFA code with the specified token. """
# token = mfa_read_token(token_name)
# if token['token_secret'].startswith("enc--"):
# secret = I11iIi1I(token['token_secret'][5:])
# else:
# secret = token['token_secret']
# totp = pyotp.TOTP(secret)
# return totp.now()
. Output only the next line. | token = mfa_read_token(mfa_token_name) |
Given the following code snippet before the placeholder: <|code_start|> print("Allocating " + allocation_id + " on " + info.instance_id())
ec2 = boto3.client('ec2')
ec2.associate_address(InstanceId=info.instance_id(),
AllocationId=allocation_id,
AllowReassociation=True)
def init():
info = InstanceInfo()
return str(info)
def get_userdata(outfile):
response = get_retry(USER_DATA_URL)
if outfile == "-":
print(response.text)
else:
with open(outfile, 'w') as outf:
outf.write(response.text)
def id_generator(size=10, chars=string.ascii_uppercase + string.digits +
string.ascii_lowercase):
return ''.join(random.choice(chars) for _ in range(size))
def assume_role(role_arn, mfa_token_name=None, duration_minutes=60):
sts = boto3.client("sts")
if mfa_token_name:
token = mfa_read_token(mfa_token_name)
<|code_end|>
, predict the next line using imports from the current file:
from builtins import str
from builtins import range
from builtins import object
from collections import deque, OrderedDict
from os.path import expanduser
from threading import Event, Lock, Thread
from operator import itemgetter
from botocore.exceptions import ClientError, EndpointConnectionError
from requests.exceptions import ConnectionError
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from n_vault import Vault
from n_utils.mfa_utils import mfa_read_token, mfa_generate_code
from time import time as now
import io
import json
import os
import random
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import tempfile
import six
import boto3
import requests
import socket
import errno
import wmi
and context including class names, function names, and sometimes code from other files:
# Path: n_utils/mfa_utils.py
# def mfa_read_token(token_name):
# """ Reads a previously added MFA token file and returns its data. """
# data = None
# with open(get_ndt_dir() + '/mfa_' + token_name, 'r') as infile:
# try:
# data = yaml.load(infile)
# except yaml.YAMLError as exc:
# print(exc)
# if data:
# if not data['token_secret'].startswith("enc--"):
# data['force'] = True
# mfa_add_token(Struct(**data))
# return mfa_read_token(token_name)
# return data
#
# def mfa_generate_code(token_name):
# """ Generates an MFA code with the specified token. """
# token = mfa_read_token(token_name)
# if token['token_secret'].startswith("enc--"):
# secret = I11iIi1I(token['token_secret'][5:])
# else:
# secret = token['token_secret']
# totp = pyotp.TOTP(secret)
# return totp.now()
. Output only the next line. | code = mfa_generate_code(mfa_token_name) |
Using the snippet: <|code_start|>from __future__ import division
class Boundaries(object):
def __init__(self, container, threshold=0.03):
self.threshold = threshold
if hasattr(container, 'iter_fragments'):
iterator = container.iter_fragments()
else:
iterator = (f for f in container)
bounds = defaultdict(list)
length = 0
for fragment in iterator:
bounds[fragment.name].append(fragment.interval)
length += 1
self.length = length
self.bounds = {}
for name, intervals in bounds.iteritems():
<|code_end|>
, determine the next line of code. You have imports:
from collections import defaultdict
from tde.util.functions import flatten
import numpy as np
and context (class names, function names, or code) available:
# Path: tde/util/functions.py
# def iterator_length(iterable):
# def unique(iterable):
# def unique_by(iterable, key):
# def intersection(iterable1, iterable2):
# def intersection_by(iterable1, iterable2, key):
# def grouper(n, iterable, padvalue=None):
# def fname2speaker(corpus_type):
# def fscore(p, r):
. Output only the next line. | points = set(flatten((interval.start, interval.end) |
Predict the next line for this snippet: <|code_start|>"""Set primitives.
"""
from __future__ import division
def typeset(pairs):
"""
Yield the unique marks in a pair iterator.
Parameters
----------
pairs : Iterator over (FragmentToken, FragmentToken) pairs
Returns
-------
Iterator over strings
Unique marks.
"""
<|code_end|>
with the help of current file imports:
from collections import Counter, defaultdict
from tde.util.functions import unique, iterator_length, flatten
from tde.substrings.acss import pairwise_substring_completion
from tde.data.classes import ClassDict
and context from other files:
# Path: tde/util/functions.py
# def iterator_length(iterable):
# def unique(iterable):
# def unique_by(iterable, key):
# def intersection(iterable1, iterable2):
# def intersection_by(iterable1, iterable2, key):
# def grouper(n, iterable, padvalue=None):
# def fname2speaker(corpus_type):
# def fscore(p, r):
#
# Path: tde/substrings/acss.py
# def pairwise_substring_completion(fragment1, fragment2, corpus,
# minlength, maxlength):
# name1, name2 = fragment1.name, fragment2.name
# tokenseq1 = [(f.mark, f.interval)
# for f in corpus.tokens(name1, fragment1.interval)]
# tokenseq2 = [(f.mark, f.interval)
# for f in corpus.tokens(name2, fragment2.interval)]
#
# for seq1, seq2 in psubstrings(tokenseq1, tokenseq2, minlength, maxlength):
# submark1, intervalseq1 = zip(*seq1)
# submark2, intervalseq2 = zip(*seq2)
# interval1 = Interval(intervalseq1[0].start, intervalseq1[-1].end)
# interval2 = Interval(intervalseq2[0].start, intervalseq2[-1].end)
# yield (FragmentToken(name1, interval1, submark1),
# FragmentToken(name2, interval2, submark2))
#
# Path: tde/data/classes.py
# class ClassDict(collections.Mapping):
# """
# Mapping representing the partitioning of a collection of FragmentTokens
# into classes
#
# Parameters
# ----------
# clsdict : dict from ClassID to tuple of FragmentToken
#
# Methods
# -------
# iter_fragments(with_class=False)
# Iterate over single FragmentTokens.
# iter_pairs()
# Iterate over pairs of FragmentTokens.
# restrict()
# Restrict to a collection of Intervals
#
# """
# def __init__(self, clsdict):
# self.clsdict = clsdict
#
# def __contains__(self, key):
# return key in self.clsdict
#
# def __getitem__(self, key):
# return self.clsdict[key]
#
# def __iter__(self):
# return iter(self.clsdict)
#
# def __len__(self):
# return len(self.clsdict)
#
# def __str__(self):
# return str(self.clsdict)
#
# def __eq__(self, other):
# return self.clsdict == other.clsdict
#
# def __ne__(self, other):
# return not self.__eq__(other)
#
# def pretty(self):
# return pformat(self.clsdict)
#
# def iter_fragments(self, with_class=False):
# """
# Iterate over FragmentTokens.
#
# Parameters
# ----------
# with_class : bool, optional
# Iterate over (ClassID, FragmentToken) pairs instead
#
# Returns
# -------
# Iterator over FragmentToken or (ClassID, FragmentToken) pairs
#
# """
# if with_class:
# return unique(flatten(izip(repeat(c), v)
# for c, v in self.clsdict.iteritems()))
# else:
# return unique(flatten(self.clsdict.itervalues()))
#
# def iter_pairs(self, within, order):
# """
# Iterate over FragmentToken pairs.
#
# Parameters
# ----------
# within : bool
# Only select pairs from the same class.
# order : bool
# Also include reverse of a pair.
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs.
#
# """
# vals = self.clsdict.itervalues()
# if within:
# if order:
# pairs = flatten(((f1, f2), (f2, f1))
# for fragments in vals
# for f1, f2 in combinations(fragments, 2))
# else:
# pairs = (tuple(sorted((f1, f2),
# key=lambda f: (f.name, f.interval.start)))
# for fragments in vals
# for f1, f2 in combinations(fragments, 2))
# else: # across classes
# if order:
# pairs = (((f1, f2), (f2, f1))
# for f1, f2 in combinations(flatten(vals), 2))
# pairs = flatten(pairs)
# else:
# pairs = (tuple(sorted((f1, f2),
# key=lambda f: (f.name, f.interval.start)))
# for f1, f2 in combinations(flatten(vals), 2))
# return unique(ifilterfalse(lambda f: f[0].interval.overlaps_with(f[1].interval),
# pairs))
#
# def restrict(self, interval_db, remove_singletons=False):
# """
# Restrict the ClassDict to a set of Intervals.
#
# Returns a new ClassDict object with only the FragmentTokens
# that are fully covered in `interval_db`.
#
# Parameters
# ----------
# interval_db : IntervalDB
# Collection of Intervals
# remove_singletons : bool
# Remove classes with a single element
#
# Returns
# -------
# ClassDict
# New ClassDict object restricted to the fragments in `interval_db`
#
# """
# r = {}
# for classID, fragments in self.clsdict.iteritems():
# fs = [f for f in fragments
# if interval_db.is_covered(f.name, f.interval)]
# if len(fs) == 0:
# pass
# elif len(fs) == 1:
# if remove_singletons:
# pass
# else:
# r[classID] = tuple(fs)
# else:
# r[classID] = tuple(fs)
# return ClassDict(r)
, which may contain function names, class names, or code. Output only the next line. | return unique(f.mark for f in flatten(pairs)) |
Based on the snippet: <|code_start|>"""Set primitives.
"""
from __future__ import division
def typeset(pairs):
"""
Yield the unique marks in a pair iterator.
Parameters
----------
pairs : Iterator over (FragmentToken, FragmentToken) pairs
Returns
-------
Iterator over strings
Unique marks.
"""
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import Counter, defaultdict
from tde.util.functions import unique, iterator_length, flatten
from tde.substrings.acss import pairwise_substring_completion
from tde.data.classes import ClassDict
and context (classes, functions, sometimes code) from other files:
# Path: tde/util/functions.py
# def iterator_length(iterable):
# def unique(iterable):
# def unique_by(iterable, key):
# def intersection(iterable1, iterable2):
# def intersection_by(iterable1, iterable2, key):
# def grouper(n, iterable, padvalue=None):
# def fname2speaker(corpus_type):
# def fscore(p, r):
#
# Path: tde/substrings/acss.py
# def pairwise_substring_completion(fragment1, fragment2, corpus,
# minlength, maxlength):
# name1, name2 = fragment1.name, fragment2.name
# tokenseq1 = [(f.mark, f.interval)
# for f in corpus.tokens(name1, fragment1.interval)]
# tokenseq2 = [(f.mark, f.interval)
# for f in corpus.tokens(name2, fragment2.interval)]
#
# for seq1, seq2 in psubstrings(tokenseq1, tokenseq2, minlength, maxlength):
# submark1, intervalseq1 = zip(*seq1)
# submark2, intervalseq2 = zip(*seq2)
# interval1 = Interval(intervalseq1[0].start, intervalseq1[-1].end)
# interval2 = Interval(intervalseq2[0].start, intervalseq2[-1].end)
# yield (FragmentToken(name1, interval1, submark1),
# FragmentToken(name2, interval2, submark2))
#
# Path: tde/data/classes.py
# class ClassDict(collections.Mapping):
# """
# Mapping representing the partitioning of a collection of FragmentTokens
# into classes
#
# Parameters
# ----------
# clsdict : dict from ClassID to tuple of FragmentToken
#
# Methods
# -------
# iter_fragments(with_class=False)
# Iterate over single FragmentTokens.
# iter_pairs()
# Iterate over pairs of FragmentTokens.
# restrict()
# Restrict to a collection of Intervals
#
# """
# def __init__(self, clsdict):
# self.clsdict = clsdict
#
# def __contains__(self, key):
# return key in self.clsdict
#
# def __getitem__(self, key):
# return self.clsdict[key]
#
# def __iter__(self):
# return iter(self.clsdict)
#
# def __len__(self):
# return len(self.clsdict)
#
# def __str__(self):
# return str(self.clsdict)
#
# def __eq__(self, other):
# return self.clsdict == other.clsdict
#
# def __ne__(self, other):
# return not self.__eq__(other)
#
# def pretty(self):
# return pformat(self.clsdict)
#
# def iter_fragments(self, with_class=False):
# """
# Iterate over FragmentTokens.
#
# Parameters
# ----------
# with_class : bool, optional
# Iterate over (ClassID, FragmentToken) pairs instead
#
# Returns
# -------
# Iterator over FragmentToken or (ClassID, FragmentToken) pairs
#
# """
# if with_class:
# return unique(flatten(izip(repeat(c), v)
# for c, v in self.clsdict.iteritems()))
# else:
# return unique(flatten(self.clsdict.itervalues()))
#
# def iter_pairs(self, within, order):
# """
# Iterate over FragmentToken pairs.
#
# Parameters
# ----------
# within : bool
# Only select pairs from the same class.
# order : bool
# Also include reverse of a pair.
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs.
#
# """
# vals = self.clsdict.itervalues()
# if within:
# if order:
# pairs = flatten(((f1, f2), (f2, f1))
# for fragments in vals
# for f1, f2 in combinations(fragments, 2))
# else:
# pairs = (tuple(sorted((f1, f2),
# key=lambda f: (f.name, f.interval.start)))
# for fragments in vals
# for f1, f2 in combinations(fragments, 2))
# else: # across classes
# if order:
# pairs = (((f1, f2), (f2, f1))
# for f1, f2 in combinations(flatten(vals), 2))
# pairs = flatten(pairs)
# else:
# pairs = (tuple(sorted((f1, f2),
# key=lambda f: (f.name, f.interval.start)))
# for f1, f2 in combinations(flatten(vals), 2))
# return unique(ifilterfalse(lambda f: f[0].interval.overlaps_with(f[1].interval),
# pairs))
#
# def restrict(self, interval_db, remove_singletons=False):
# """
# Restrict the ClassDict to a set of Intervals.
#
# Returns a new ClassDict object with only the FragmentTokens
# that are fully covered in `interval_db`.
#
# Parameters
# ----------
# interval_db : IntervalDB
# Collection of Intervals
# remove_singletons : bool
# Remove classes with a single element
#
# Returns
# -------
# ClassDict
# New ClassDict object restricted to the fragments in `interval_db`
#
# """
# r = {}
# for classID, fragments in self.clsdict.iteritems():
# fs = [f for f in fragments
# if interval_db.is_covered(f.name, f.interval)]
# if len(fs) == 0:
# pass
# elif len(fs) == 1:
# if remove_singletons:
# pass
# else:
# r[classID] = tuple(fs)
# else:
# r[classID] = tuple(fs)
# return ClassDict(r)
. Output only the next line. | return unique(f.mark for f in flatten(pairs)) |
Given the code snippet: <|code_start|> ['name', 'interval', 'mark'])
FragmentToken.__repr__ = \
lambda self: '%s(%r, %r, %r)' % (self.__class__.__name__,
self.name, self.interval,
self.mark)
def token_cmp(token1, token2):
"""Comparison function for FragmentToken objects.
Compares tokens on their interval. Returns -1 if `token1` < `token2`,
0 if `token1` == `token2` (that is, they overlap) and
1 if `token1` > `token2`.
Parameters
----------
token1, token2 : FragmentToken
Returns
-------
int
Comparison result.
Raises
------
ValueError
If the names of the FragmentTokens don't match.
"""
if token1.name != token2.name:
raise ValueError('fragments with different `name` values cannot be '
'compared')
<|code_end|>
, generate the next line using the imports in this file:
import collections
from tde.data.interval import interval_cmp
and context (functions, classes, or occasionally code) from other files:
# Path: tde/data/interval.py
# def interval_cmp(i1, i2):
# """Interval comparison function.
#
# Compares two intervals as temporal objects on a timeline Interval i1 == i2
# iff i1.start == i2.start and i1.end == i2.end. If i1 overlaps with i2, the
# interval with the earliest start is considered the lesser. If i1 does not
# overlap with i2, the interval with the earliest end is the lesser.
#
# The function returns -1 if i1 < i2, 0 if i1 == i2 and 1 if i1 > i2.
#
# Parameters
# ----------
# i1, i2 : Interval
#
# Returns
# -------
# int
# Comparison result.
#
# Examples
# --------
# >>> i1 = Interval(0, 1)
# >>> i2 = Interval(2, 3)
# >>> interval_cmp(i1, i2)
# -1
# >>> i2 = Interval(0.5, 1)
# >>> interval_cmp(i1, i2)
# 0
# >>> i2 = Interval(0.99, 3)
# >>> interval_cmp(i1, i2)
# -1
# >>> i1 = Interval(4, 5)
# >>> interval_cmp(i1, i2)
# 1
#
# """
# if i1.overlaps_with(i2):
# return 0
# overlap = i1.overlap(i2)
# if overlap > 0.:
# if i1.start < i2.start:
# return -1
# else:
# return 1
# else:
# if i1.end <= i2.start: # i1 to the left of i2
# return -1
# else: # i1 to the right of i2
# return 1
. Output only the next line. | return interval_cmp(token1.interval, token2.interval) |
Predict the next line after this snippet: <|code_start|>"""Evaluate matching score
"""
from __future__ import division
def make_pgold(gold_clsdict, verbose, debug):
with verb_print('constructing pgold set', verbose, True, True):
<|code_end|>
using the current file's imports:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and any relevant context from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
. Output only the next line. | pgold = list(Pclus(gold_clsdict)) |
Continue the code snippet: <|code_start|>
from __future__ import division
def make_pgold(gold_clsdict, verbose, debug):
with verb_print('constructing pgold set', verbose, True, True):
pgold = list(Pclus(gold_clsdict))
if debug:
print banner('PGOLD ({0})'.format(len(pgold)))
print pretty_pairs(pgold)
print
return pgold
def make_pdisc(disc_clsdict, verbose, debug):
with verb_print('constructing pdisc set', verbose, True, True):
pdisc = list(Pclus(disc_clsdict))
if debug:
print banner('PDISC ({0})'.format(len(pdisc)))
print pretty_pairs(pdisc)
print
return pdisc
def make_psubs(disc_clsdict, corpus, minlength, maxlength,
verbose, debug):
with verb_print('constructing psubs set', verbose, True, True):
<|code_end|>
. Use current file imports:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and context (classes, functions, or code) from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
. Output only the next line. | psubs = list(Psubs(disc_clsdict, corpus, minlength=minlength, |
Predict the next line after this snippet: <|code_start|> print banner('PDISC ({0})'.format(len(pdisc)))
print pretty_pairs(pdisc)
print
return pdisc
def make_psubs(disc_clsdict, corpus, minlength, maxlength,
verbose, debug):
with verb_print('constructing psubs set', verbose, True, True):
psubs = list(Psubs(disc_clsdict, corpus, minlength=minlength,
maxlength=maxlength))
if debug:
print banner('PSUBS ({0})'.format(len(psubs)))
print pretty_pairs(psubs)
print
return psubs
def make_typeset(psubs, verbose, debug):
with verb_print('making typeset', verbose, True, True):
ts = list(typeset(psubs))
if debug:
print banner('TYPES(PSUBS) ({0})'.format(len(ts)))
print pformat(ts)
print
return ts
def make_weights(psubs, verbose, debug):
with verb_print('making weights', verbose, True, True):
<|code_end|>
using the current file's imports:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and any relevant context from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
. Output only the next line. | ws = weights(psubs) |
Based on the snippet: <|code_start|>"""Evaluate matching score
"""
from __future__ import division
def make_pgold(gold_clsdict, verbose, debug):
with verb_print('constructing pgold set', verbose, True, True):
pgold = list(Pclus(gold_clsdict))
if debug:
<|code_end|>
, predict the immediate next line with the help of imports:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and context (classes, functions, sometimes code) from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
. Output only the next line. | print banner('PGOLD ({0})'.format(len(pgold))) |
Continue the code snippet: <|code_start|>"""Evaluate matching score
"""
from __future__ import division
def make_pgold(gold_clsdict, verbose, debug):
with verb_print('constructing pgold set', verbose, True, True):
pgold = list(Pclus(gold_clsdict))
if debug:
print banner('PGOLD ({0})'.format(len(pgold)))
<|code_end|>
. Use current file imports:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and context (classes, functions, or code) from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
. Output only the next line. | print pretty_pairs(pgold) |
Here is a snippet: <|code_start|> maxlength=maxlength))
if debug:
print banner('PSUBS ({0})'.format(len(psubs)))
print pretty_pairs(psubs)
print
return psubs
def make_typeset(psubs, verbose, debug):
with verb_print('making typeset', verbose, True, True):
ts = list(typeset(psubs))
if debug:
print banner('TYPES(PSUBS) ({0})'.format(len(ts)))
print pformat(ts)
print
return ts
def make_weights(psubs, verbose, debug):
with verb_print('making weights', verbose, True, True):
ws = weights(psubs)
if debug:
print banner('WEIGHTS(PSUBS) ({0})'.format(len(ws)))
print pformat(ws)
print
return ws
def make_psubs_pgold_nmatch(pgold, psubs, verbose, debug):
with verb_print('making psubs/pgold nmatch', verbose, True, True):
<|code_end|>
. Write the next line using the current file imports:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and context from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
, which may include functions, classes, or code. Output only the next line. | psubs_pgold_intersect = intersection(pgold, psubs) |
Continue the code snippet: <|code_start|> def __len__(self):
return len(self.clsdict)
def __str__(self):
return str(self.clsdict)
def __eq__(self, other):
return self.clsdict == other.clsdict
def __ne__(self, other):
return not self.__eq__(other)
def pretty(self):
return pformat(self.clsdict)
def iter_fragments(self, with_class=False):
"""
Iterate over FragmentTokens.
Parameters
----------
with_class : bool, optional
Iterate over (ClassID, FragmentToken) pairs instead
Returns
-------
Iterator over FragmentToken or (ClassID, FragmentToken) pairs
"""
if with_class:
<|code_end|>
. Use current file imports:
from pprint import pformat
from itertools import izip, repeat, combinations, ifilterfalse
from tde.util.functions import unique, flatten
import collections
and context (classes, functions, or code) from other files:
# Path: tde/util/functions.py
# def iterator_length(iterable):
# def unique(iterable):
# def unique_by(iterable, key):
# def intersection(iterable1, iterable2):
# def intersection_by(iterable1, iterable2, key):
# def grouper(n, iterable, padvalue=None):
# def fname2speaker(corpus_type):
# def fscore(p, r):
. Output only the next line. | return unique(flatten(izip(repeat(c), v) |
Predict the next line for this snippet: <|code_start|> def __len__(self):
return len(self.clsdict)
def __str__(self):
return str(self.clsdict)
def __eq__(self, other):
return self.clsdict == other.clsdict
def __ne__(self, other):
return not self.__eq__(other)
def pretty(self):
return pformat(self.clsdict)
def iter_fragments(self, with_class=False):
"""
Iterate over FragmentTokens.
Parameters
----------
with_class : bool, optional
Iterate over (ClassID, FragmentToken) pairs instead
Returns
-------
Iterator over FragmentToken or (ClassID, FragmentToken) pairs
"""
if with_class:
<|code_end|>
with the help of current file imports:
from pprint import pformat
from itertools import izip, repeat, combinations, ifilterfalse
from tde.util.functions import unique, flatten
import collections
and context from other files:
# Path: tde/util/functions.py
# def iterator_length(iterable):
# def unique(iterable):
# def unique_by(iterable, key):
# def intersection(iterable1, iterable2):
# def intersection_by(iterable1, iterable2, key):
# def grouper(n, iterable, padvalue=None):
# def fname2speaker(corpus_type):
# def fscore(p, r):
, which may contain function names, class names, or code. Output only the next line. | return unique(flatten(izip(repeat(c), v) |
Given the code snippet: <|code_start|>"""Evaluate token and type measures"""
from __future__ import division
def evaluate_token_type(disc_clsdict, wrd_corpus,
verbose=False, debug=False):
n_word_tokens = iterator_length(unique(wrd_corpus.iter_fragments()))
word_types = set(f.mark for f in wrd_corpus.iter_fragments())
n_word_types = len(word_types)
n_disc_fragments = iterator_length(disc_clsdict.iter_fragments())
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
from tde.util.printing import verb_print
from tde.util.functions import unique, iterator_length
and context (functions, classes, or occasionally code) from other files:
# Path: tde/util/printing.py
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# Path: tde/util/functions.py
# def unique(iterable):
# seen = set()
# seen_add = seen.add
# for element in iterable:
# if element in seen:
# continue
# seen_add(element)
# yield element
#
# def iterator_length(iterable):
# return sum(1 for _ in iterable)
. Output only the next line. | with verb_print('querying words', verbose, True, True, True): |
Based on the snippet: <|code_start|>"""Evaluate token and type measures"""
from __future__ import division
def evaluate_token_type(disc_clsdict, wrd_corpus,
verbose=False, debug=False):
<|code_end|>
, predict the immediate next line with the help of imports:
import numpy as np
from tde.util.printing import verb_print
from tde.util.functions import unique, iterator_length
and context (classes, functions, sometimes code) from other files:
# Path: tde/util/printing.py
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# Path: tde/util/functions.py
# def unique(iterable):
# seen = set()
# seen_add = seen.add
# for element in iterable:
# if element in seen:
# continue
# seen_add(element)
# yield element
#
# def iterator_length(iterable):
# return sum(1 for _ in iterable)
. Output only the next line. | n_word_tokens = iterator_length(unique(wrd_corpus.iter_fragments())) |
Based on the snippet: <|code_start|>"""Evaluate token and type measures"""
from __future__ import division
def evaluate_token_type(disc_clsdict, wrd_corpus,
verbose=False, debug=False):
<|code_end|>
, predict the immediate next line with the help of imports:
import numpy as np
from tde.util.printing import verb_print
from tde.util.functions import unique, iterator_length
and context (classes, functions, sometimes code) from other files:
# Path: tde/util/printing.py
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# Path: tde/util/functions.py
# def unique(iterable):
# seen = set()
# seen_add = seen.add
# for element in iterable:
# if element in seen:
# continue
# seen_add(element)
# yield element
#
# def iterator_length(iterable):
# return sum(1 for _ in iterable)
. Output only the next line. | n_word_tokens = iterator_length(unique(wrd_corpus.iter_fragments())) |
Predict the next line for this snippet: <|code_start|>
@pytest.mark.randomize(it=pytest.list_of(str))
def test_unique(it):
assert (sorted(list(unique(it))) == sorted(list(set(it))))
def test_intersection():
it1 = range(20)
it2 = range(10, 30)
<|code_end|>
with the help of current file imports:
import pytest
from tde.util.functions import unique, intersection, fname2speaker
and context from other files:
# Path: tde/util/functions.py
# def unique(iterable):
# seen = set()
# seen_add = seen.add
# for element in iterable:
# if element in seen:
# continue
# seen_add(element)
# yield element
#
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
#
# def fname2speaker(corpus_type):
# if corpus_type == 'buckeye':
# return lambda x: path.splitext(path.basename(x))[0][:3]
# elif corpus_type == 'xitsonga':
# return lambda x: path.splitext(path.basename(x))[0].split('_')[2]
# else:
# raise NotImplementedError('no implementation of fname2speaker for {0}'
# .format(corpus_type))
, which may contain function names, class names, or code. Output only the next line. | assert (sorted(list(intersection(it1, it2))) == |
Given snippet: <|code_start|>
@pytest.mark.randomize(it=pytest.list_of(str))
def test_unique(it):
assert (sorted(list(unique(it))) == sorted(list(set(it))))
def test_intersection():
it1 = range(20)
it2 = range(10, 30)
assert (sorted(list(intersection(it1, it2))) ==
sorted(list(set(it1) & set(it2))))
def test_fname2speaker():
t = 'buckeye'
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pytest
from tde.util.functions import unique, intersection, fname2speaker
and context:
# Path: tde/util/functions.py
# def unique(iterable):
# seen = set()
# seen_add = seen.add
# for element in iterable:
# if element in seen:
# continue
# seen_add(element)
# yield element
#
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
#
# def fname2speaker(corpus_type):
# if corpus_type == 'buckeye':
# return lambda x: path.splitext(path.basename(x))[0][:3]
# elif corpus_type == 'xitsonga':
# return lambda x: path.splitext(path.basename(x))[0].split('_')[2]
# else:
# raise NotImplementedError('no implementation of fname2speaker for {0}'
# .format(corpus_type))
which might include code, classes, or functions. Output only the next line. | assert (fname2speaker(t)('s2801a_1923810923') == 's28') |
Predict the next line after this snippet: <|code_start|>"""Evaluate matching score
"""
from __future__ import division
def make_pgold(gold_clsdict, verbose, debug):
with verb_print('constructing pgold set', verbose, True, True):
<|code_end|>
using the current file's imports:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and any relevant context from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
. Output only the next line. | pgold = list(Pclus(gold_clsdict)) |
Given the following code snippet before the placeholder: <|code_start|>
from __future__ import division
def make_pgold(gold_clsdict, verbose, debug):
with verb_print('constructing pgold set', verbose, True, True):
pgold = list(Pclus(gold_clsdict))
if debug:
print banner('PGOLD ({0})'.format(len(pgold)))
print pretty_pairs(pgold)
print
return pgold
def make_pdisc(disc_clsdict, verbose, debug):
with verb_print('constructing pdisc set', verbose, True, True):
pdisc = list(Pclus(disc_clsdict))
if debug:
print banner('PDISC ({0})'.format(len(pdisc)))
print pretty_pairs(pdisc)
print
return pdisc
def make_psubs(disc_clsdict, corpus, minlength, maxlength,
verbose, debug):
with verb_print('constructing psubs set', verbose, True, True):
<|code_end|>
, predict the next line using imports from the current file:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and context including class names, function names, and sometimes code from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
. Output only the next line. | psubs = list(Psubs(disc_clsdict, corpus, minlength=minlength, |
Predict the next line for this snippet: <|code_start|> if debug:
print banner('PSUBS ({0})'.format(len(psubs)))
print pretty_pairs(psubs)
print
return psubs
def make_typeset(psubs, verbose, debug):
with verb_print('making typeset', verbose, True, True):
ts = list(typeset(psubs))
if debug:
print banner('TYPES(PSUBS) ({0})'.format(len(ts)))
print pformat(ts)
print
return ts
def make_weights(psubs, verbose, debug):
with verb_print('making weights', verbose, True, True):
ws = weights(psubs)
if debug:
print banner('WEIGHTS(PSUBS) ({0})'.format(len(ws)))
print pformat(ws)
print
return ws
def make_psubs_pgold_nmatch(pgold, psubs, verbose, debug):
with verb_print('making psubs/pgold nmatch', verbose, True, True):
psubs_pgold_intersect = intersection(pgold, psubs)
<|code_end|>
with the help of current file imports:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and context from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
, which may contain function names, class names, or code. Output only the next line. | psubs_pgold_nmatch = nmatch(psubs_pgold_intersect) |
Predict the next line for this snippet: <|code_start|> print banner('PDISC ({0})'.format(len(pdisc)))
print pretty_pairs(pdisc)
print
return pdisc
def make_psubs(disc_clsdict, corpus, minlength, maxlength,
verbose, debug):
with verb_print('constructing psubs set', verbose, True, True):
psubs = list(Psubs(disc_clsdict, corpus, minlength=minlength,
maxlength=maxlength))
if debug:
print banner('PSUBS ({0})'.format(len(psubs)))
print pretty_pairs(psubs)
print
return psubs
def make_typeset(psubs, verbose, debug):
with verb_print('making typeset', verbose, True, True):
ts = list(typeset(psubs))
if debug:
print banner('TYPES(PSUBS) ({0})'.format(len(ts)))
print pformat(ts)
print
return ts
def make_weights(psubs, verbose, debug):
with verb_print('making weights', verbose, True, True):
<|code_end|>
with the help of current file imports:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and context from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
, which may contain function names, class names, or code. Output only the next line. | ws = weights(psubs) |
Here is a snippet: <|code_start|>"""Evaluate matching score
"""
from __future__ import division
def make_pgold(gold_clsdict, verbose, debug):
with verb_print('constructing pgold set', verbose, True, True):
pgold = list(Pclus(gold_clsdict))
if debug:
<|code_end|>
. Write the next line using the current file imports:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and context from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
, which may include functions, classes, or code. Output only the next line. | print banner('PGOLD ({0})'.format(len(pgold))) |
Given the code snippet: <|code_start|>"""Evaluate matching score
"""
from __future__ import division
def make_pgold(gold_clsdict, verbose, debug):
with verb_print('constructing pgold set', verbose, True, True):
pgold = list(Pclus(gold_clsdict))
if debug:
print banner('PGOLD ({0})'.format(len(pgold)))
<|code_end|>
, generate the next line using the imports in this file:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and context (functions, classes, or occasionally code) from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
. Output only the next line. | print pretty_pairs(pgold) |
Given the code snippet: <|code_start|> maxlength=maxlength))
if debug:
print banner('PSUBS ({0})'.format(len(psubs)))
print pretty_pairs(psubs)
print
return psubs
def make_typeset(psubs, verbose, debug):
with verb_print('making typeset', verbose, True, True):
ts = list(typeset(psubs))
if debug:
print banner('TYPES(PSUBS) ({0})'.format(len(ts)))
print pformat(ts)
print
return ts
def make_weights(psubs, verbose, debug):
with verb_print('making weights', verbose, True, True):
ws = weights(psubs)
if debug:
print banner('WEIGHTS(PSUBS) ({0})'.format(len(ws)))
print pformat(ws)
print
return ws
def make_psubs_pgold_nmatch(pgold, psubs, verbose, debug):
with verb_print('making psubs/pgold nmatch', verbose, True, True):
<|code_end|>
, generate the next line using the imports in this file:
from pprint import pformat
from tde.data.sets import Pclus, Psubs, nmatch, typeset, weights
from tde.util.printing import banner, verb_print, pretty_pairs
from tde.util.functions import intersection
import numpy as np
and context (functions, classes, or occasionally code) from other files:
# Path: tde/data/sets.py
# def Pclus(clsdict):
# r"""Generate Pclus - all the pairs of FragmentTokens per cluster
#
# .. math::
#
# P_{\mathrm{clus}} = \{((i, j), (k, l)) | \exists c \in C_{\mathrm{disc}} \land (i,j)\in c \land (j,k) \in c\}
#
# where :math:`C_{\mathrm{disc}}` is the set of discovered clusters, each set being a set of fragments.
#
# Parameters
# ----------
# clsdict : ClassDict
#
# Returns
# -------
# Iterator (FragmentToken, FragmentToken) pairs
#
# """
# return clsdict.iter_pairs(within=True, order=True)
#
# def Psubs(clsdict, corpus, minlength=3, maxlength=20):
# """
# Generate Psubs - the substring completion of a set of pairs.
#
# Psubs is the association between all substrings of the pairs in classdict.
#
# Parameters
# ----------
# clsdict : ClassDict
# corpus : Corpus
# minlength : int, optional
# minimum number of phones for the substrings
# maxlength : int, optional
# maximum number of phones for the substrings
#
# Returns
# -------
# Iterator over (FragmentToken, FragmentToken) pairs
#
# """
# sub_pairs = (pairwise_substring_completion(f1, f2, corpus,
# minlength,
# maxlength)
# for f1, f2 in clsdict.iter_pairs(within=True, order=True))
# return unique(flatten(sub_pairs))
#
# def nmatch(pairs):
# """Count the number of pairs of fragments per annotation string.
#
# .. math::
# \mathrm{nmatch}(t, P) = |\{(x, (i, j))\in P | T_{i,j} = t\}|
#
# Parameters
# ----------
# pairs : iterable over (FragmentToken, FragmentToken) pairs
# corpus : Corpus
#
# Returns
# -------
# d : dict from string tuple to int
# Annotation counts.
#
# """
# return Counter(f.mark
# for _, f in pairs)
#
# def typeset(pairs):
# """
# Yield the unique marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# Iterator over strings
# Unique marks.
#
# """
# return unique(f.mark for f in flatten(pairs))
#
# def weights(pairs):
# """
# Calculate the relative frequencies of the marks in a pair iterator.
#
# Parameters
# ----------
# pairs : Iterator over (FragmentToken, FragmentToken) pairs
#
# Returns
# -------
# dict from string to float
# Relative frequencies of the marks.
# """
# total = iterator_length(unique_flatten(pairs))
# fs = freqs(pairs)
# return {t: fs[t] / total for t in fs}
#
# Path: tde/util/printing.py
# def banner(s):
# l = len(s)
# return '-'*l+'\n'+s+'\n'+'-'*l
#
# @contextmanager
# def verb_print(label, verbose=False, when_done=False,
# timeit=False, with_dots=False):
# if timeit:
# t0 = time.time()
# if verbose:
# msg = label + ('...' if with_dots else '') + ('' if when_done else '\n')
# print msg,
# sys.stdout.flush()
# try:
# yield
# finally:
# if verbose and when_done:
# if timeit:
# print 'done. Time: {0:.3f}s'.format(time.time() - t0)
# else:
# print 'done.'
# sys.stdout.flush()
# elif verbose and timeit:
# print '{1}: time: {0:.3f}s'.format(time.time() - t0, label)
# sys.stdout.flush()
#
# def pretty_pairs(pclus_set):
# strings = [('<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f1.name, f1.interval.start,
# f1.interval.end, ' '.join(f1.mark)),
# '<{0} [{1:.3f},{2:.3f}] ({3})>'.format(f2.name, f2.interval.start,
# f2.interval.end, ' '.join(f2.mark)))
# for f1, f2 in pclus_set]
# if len(strings) == 0:
# return ''
# longest = max(len(x[0]) for x in strings)
# return '\n'.join('{1:{0}s} - {2:{0}s}'.format(longest, s1, s2)
# for s1, s2 in strings)
#
# Path: tde/util/functions.py
# def intersection(iterable1, iterable2):
# set1 = set(iterable1)
# seen = set()
# seen_add = seen.add
# for element in iterable2:
# if element in seen:
# continue
# seen_add(element)
# if element in set1:
# yield element
. Output only the next line. | psubs_pgold_intersect = intersection(pgold, psubs) |
Predict the next line for this snippet: <|code_start|> self._wmax = wmax
self._atom_atm=[]
self._atom_ion=[]
self._atom_lbl=[]
self._atom_wvl=[]
self._atom_fvl=[]
self._atom_gam=[]
self._molecule_atm=[]
self._molecule_ion=[]
self._molecule_lbl=[]
self._molecule_wvl=[]
self._molecule_fvl=[]
self._molecule_gam=[]
self.load_lines()
def solar(self):
elem = np.array(['H ', 'He','Li','Be','B ', 'C ', 'N ', 'O ', 'F ', 'Ne','Na','Mg','Al','Si','P ', 'S ', 'Cl','Ar','K ', 'Ca','Sc','Ti','V ', 'Cr','Mn','Fe','Co','Ni','Cu','Zn'])
mass = np.array([1.0, 4.0, 7.0, 8.0, 11.0, 12.0,14.0,16.0,19.0,20.0,23.0,24.0,27.0,28.0,31.0,32.0,35.0,36.0,39.0,40.0,45.0,48.0,51.0,52.0,55.0,56.0,59.0,60.0,63.0,64.0])
solr = np.array([12.0,10.93,3.26,1.30,2.79,8.43,7.83,8.69,4.42,7.93,6.26,7.56,6.44,7.51,5.42,7.14,5.23,6.40,5.06,6.29,3.05,4.91,3.95,5.64,5.48,7.47,4.93,6.21,4.25,4.63])
solar = np.zeros(self._atom_atm.size)
for i in range(elem.size):
w = np.where(self._atom_atm==elem[i])
solar[w] = solr[i]
self.solar = solar
return
def load_lines(self, verbose=1):
# Load the lines file
print("Loading a list of atomic transitions...")
alispath = __file__#"/".join(__file__.split("/")[:-2])#+"/data/atomic.dat"
<|code_end|>
with the help of current file imports:
import pdb
import os
import sys
import numpy as np
import matplotlib
import matplotlib.transforms as mtransforms
from matplotlib.lines import Line2D
from alis import alload
from alis import alis as alismain
and context from other files:
# Path: alis/alload.py
# def cpucheck(ncpu,curcpu=0,verbose=2):
# def usage(name):
# def optarg(pathname, argv=None, verbose=2):
# def set_params(lines, argflag, setstr="", verbose=None):
# def load_settings(fname,verbose=2):
# def initialise():
# def check_argflag(argflag, curcpu=None):
# def load_input(slf, filename=None, textstr=None, updateself=True):
# def load_atomic(slf):
# def load_data(slf, datlines, data=None):
# def load_userdata(data, colspl, wfe, verbose=2):
# def load_datafile(filename, colspl, wfe, verbose=2, datatype="default"):
# def load_ascii(filename, colspl, wfe, wfek, verbose=2):
# def load_fits(filename, colspl, wfe, verbose=2, ext=0, datatype='default'):
# def load_model(slf, modlines, updateself=True):
# def load_links(slf, lnklines):
# def load_onefits(slf,loadname):
# def load_subpixels(slf, parin):
# def load_par_influence(slf, parin):
# def inlinks(ivar):
# def get_binsize(wave, bintype="km/s", maxonly=True, verbose=2):
# def load_parinfo(slf):
# def load_tied(p, ptied=None, infl=None):
# def getis(string, ival, infl, retlhs=True):
, which may contain function names, class names, or code. Output only the next line. | argflag = alload.optarg(alispath, verbose=verbose) |
Based on the snippet: <|code_start|>matplotlib.use('Qt5Agg')
if __name__ == '__main__':
dla = dlas("object1")
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from myobjects import dlas
from alis.prepfit import specplot
and context (classes, functions, sometimes code) from other files:
# Path: alis/prepfit/specplot.py
# class SelectRegions(object):
# class props:
# class atomic:
# def __init__(self, canvas, ax, spec, prop, atom, vel=500.0):
# def draw_lines(self):
# def draw_callback(self, event):
# def get_ind_under_point(self, event):
# def sort_lines(self, method="ion"):
# def get_current_line(self):
# def button_press_callback(self, event):
# def button_release_callback(self, event):
# def key_press_callback(self, event):
# def next_element(self, pm, ion=False):
# def next_spectrum(self):
# def update_regions(self):
# def update_waverange(self):
# def write_data(self):
# def __init__(self, dla):
# def set_regions(arr):
# def __init__(self, wmin=None, wmax=None):
# def solar(self):
# def load_lines(self, verbose=1):
# def in1d_tol(avec, bvec, tol):
. Output only the next line. | prop = specplot.props(dla) |
Predict the next line for this snippet: <|code_start|> shestring += errstr
else:
outstring += outstr
errstring += errstr
if blind: return outstring
if getlines:
if errs is None:
return outstring.split("\n"), [cvstring.split("\n"),cvastring.split("\n"),shstring.split("\n"),shastring.split("\n")]
else:
return outstring.split("\n"), errstring.split("\n"), [cvstring.split("\n"),cvestring.split("\n"),cvastring.split("\n"),shstring.split("\n"),shestring.split("\n"),shastring.split("\n")]
if errs is None:
return outstring, [cvstring,cvastring,shstring,shastring]
else:
return outstring, errstring, [cvstring,cvestring,cvastring,shstring,shestring,shastring]
def save_model(slf,params,errors,info,printout=True,extratxt=["",""],filename=None,getlines=False,save=True):
"""
Save the input model into an output script
that can be run as input.
"""
msgs.info("Saving the best-fitting model parameters", verbose=slf._argflag['out']['verbose'])
if filename is None:
filename = extratxt[0]+slf._argflag['out']['modelname']+extratxt[1]
prestringA = "#\n# Generated by ALIS on {0:s}\n#\n".format(datetime.datetime.now().strftime("%d/%m/%y at %H:%M:%S"))
prestringA += "# Running Time (hrs) = {0:f}\n".format(info[0])
prestringA += "# Initial Chi-Squared = {0:f}\n".format(slf._chisq_init)
prestringA += "# Bestfit Chi-Squared = {0:f}\n".format(info[1])
prestringA += "# Degrees-of-Freedom = {0:d}\n".format(info[2])
prestringA += "# Num. of Iterations = {0:d}\n".format(info[3])
<|code_end|>
with the help of current file imports:
import os
import copy
import numpy as np
import datetime
import astropy.io.fits as pyfits
from alis import almsgs
from alis import alfunc_base
from matplotlib import pyplot as plt
from matplotlib import cm as pltcm
from alis.alutils import getreason
and context from other files:
# Path: alis/almsgs.py
# class Colors:
# class msgs:
# def disable(self):
# def __init__(self):
# def alisheader(self, prognm, verbose=2):
# def signal_handler(self, signalnum, handler):
# def error(self, msg, verbose=None):
# def info(self, msg, verbose=None):
# def warn(self, msg, verbose=None):
# def test(self, msg, verbose=None):
# def bug(self, msg, verbose=None):
# def work(self, msg, verbose=None):
# def input(self):
# def newline(self,verbose=None):
# def indent(self,verbose=None):
#
# Path: alis/alfunc_base.py
# class Base :
# def __init__(self, prgname="", getinst=False, atomic=None, verbose=2):
# def call_CPU(self, x, p, ae='em', mkey=None, ncpus=1):
# def model():
# def call_GPU(self, x, p, ae='em'):
# def adjust_fix(self, mp, cntr, jval, parj):
# def adjust_lim(self, mp, cntr, jval, jind, parj):
# def getminmax(self, par, fitrng, Nsig=5.0):
# def load(self, instr, cntr, mp, specid):
# def check_tied_param(ival, cntr, mps, iind):
# def parin(self, i, par, parb):
# def parout(self, params, mp, istart, level, errs=None, reletter=False, conv=None):
# def set_pinfo(self, pinfo, level, mp, lnk, mnum):
# def set_vars(self, p, level, mp, ival, wvrng=[0.0,0.0], spid='None', levid=None, nexbin=None, ddpid=None, getinfl=False):
# def gtoef(self, num, fmt):
# def tick_info(self, p, level, mp, ival, wvrng=[0.0,0.0], spid='None', levid=None):
# def call(prgname="",getfuncs=False,getinst=False,atomic=None,verbose=2):
#
# Path: alis/alutils.py
# def getreason(idnum,verbose=2):
# if idnum == 1: return "Both actual and predicted relative reductions in the sum of squares are at most ftol"
# elif idnum == 2: return "Relative error between two consecutive iterates is at most xtol"
# elif idnum == 3: return "Both actual and predicted relative reductions in the sum of squares are at most ftol and the relative error between two consecutive iterates is at most xtol"
# elif idnum == 4: return "The cosine of the angle between fvec and any column of the jacobian is at most gtol in absolute value"
# elif idnum == 5: return "The maximum number of iterations has been reached"
# elif idnum == 6: return "ftol is too small. No further reduction in the sum of squares is possible"
# elif idnum == 7: return "xtol is too small. No further improvement in the approximate solution x is possible"
# elif idnum == 8: return "gtol is too small. fvec is orthogonal to the columns of the jacobian to machine precision"
# elif idnum == 9: return "The relative reduction in the sum of squares is less than atol"
# else:
# msgs.bug("Convergence reason is unknown (probably failed) --- please contact the author",verbose=verbose)
# return "Convergence reason is unknown (probably failed) --- please contact the author"
, which may contain function names, class names, or code. Output only the next line. | prestringA += "# Convergence Reason = {0:s}\n".format(getreason(info[4],verbose=slf._argflag['out']['verbose'])) |
Continue the code snippet: <|code_start|> mtyp = mp['mtyp'][i]
if mp['emab'][i] != lastemab:
if mp['emab'][i]=="em": aetag = "emission"
elif mp['emab'][i]=="ab": aetag = "absorption"
elif mp['emab'][i]=="cv": aetag = "Convolution"
elif mp['emab'][i]=="sh": aetag = "Shift"
elif mp['emab'][i]=="zl": aetag = "zerolevel"
convstring += "#"+aetag+"\n"
lastemab = mp['emab'][i]
funcinst[mtyp]._keywd = mp['mkey'][i]
outstr, cnvstr, level = funccall[mtyp].parout(funcinst[mtyp], diff, mp, i, level, conv=thresh)
if outstr in donecv or outstr in donesh or outstr in donezl: continue
if mp['emab'][i] == "cv": donecv.append(outstr) # Make sure we don't print convolution more than once.
if mp['emab'][i] == "sh": donesh.append(outstr) # Make sure we don't print shifts more than once.
if mp['emab'][i] == "zl": donezl.append(outstr) # Make sure we don't print zerolevel more than once.
convstring += cnvstr
return convstring
def save_convtest(slf,diff,thresh,info,printout=True,extratxt=["",""]):
"""
Save the details of what parameters have converged.
"""
msgs.info("Saving the best-fitting model parameters", verbose=slf._argflag['out']['verbose'])
filename = extratxt[0]+slf._argflag['out']['modelname']+'.conv'+extratxt[1]
prestring = "#\n# Generated by ALIS on {0:s}\n#\n".format(datetime.datetime.now().strftime("%d/%m/%y at %H:%M:%S"))
prestring += "# Running Time (hrs) = {0:f}\n".format(info[0])
prestring += "# Initial Chi-Squared = {0:f}\n".format(slf._chisq_init)
prestring += "# Bestfit Chi-Squared = {0:f}\n".format(info[1])
prestring += "# Degrees-of-Freedom = {0:d}\n".format(info[2])
prestring += "# Num. of Iterations = {0:d}\n".format(info[3])
<|code_end|>
. Use current file imports:
import os
import datetime
from alis import almsgs
from alis.alutils import getreason
and context (classes, functions, or code) from other files:
# Path: alis/almsgs.py
# class Colors:
# class msgs:
# def disable(self):
# def __init__(self):
# def alisheader(self, prognm, verbose=2):
# def signal_handler(self, signalnum, handler):
# def error(self, msg, verbose=None):
# def info(self, msg, verbose=None):
# def warn(self, msg, verbose=None):
# def test(self, msg, verbose=None):
# def bug(self, msg, verbose=None):
# def work(self, msg, verbose=None):
# def input(self):
# def newline(self,verbose=None):
# def indent(self,verbose=None):
#
# Path: alis/alutils.py
# def getreason(idnum,verbose=2):
# if idnum == 1: return "Both actual and predicted relative reductions in the sum of squares are at most ftol"
# elif idnum == 2: return "Relative error between two consecutive iterates is at most xtol"
# elif idnum == 3: return "Both actual and predicted relative reductions in the sum of squares are at most ftol and the relative error between two consecutive iterates is at most xtol"
# elif idnum == 4: return "The cosine of the angle between fvec and any column of the jacobian is at most gtol in absolute value"
# elif idnum == 5: return "The maximum number of iterations has been reached"
# elif idnum == 6: return "ftol is too small. No further reduction in the sum of squares is possible"
# elif idnum == 7: return "xtol is too small. No further improvement in the approximate solution x is possible"
# elif idnum == 8: return "gtol is too small. fvec is orthogonal to the columns of the jacobian to machine precision"
# elif idnum == 9: return "The relative reduction in the sum of squares is less than atol"
# else:
# msgs.bug("Convergence reason is unknown (probably failed) --- please contact the author",verbose=verbose)
# return "Convergence reason is unknown (probably failed) --- please contact the author"
. Output only the next line. | prestring += "# Convergence Reason = {0:s}\n".format(getreason(info[4],verbose=slf._argflag['out']['verbose'])) |
Given the code snippet: <|code_start|>msgs = almsgs.msgs()
try:
except ImportError:
msgs.warn("linetools is not installed. Install it if you wish to use LSF")
<|code_end|>
, generate the next line using the imports in this file:
import numpy as np
import astropy.units as u
from alis import almsgs
from alis import alfunc_base
from linetools.spectra.lsf import LSF as ltLSF
and context (functions, classes, or occasionally code) from other files:
# Path: alis/almsgs.py
# class Colors:
# class msgs:
# def disable(self):
# def __init__(self):
# def alisheader(self, prognm, verbose=2):
# def signal_handler(self, signalnum, handler):
# def error(self, msg, verbose=None):
# def info(self, msg, verbose=None):
# def warn(self, msg, verbose=None):
# def test(self, msg, verbose=None):
# def bug(self, msg, verbose=None):
# def work(self, msg, verbose=None):
# def input(self):
# def newline(self,verbose=None):
# def indent(self,verbose=None):
#
# Path: alis/alfunc_base.py
# class Base :
# def __init__(self, prgname="", getinst=False, atomic=None, verbose=2):
# def call_CPU(self, x, p, ae='em', mkey=None, ncpus=1):
# def model():
# def call_GPU(self, x, p, ae='em'):
# def adjust_fix(self, mp, cntr, jval, parj):
# def adjust_lim(self, mp, cntr, jval, jind, parj):
# def getminmax(self, par, fitrng, Nsig=5.0):
# def load(self, instr, cntr, mp, specid):
# def check_tied_param(ival, cntr, mps, iind):
# def parin(self, i, par, parb):
# def parout(self, params, mp, istart, level, errs=None, reletter=False, conv=None):
# def set_pinfo(self, pinfo, level, mp, lnk, mnum):
# def set_vars(self, p, level, mp, ival, wvrng=[0.0,0.0], spid='None', levid=None, nexbin=None, ddpid=None, getinfl=False):
# def gtoef(self, num, fmt):
# def tick_info(self, p, level, mp, ival, wvrng=[0.0,0.0], spid='None', levid=None):
# def call(prgname="",getfuncs=False,getinst=False,atomic=None,verbose=2):
. Output only the next line. | class LSF(alfunc_base.Base) : |
Next line prediction: <|code_start|># All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class Chart(bv.BaseVisualization, csv.CustomScalesVisualization, vv.ViewportVisualization):
def __init__(self,
dataset,
template_name,
<|code_end|>
. Use current file imports:
(import jinja2
import os
import json
from pive.visualization import defaults as default
from pive.visualization import basevisualization as bv
from pive.visualization import viewportvisualization as vv
from pive.visualization import customscalesvisualization as csv)
and context including class names, function names, or small code snippets from other files:
# Path: pive/visualization/defaults.py
#
# Path: pive/visualization/basevisualization.py
# class BaseVisualization:
# def __init__(self):
# def set_div_hook(self, div_hook):
# def get_js_code(self):
# def get_json_dataset(self):
# def set_title(self, title):
# def set_labels(self, labels):
# def set_dataset(self, dataset):
# def set_chart_colors(self, colors):
# def generate_visualization_dataset(self, dataset):
# def write_dataset_file(self, dataset, destination_url, filename):
# def create_css(self, template):
# def create_html(self, template):
# def create_js(self, template, dataset_url):
# def write_file(self, output, destination_url, filename):
# def create_visualization_files(self, destination_url):
# def set_height(self, height):
# def set_width(self, width):
# def set_dimension(self, width, height):
# def load_template_file(self, template_url):
#
# Path: pive/visualization/viewportvisualization.py
# class ViewportVisualization:
# def __init__(self):
# def setIconProperties(self, iconwidth, iconheight, iconcolor, iconhighlight):
# def setJumplength(self, jumplength):
# def setViewport(self, viewport):
#
# Path: pive/visualization/customscalesvisualization.py
# class CustomScalesVisualization:
# def __init__(self):
# def setScales(self, scales):
. Output only the next line. | width=default.width, |
Predict the next line after this snippet: <|code_start|># All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class Chart(bv.BaseVisualization):
def __init__(self,
dataset,
template_name,
<|code_end|>
using the current file's imports:
import jinja2
import os
import json
from pive.visualization import defaults as default
from pive.visualization import basevisualization as bv
and any relevant context from other files:
# Path: pive/visualization/defaults.py
#
# Path: pive/visualization/basevisualization.py
# class BaseVisualization:
# def __init__(self):
# def set_div_hook(self, div_hook):
# def get_js_code(self):
# def get_json_dataset(self):
# def set_title(self, title):
# def set_labels(self, labels):
# def set_dataset(self, dataset):
# def set_chart_colors(self, colors):
# def generate_visualization_dataset(self, dataset):
# def write_dataset_file(self, dataset, destination_url, filename):
# def create_css(self, template):
# def create_html(self, template):
# def create_js(self, template, dataset_url):
# def write_file(self, output, destination_url, filename):
# def create_visualization_files(self, destination_url):
# def set_height(self, height):
# def set_width(self, width):
# def set_dimension(self, width, height):
# def load_template_file(self, template_url):
. Output only the next line. | width=default.width, |
Using the snippet: <|code_start|># All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class Chart(bv.BaseVisualization, vv.ViewportVisualization):
def __init__(self,
dataset,
template_name,
<|code_end|>
, determine the next line of code. You have imports:
import jinja2
import os
import json
from pive.visualization import defaults as default
from pive.visualization import basevisualization as bv
from pive.visualization import viewportvisualization as vv
and context (class names, function names, or code) available:
# Path: pive/visualization/defaults.py
#
# Path: pive/visualization/basevisualization.py
# class BaseVisualization:
# def __init__(self):
# def set_div_hook(self, div_hook):
# def get_js_code(self):
# def get_json_dataset(self):
# def set_title(self, title):
# def set_labels(self, labels):
# def set_dataset(self, dataset):
# def set_chart_colors(self, colors):
# def generate_visualization_dataset(self, dataset):
# def write_dataset_file(self, dataset, destination_url, filename):
# def create_css(self, template):
# def create_html(self, template):
# def create_js(self, template, dataset_url):
# def write_file(self, output, destination_url, filename):
# def create_visualization_files(self, destination_url):
# def set_height(self, height):
# def set_width(self, width):
# def set_dimension(self, width, height):
# def load_template_file(self, template_url):
#
# Path: pive/visualization/viewportvisualization.py
# class ViewportVisualization:
# def __init__(self):
# def setIconProperties(self, iconwidth, iconheight, iconcolor, iconhighlight):
# def setJumplength(self, jumplength):
# def setViewport(self, viewport):
. Output only the next line. | width=default.width, |
Given the code snippet: <|code_start|>line_stroke = 'black'
font_size = 16
label_size = 18
####################
## Formatting ######
####################
timelabel = '%M %S Sek'
isotimeformat = "%H:%M:%S"
interpolation = 'linear'
scales = ["linear", "linear"]
timescales = ["date", "linear"]
xlabel = 'X'
ylabel = 'Y'
##########################
## Chord Chart specific ##
##########################
fontsize = '1.25em'
ticksize = '.75em"'
textpadding = 45
#Defaults go for kilo-steps (K, 1000). Always combine them right.
ticksteps = 1000
prefix = 'K'
####################
## Colors ##########
####################
iconcolor = '#FF2C00'
iconhighlight = '#FF8B73'
<|code_end|>
, generate the next line using the imports in this file:
from pive.visualization import colorthemes
import sys
and context (functions, classes, or occasionally code) from other files:
# Path: pive/visualization/colorthemes.py
. Output only the next line. | chartcolors = colorthemes.pive |
Given snippet: <|code_start|># All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class Chart(bv.BaseVisualization, csv.CustomScalesVisualization, vv.ViewportVisualization):
def __init__(self,
dataset,
template_name,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import jinja2
import os
import json
from pive.visualization import defaults as default
from pive.visualization import basevisualization as bv
from pive.visualization import viewportvisualization as vv
from pive.visualization import customscalesvisualization as csv
and context:
# Path: pive/visualization/defaults.py
#
# Path: pive/visualization/basevisualization.py
# class BaseVisualization:
# def __init__(self):
# def set_div_hook(self, div_hook):
# def get_js_code(self):
# def get_json_dataset(self):
# def set_title(self, title):
# def set_labels(self, labels):
# def set_dataset(self, dataset):
# def set_chart_colors(self, colors):
# def generate_visualization_dataset(self, dataset):
# def write_dataset_file(self, dataset, destination_url, filename):
# def create_css(self, template):
# def create_html(self, template):
# def create_js(self, template, dataset_url):
# def write_file(self, output, destination_url, filename):
# def create_visualization_files(self, destination_url):
# def set_height(self, height):
# def set_width(self, width):
# def set_dimension(self, width, height):
# def load_template_file(self, template_url):
#
# Path: pive/visualization/viewportvisualization.py
# class ViewportVisualization:
# def __init__(self):
# def setIconProperties(self, iconwidth, iconheight, iconcolor, iconhighlight):
# def setJumplength(self, jumplength):
# def setViewport(self, viewport):
#
# Path: pive/visualization/customscalesvisualization.py
# class CustomScalesVisualization:
# def __init__(self):
# def setScales(self, scales):
which might include code, classes, or functions. Output only the next line. | width=default.width, |
Given the following code snippet before the placeholder: <|code_start|># All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class Chart(bv.BaseVisualization, csv.CustomScalesVisualization, vv.ViewportVisualization):
def __init__(self,
dataset,
template_name,
<|code_end|>
, predict the next line using imports from the current file:
import jinja2
import os
import json
from pive.visualization import defaults as default
from pive.visualization import basevisualization as bv
from pive.visualization import viewportvisualization as vv
from pive.visualization import customscalesvisualization as csv
and context including class names, function names, and sometimes code from other files:
# Path: pive/visualization/defaults.py
#
# Path: pive/visualization/basevisualization.py
# class BaseVisualization:
# def __init__(self):
# def set_div_hook(self, div_hook):
# def get_js_code(self):
# def get_json_dataset(self):
# def set_title(self, title):
# def set_labels(self, labels):
# def set_dataset(self, dataset):
# def set_chart_colors(self, colors):
# def generate_visualization_dataset(self, dataset):
# def write_dataset_file(self, dataset, destination_url, filename):
# def create_css(self, template):
# def create_html(self, template):
# def create_js(self, template, dataset_url):
# def write_file(self, output, destination_url, filename):
# def create_visualization_files(self, destination_url):
# def set_height(self, height):
# def set_width(self, width):
# def set_dimension(self, width, height):
# def load_template_file(self, template_url):
#
# Path: pive/visualization/viewportvisualization.py
# class ViewportVisualization:
# def __init__(self):
# def setIconProperties(self, iconwidth, iconheight, iconcolor, iconhighlight):
# def setJumplength(self, jumplength):
# def setViewport(self, viewport):
#
# Path: pive/visualization/customscalesvisualization.py
# class CustomScalesVisualization:
# def __init__(self):
# def setScales(self, scales):
. Output only the next line. | width=default.width, |
Using the snippet: <|code_start|>""" The pive environment manages the visualizations and
relies on a given input manager to read data before
processing the visualizations. """
# Accessor to choose the charts. Corresponding with
# the config files 'title' attribute in
# pive/visualization/config
CHART_LINE = 'linechart'
CHART_SCATTER = 'scatterchart'
CHART_BUBBLE = 'bubblechart'
CHART_BAR = 'barchart'
CHART_PIE = 'piechart'
CHART_CHORD = 'chordchart'
# Bundles all essential access methods to render visualizations.
class Environment(object):
"""Contains all suitable visualizations. Only those
visualizations are imported and it is not
allowed to render unsuited visualizations."""
__suitables = []
__data = []
# The actual visualization modules.
__modules = []
__has_datefields = False
__datakeys = []
<|code_end|>
, determine the next line of code. You have imports:
import importlib
from .visualization import defaults as default
and context (class names, function names, or code) available:
# Path: pive/visualization/defaults.py
. Output only the next line. | def __init__(self, inputmanager=None, outputpath=default.output_path): |
Next line prediction: <|code_start|># All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class Chart(bv.BaseVisualization):
def __init__(self,
dataset,
template_name,
<|code_end|>
. Use current file imports:
(import jinja2
import os
import json
from pive.visualization import defaults as default
from pive.visualization import basevisualization as bv)
and context including class names, function names, or small code snippets from other files:
# Path: pive/visualization/defaults.py
#
# Path: pive/visualization/basevisualization.py
# class BaseVisualization:
# def __init__(self):
# def set_div_hook(self, div_hook):
# def get_js_code(self):
# def get_json_dataset(self):
# def set_title(self, title):
# def set_labels(self, labels):
# def set_dataset(self, dataset):
# def set_chart_colors(self, colors):
# def generate_visualization_dataset(self, dataset):
# def write_dataset_file(self, dataset, destination_url, filename):
# def create_css(self, template):
# def create_html(self, template):
# def create_js(self, template, dataset_url):
# def write_file(self, output, destination_url, filename):
# def create_visualization_files(self, destination_url):
# def set_height(self, height):
# def set_width(self, width):
# def set_dimension(self, width, height):
# def load_template_file(self, template_url):
. Output only the next line. | width=default.width, |
Predict the next line for this snippet: <|code_start|># A builder for a translation response.
#
# Author: Max Kellermann <mk@cm4all.com>
#
try:
except ImportError:
class Response:
"""Generator for a translation response. The BEGIN and END
packets are generated automatically. When you are done with the
response, call finish(). This method returns the full response
(all serialized packets) as a string."""
def __init__(self, protocol_version=0):
assert isinstance(protocol_version, int)
assert protocol_version >= 0
assert protocol_version <= 0xff
self._data = b''
payload = b''
if protocol_version > 0:
payload = struct.pack('B', protocol_version)
self.packet(TRANSLATE_BEGIN, payload)
def finish(self):
"""Finish the response, and return it as a string."""
<|code_end|>
with the help of current file imports:
import six
import array, struct
from urllib.parse import urlparse
from urlparse import urlparse
from .protocol import *
from .serialize import packet_header
from socket import gethostbyname
from socket import gethostbyname
and context from other files:
# Path: python/beng_proxy/translation/serialize.py
# def packet_header(command, length=0):
# """Generate the header of a translation packet."""
#
# assert length <= 0xffff
# return struct.pack('HH', length, command)
, which may contain function names, class names, or code. Output only the next line. | self._data += packet_header(TRANSLATE_END) |
Based on the snippet: <|code_start|> """ meta class """
db_table = 'bookmark_tag'
unique_together = ('bookmark', 'tag')
class FeedSubscription(BaseObject):
""" FeedSubsscription model """
url = models.URLField()
name = models.CharField(max_length=255, editable=False)
owner = models.ForeignKey(User)
default_category = models.ForeignKey(Category)
class Meta(object):
""" meta class """
db_table = 'feed_subscription'
unique_together = ('url', 'owner')
def __unicode__(self):
return self.url
def update_title(sender, instance, **kwargs):
""" Update title of FeedSubscription for pre_save.
Arguments:
sender: model (FeedSubscription)
instance: FeedSubscription instance
**kwargs: not use
"""
if validators.validate_url(instance.url):
<|code_end|>
, predict the immediate next line with the help of imports:
from django.db import models
from django.db.models.signals import pre_save
from django.contrib.auth.models import User
from shortuuidfield import ShortUUIDField
from pyquery import PyQuery
from shiori.bookmark.agents.feed_parser import FeedParser
from shiori.bookmark import validators
import jsonfield
and context (classes, functions, sometimes code) from other files:
# Path: shiori/bookmark/agents/feed_parser.py
# class FeedParser(object):
# """ FeedParser class """
#
# def __init__(self, url):
# try:
# response = requests.get(url, stream=True)
# except requests.ConnectionError as error:
# print(error)
# sys.exit(1)
# try:
# etree = lxml.fromstring(response.content)
# except XMLSyntaxError as error:
# print(error)
# sys.exit(1)
# self.nsmap = etree.nsmap
# self.namespace = get_ns(etree.nsmap, None)
#
# self.updated = get_updated(etree, self.nsmap)
# self.title = get_title(etree, self.namespace)
# self.items = get_items(etree, self.namespace)
# self.etree = etree
#
# def retrieve_items(self):
# """ retrieve feed items """
# return [dict(title=get_title(item, self.namespace),
# link=get_link(item, self.namespace),
# updated=get_updated(item, self.nsmap))
# for item in self.items]
#
# Path: shiori/bookmark/validators.py
# def validate_url(value):
# def getaddr(hostname):
. Output only the next line. | instance.name = FeedParser(instance.url).title |
Given snippet: <|code_start|> registered_datetime = models.DateTimeField(auto_now=True,
auto_now_add=True)
description = models.TextField(blank=True)
owner = models.ForeignKey(User)
is_hide = models.BooleanField(default=False)
class Meta(object):
""" meta class """
db_table = 'bookmark'
unique_together = ('url', 'owner')
def __unicode__(self):
return self.title
def get_absolute_url(self):
""" bookmark permalink path """
return "/shiori/b/%s" % self.id
def retrieve_title(sender, instance, **kwargs):
""" Retreive title of specified url.
Arguments:
sender: :model (Bookmark)
instance: Bookmark instance
**kwargs: not use
"""
if instance.title:
return instance.title
else:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.db import models
from django.db.models.signals import pre_save
from django.contrib.auth.models import User
from shortuuidfield import ShortUUIDField
from pyquery import PyQuery
from shiori.bookmark.agents.feed_parser import FeedParser
from shiori.bookmark import validators
import jsonfield
and context:
# Path: shiori/bookmark/agents/feed_parser.py
# class FeedParser(object):
# """ FeedParser class """
#
# def __init__(self, url):
# try:
# response = requests.get(url, stream=True)
# except requests.ConnectionError as error:
# print(error)
# sys.exit(1)
# try:
# etree = lxml.fromstring(response.content)
# except XMLSyntaxError as error:
# print(error)
# sys.exit(1)
# self.nsmap = etree.nsmap
# self.namespace = get_ns(etree.nsmap, None)
#
# self.updated = get_updated(etree, self.nsmap)
# self.title = get_title(etree, self.namespace)
# self.items = get_items(etree, self.namespace)
# self.etree = etree
#
# def retrieve_items(self):
# """ retrieve feed items """
# return [dict(title=get_title(item, self.namespace),
# link=get_link(item, self.namespace),
# updated=get_updated(item, self.nsmap))
# for item in self.items]
#
# Path: shiori/bookmark/validators.py
# def validate_url(value):
# def getaddr(hostname):
which might include code, classes, or functions. Output only the next line. | if validators.validate_url(instance.url): |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
class FeedParserTests(unittest.TestCase):
def test_retrieve_not_connect_server(self):
with self.assertRaises(SystemExit) as e:
<|code_end|>
. Use current file imports:
(import unittest
from httpretty import HTTPretty, httprettified
from defusedxml.lxml import RestrictedElement
from shiori.bookmark.agents import feed_parser)
and context including class names, function names, or small code snippets from other files:
# Path: shiori/bookmark/agents/feed_parser.py
# def get_updated(etree, nsmap):
# def get_title(etree, namespace):
# def get_link(etree, namespace):
# def get_items(etree, namespace):
# def get_ns(nsmap, key=None):
# def __init__(self, url):
# def retrieve_items(self):
# class FeedParser(object):
. Output only the next line. | parser = feed_parser.FeedParser('http://example.org/rss') |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
class ValidatorsTests(unittest.TestCase):
@patch('socket.gethostbyname', return_value='93.184.216.119')
@patch('socket.getaddrinfo',
return_value=[(10, 1, 6, '',
('2606:2800:220:6d:26bf:1447:1097:aa7', 0, 0, 0)),
(10, 2, 17, '',
('2606:2800:220:6d:26bf:1447:1097:aa7', 0, 0, 0)),
(10, 3, 0, '',
('2606:2800:220:6d:26bf:1447:1097:aa7', 0, 0, 0))])
def test_validate_url(self, _mock0, _mock1):
<|code_end|>
with the help of current file imports:
import unittest
from mock import patch
from django.core.exceptions import ValidationError
from shiori.bookmark import validators
and context from other files:
# Path: shiori/bookmark/validators.py
# def validate_url(value):
# def getaddr(hostname):
, which may contain function names, class names, or code. Output only the next line. | self.assertTrue(validators.validate_url('http://example.org')) |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
""" module of feed generator """
class LatestEntries(Feed):
""" Feed generator class """
title = 'Shiori new bookmarks'
link = '/shiori/'
description = 'Updates on changes and additions to Shiori.'
description_template = 'feeds/latest_title.html'
def items(self):
""" Retrieve latest 5 bookmarks """
<|code_end|>
. Use current file imports:
from django.contrib.syndication.views import Feed
from shiori.bookmark.models import Bookmark
and context (classes, functions, or code) from other files:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
. Output only the next line. | return Bookmark.objects.order_by('-registered_datetime')[:5] |
Next line prediction: <|code_start|>class BookmarkAdmin(admin.ModelAdmin):
""" Customizing list display for Bookmark model """
list_display = ('title', 'category', 'registered_datetime',
'owner', 'is_hide')
class BookmarkTagAdmin(admin.ModelAdmin):
""" Customizing list display for BookmarkTag model """
list_display = ('bookmark', 'tag')
class FeedSubscriptionAdmin(admin.ModelAdmin):
""" Customizing list display for FeedSubscription model """
list_display = ('name', 'owner', 'default_category')
class CrawlingHistoryAdmin(admin.ModelAdmin):
""" Customizing list display for CrawlingHistory model """
list_display = ('get_name', 'update_datetime', 'result')
def get_name(self, obj):
""" get feed name.
Argument:
obj: CrawlingHistory object
Return:
obj.feed.name
"""
return obj.feed.name
<|code_end|>
. Use current file imports:
(from django.contrib import admin
from shiori.bookmark.models import (Bookmark,
Category,
Tag,
BookmarkTag,
FeedSubscription,
CrawlingHistory))
and context including class names, function names, or small code snippets from other files:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class BookmarkTag(models.Model):
# """ BookmarkTag model """
# id = models.AutoField(primary_key=True)
# bookmark = models.ForeignKey(Bookmark,
# db_column='bookmark_id',
# to_field='id')
# tag = models.ForeignKey(Tag,
# db_column='tag_id',
# to_field='id')
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark_tag'
# unique_together = ('bookmark', 'tag')
#
# class FeedSubscription(BaseObject):
# """ FeedSubsscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
#
# class CrawlingHistory(BaseObject):
# """ Crawling History model """
# feed = models.ForeignKey(FeedSubscription)
# result = jsonfield.JSONField()
# update_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'crawling_history'
#
# def __unicode__(self):
# return self.id
. Output only the next line. | admin.site.register(Bookmark, BookmarkAdmin) |
Using the snippet: <|code_start|> """ Customizing list display for Bookmark model """
list_display = ('title', 'category', 'registered_datetime',
'owner', 'is_hide')
class BookmarkTagAdmin(admin.ModelAdmin):
""" Customizing list display for BookmarkTag model """
list_display = ('bookmark', 'tag')
class FeedSubscriptionAdmin(admin.ModelAdmin):
""" Customizing list display for FeedSubscription model """
list_display = ('name', 'owner', 'default_category')
class CrawlingHistoryAdmin(admin.ModelAdmin):
""" Customizing list display for CrawlingHistory model """
list_display = ('get_name', 'update_datetime', 'result')
def get_name(self, obj):
""" get feed name.
Argument:
obj: CrawlingHistory object
Return:
obj.feed.name
"""
return obj.feed.name
admin.site.register(Bookmark, BookmarkAdmin)
<|code_end|>
, determine the next line of code. You have imports:
from django.contrib import admin
from shiori.bookmark.models import (Bookmark,
Category,
Tag,
BookmarkTag,
FeedSubscription,
CrawlingHistory)
and context (class names, function names, or code) available:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class BookmarkTag(models.Model):
# """ BookmarkTag model """
# id = models.AutoField(primary_key=True)
# bookmark = models.ForeignKey(Bookmark,
# db_column='bookmark_id',
# to_field='id')
# tag = models.ForeignKey(Tag,
# db_column='tag_id',
# to_field='id')
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark_tag'
# unique_together = ('bookmark', 'tag')
#
# class FeedSubscription(BaseObject):
# """ FeedSubsscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
#
# class CrawlingHistory(BaseObject):
# """ Crawling History model """
# feed = models.ForeignKey(FeedSubscription)
# result = jsonfield.JSONField()
# update_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'crawling_history'
#
# def __unicode__(self):
# return self.id
. Output only the next line. | admin.site.register(Category) |
Continue the code snippet: <|code_start|> list_display = ('title', 'category', 'registered_datetime',
'owner', 'is_hide')
class BookmarkTagAdmin(admin.ModelAdmin):
""" Customizing list display for BookmarkTag model """
list_display = ('bookmark', 'tag')
class FeedSubscriptionAdmin(admin.ModelAdmin):
""" Customizing list display for FeedSubscription model """
list_display = ('name', 'owner', 'default_category')
class CrawlingHistoryAdmin(admin.ModelAdmin):
""" Customizing list display for CrawlingHistory model """
list_display = ('get_name', 'update_datetime', 'result')
def get_name(self, obj):
""" get feed name.
Argument:
obj: CrawlingHistory object
Return:
obj.feed.name
"""
return obj.feed.name
admin.site.register(Bookmark, BookmarkAdmin)
admin.site.register(Category)
<|code_end|>
. Use current file imports:
from django.contrib import admin
from shiori.bookmark.models import (Bookmark,
Category,
Tag,
BookmarkTag,
FeedSubscription,
CrawlingHistory)
and context (classes, functions, or code) from other files:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class BookmarkTag(models.Model):
# """ BookmarkTag model """
# id = models.AutoField(primary_key=True)
# bookmark = models.ForeignKey(Bookmark,
# db_column='bookmark_id',
# to_field='id')
# tag = models.ForeignKey(Tag,
# db_column='tag_id',
# to_field='id')
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark_tag'
# unique_together = ('bookmark', 'tag')
#
# class FeedSubscription(BaseObject):
# """ FeedSubsscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
#
# class CrawlingHistory(BaseObject):
# """ Crawling History model """
# feed = models.ForeignKey(FeedSubscription)
# result = jsonfield.JSONField()
# update_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'crawling_history'
#
# def __unicode__(self):
# return self.id
. Output only the next line. | admin.site.register(Tag) |
Based on the snippet: <|code_start|> 'owner', 'is_hide')
class BookmarkTagAdmin(admin.ModelAdmin):
""" Customizing list display for BookmarkTag model """
list_display = ('bookmark', 'tag')
class FeedSubscriptionAdmin(admin.ModelAdmin):
""" Customizing list display for FeedSubscription model """
list_display = ('name', 'owner', 'default_category')
class CrawlingHistoryAdmin(admin.ModelAdmin):
""" Customizing list display for CrawlingHistory model """
list_display = ('get_name', 'update_datetime', 'result')
def get_name(self, obj):
""" get feed name.
Argument:
obj: CrawlingHistory object
Return:
obj.feed.name
"""
return obj.feed.name
admin.site.register(Bookmark, BookmarkAdmin)
admin.site.register(Category)
admin.site.register(Tag)
<|code_end|>
, predict the immediate next line with the help of imports:
from django.contrib import admin
from shiori.bookmark.models import (Bookmark,
Category,
Tag,
BookmarkTag,
FeedSubscription,
CrawlingHistory)
and context (classes, functions, sometimes code) from other files:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class BookmarkTag(models.Model):
# """ BookmarkTag model """
# id = models.AutoField(primary_key=True)
# bookmark = models.ForeignKey(Bookmark,
# db_column='bookmark_id',
# to_field='id')
# tag = models.ForeignKey(Tag,
# db_column='tag_id',
# to_field='id')
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark_tag'
# unique_together = ('bookmark', 'tag')
#
# class FeedSubscription(BaseObject):
# """ FeedSubsscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
#
# class CrawlingHistory(BaseObject):
# """ Crawling History model """
# feed = models.ForeignKey(FeedSubscription)
# result = jsonfield.JSONField()
# update_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'crawling_history'
#
# def __unicode__(self):
# return self.id
. Output only the next line. | admin.site.register(BookmarkTag, BookmarkTagAdmin) |
Next line prediction: <|code_start|>
class BookmarkTagAdmin(admin.ModelAdmin):
""" Customizing list display for BookmarkTag model """
list_display = ('bookmark', 'tag')
class FeedSubscriptionAdmin(admin.ModelAdmin):
""" Customizing list display for FeedSubscription model """
list_display = ('name', 'owner', 'default_category')
class CrawlingHistoryAdmin(admin.ModelAdmin):
""" Customizing list display for CrawlingHistory model """
list_display = ('get_name', 'update_datetime', 'result')
def get_name(self, obj):
""" get feed name.
Argument:
obj: CrawlingHistory object
Return:
obj.feed.name
"""
return obj.feed.name
admin.site.register(Bookmark, BookmarkAdmin)
admin.site.register(Category)
admin.site.register(Tag)
admin.site.register(BookmarkTag, BookmarkTagAdmin)
<|code_end|>
. Use current file imports:
(from django.contrib import admin
from shiori.bookmark.models import (Bookmark,
Category,
Tag,
BookmarkTag,
FeedSubscription,
CrawlingHistory))
and context including class names, function names, or small code snippets from other files:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class BookmarkTag(models.Model):
# """ BookmarkTag model """
# id = models.AutoField(primary_key=True)
# bookmark = models.ForeignKey(Bookmark,
# db_column='bookmark_id',
# to_field='id')
# tag = models.ForeignKey(Tag,
# db_column='tag_id',
# to_field='id')
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark_tag'
# unique_together = ('bookmark', 'tag')
#
# class FeedSubscription(BaseObject):
# """ FeedSubsscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
#
# class CrawlingHistory(BaseObject):
# """ Crawling History model """
# feed = models.ForeignKey(FeedSubscription)
# result = jsonfield.JSONField()
# update_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'crawling_history'
#
# def __unicode__(self):
# return self.id
. Output only the next line. | admin.site.register(FeedSubscription, FeedSubscriptionAdmin) |
Using the snippet: <|code_start|>
class BookmarkTagAdmin(admin.ModelAdmin):
""" Customizing list display for BookmarkTag model """
list_display = ('bookmark', 'tag')
class FeedSubscriptionAdmin(admin.ModelAdmin):
""" Customizing list display for FeedSubscription model """
list_display = ('name', 'owner', 'default_category')
class CrawlingHistoryAdmin(admin.ModelAdmin):
""" Customizing list display for CrawlingHistory model """
list_display = ('get_name', 'update_datetime', 'result')
def get_name(self, obj):
""" get feed name.
Argument:
obj: CrawlingHistory object
Return:
obj.feed.name
"""
return obj.feed.name
admin.site.register(Bookmark, BookmarkAdmin)
admin.site.register(Category)
admin.site.register(Tag)
admin.site.register(BookmarkTag, BookmarkTagAdmin)
admin.site.register(FeedSubscription, FeedSubscriptionAdmin)
<|code_end|>
, determine the next line of code. You have imports:
from django.contrib import admin
from shiori.bookmark.models import (Bookmark,
Category,
Tag,
BookmarkTag,
FeedSubscription,
CrawlingHistory)
and context (class names, function names, or code) available:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class BookmarkTag(models.Model):
# """ BookmarkTag model """
# id = models.AutoField(primary_key=True)
# bookmark = models.ForeignKey(Bookmark,
# db_column='bookmark_id',
# to_field='id')
# tag = models.ForeignKey(Tag,
# db_column='tag_id',
# to_field='id')
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark_tag'
# unique_together = ('bookmark', 'tag')
#
# class FeedSubscription(BaseObject):
# """ FeedSubsscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
#
# class CrawlingHistory(BaseObject):
# """ Crawling History model """
# feed = models.ForeignKey(FeedSubscription)
# result = jsonfield.JSONField()
# update_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'crawling_history'
#
# def __unicode__(self):
# return self.id
. Output only the next line. | admin.site.register(CrawlingHistory, CrawlingHistoryAdmin) |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
""" module for validataion of agent """
if sys.version_info > (3, 0):
else:
def validate_url(value):
"""
Argument:
value: url (eg. http://example.org/rss)
Return:
True or raise exceptions
"""
hostname = urlparse(value).netloc
if ':' in hostname:
hostname = hostname.split(':')[0]
<|code_end|>
. Write the next line using the current file imports:
from django.core.exceptions import ValidationError
from urllib.parse import urlparse
from urlparse import urlparse
from netaddr import IPAddress, AddrFormatError
from shiori.core.settings import FEED_EXCLUDE_FQDN
import sys
import socket
and context from other files:
# Path: shiori/core/settings.py
# FEED_EXCLUDE_FQDN = (
# # 'shiori.example.org',
# )
, which may include functions, classes, or code. Output only the next line. | if hostname in FEED_EXCLUDE_FQDN: |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
""" Celery task module """
class ProcessRunner(PeriodicTask):
""" Process Runner class """
run_every = timedelta(minutes=CELERY_TIMEDELTA_MINUTES)
def run(self, **kwargs):
<|code_end|>
, generate the next line using the imports in this file:
from celery.task import PeriodicTask
from datetime import timedelta
from shiori.bookmark.agents import feed
from shiori.core.settings import CELERY_TIMEDELTA_MINUTES
and context (functions, classes, or occasionally code) from other files:
# Path: shiori/bookmark/agents/feed.py
# def register_bookmarks():
# def fetch_feeds(**kwargs):
# def add_item(**kwargs):
#
# Path: shiori/core/settings.py
# CELERY_TIMEDELTA_MINUTES = 60
. Output only the next line. | feed.register_bookmarks() |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
class BookmarkTransactionTest(TransactionTestCase):
fixtures = ['shiori_tests/test_data/dummy_users.json',
'shiori_tests/test_data/dummy_data.json']
def setUp(self):
self.user0 = User.objects.get(pk=1)
self.user1 = User.objects.get(pk=2)
<|code_end|>
. Use current file imports:
from django.core.urlresolvers import resolve
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import User
from django.views.generic import RedirectView
from django.db import IntegrityError
from shiori.bookmark.models import Category, Tag, Bookmark
from shiori.bookmark.feed_generator import LatestEntries
import shiori.bookmark.views
import shiori_tests.tests.vars as v
and context (classes, functions, or code) from other files:
# Path: shiori/bookmark/models.py
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# Path: shiori/bookmark/feed_generator.py
# class LatestEntries(Feed):
# """ Feed generator class """
# title = 'Shiori new bookmarks'
# link = '/shiori/'
# description = 'Updates on changes and additions to Shiori.'
# description_template = 'feeds/latest_title.html'
#
# def items(self):
# """ Retrieve latest 5 bookmarks """
# return Bookmark.objects.order_by('-registered_datetime')[:5]
. Output only the next line. | categories = Category.objects.all() |
Predict the next line after this snippet: <|code_start|># -*- coding: utf-8 -*-
class BookmarkTransactionTest(TransactionTestCase):
fixtures = ['shiori_tests/test_data/dummy_users.json',
'shiori_tests/test_data/dummy_data.json']
def setUp(self):
self.user0 = User.objects.get(pk=1)
self.user1 = User.objects.get(pk=2)
categories = Category.objects.all()
self.category0 = categories[0]
self.category1 = categories[1]
<|code_end|>
using the current file's imports:
from django.core.urlresolvers import resolve
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import User
from django.views.generic import RedirectView
from django.db import IntegrityError
from shiori.bookmark.models import Category, Tag, Bookmark
from shiori.bookmark.feed_generator import LatestEntries
import shiori.bookmark.views
import shiori_tests.tests.vars as v
and any relevant context from other files:
# Path: shiori/bookmark/models.py
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# Path: shiori/bookmark/feed_generator.py
# class LatestEntries(Feed):
# """ Feed generator class """
# title = 'Shiori new bookmarks'
# link = '/shiori/'
# description = 'Updates on changes and additions to Shiori.'
# description_template = 'feeds/latest_title.html'
#
# def items(self):
# """ Retrieve latest 5 bookmarks """
# return Bookmark.objects.order_by('-registered_datetime')[:5]
. Output only the next line. | tags = Tag.objects.all() |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
class BookmarkTransactionTest(TransactionTestCase):
fixtures = ['shiori_tests/test_data/dummy_users.json',
'shiori_tests/test_data/dummy_data.json']
def setUp(self):
self.user0 = User.objects.get(pk=1)
self.user1 = User.objects.get(pk=2)
categories = Category.objects.all()
self.category0 = categories[0]
self.category1 = categories[1]
tags = Tag.objects.all()
self.tag0 = tags[0]
self.tag1 = tags[1]
<|code_end|>
with the help of current file imports:
from django.core.urlresolvers import resolve
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import User
from django.views.generic import RedirectView
from django.db import IntegrityError
from shiori.bookmark.models import Category, Tag, Bookmark
from shiori.bookmark.feed_generator import LatestEntries
import shiori.bookmark.views
import shiori_tests.tests.vars as v
and context from other files:
# Path: shiori/bookmark/models.py
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# Path: shiori/bookmark/feed_generator.py
# class LatestEntries(Feed):
# """ Feed generator class """
# title = 'Shiori new bookmarks'
# link = '/shiori/'
# description = 'Updates on changes and additions to Shiori.'
# description_template = 'feeds/latest_title.html'
#
# def items(self):
# """ Retrieve latest 5 bookmarks """
# return Bookmark.objects.order_by('-registered_datetime')[:5]
, which may contain function names, class names, or code. Output only the next line. | self.bookmark = Bookmark.objects.create(url=v.url0, |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
""" serializer of shiori.api """
class CategorySerializer(serializers.ModelSerializer):
""" Serializer for shiori.bookmark.models.Category """
class Meta(object):
""" Meta class of CategorySerializer """
model = Category
fields = ('id', 'category')
class TagSerializer(serializers.ModelSerializer):
""" Serializer for shiori.bookmark.models.Tag """
class Meta(object):
""" Meta class of TagSerializer """
<|code_end|>
. Use current file imports:
(from rest_framework import serializers
from shiori.bookmark.models import (Category,
Tag,
Bookmark,
BookmarkTag,
FeedSubscription))
and context including class names, function names, or small code snippets from other files:
# Path: shiori/bookmark/models.py
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class BookmarkTag(models.Model):
# """ BookmarkTag model """
# id = models.AutoField(primary_key=True)
# bookmark = models.ForeignKey(Bookmark,
# db_column='bookmark_id',
# to_field='id')
# tag = models.ForeignKey(Tag,
# db_column='tag_id',
# to_field='id')
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark_tag'
# unique_together = ('bookmark', 'tag')
#
# class FeedSubscription(BaseObject):
# """ FeedSubsscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
. Output only the next line. | model = Tag |
Using the snippet: <|code_start|>
class CategorySerializer(serializers.ModelSerializer):
""" Serializer for shiori.bookmark.models.Category """
class Meta(object):
""" Meta class of CategorySerializer """
model = Category
fields = ('id', 'category')
class TagSerializer(serializers.ModelSerializer):
""" Serializer for shiori.bookmark.models.Tag """
class Meta(object):
""" Meta class of TagSerializer """
model = Tag
fields = ('id', 'tag')
class BookmarkSerializer(serializers.ModelSerializer):
""" Serializer for shiori.bookmark.models.Bookmark """
title = serializers.Field(source='title')
category = serializers.SlugRelatedField(many=False, slug_field='category')
category_id = serializers.Field(source='category.id')
tags = serializers.SlugRelatedField(many=True, slug_field='tag',
read_only=True)
owner = serializers.Field(source='owner.username')
class Meta(object):
""" Meta class of BookmarkSerializer """
<|code_end|>
, determine the next line of code. You have imports:
from rest_framework import serializers
from shiori.bookmark.models import (Category,
Tag,
Bookmark,
BookmarkTag,
FeedSubscription)
and context (class names, function names, or code) available:
# Path: shiori/bookmark/models.py
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class BookmarkTag(models.Model):
# """ BookmarkTag model """
# id = models.AutoField(primary_key=True)
# bookmark = models.ForeignKey(Bookmark,
# db_column='bookmark_id',
# to_field='id')
# tag = models.ForeignKey(Tag,
# db_column='tag_id',
# to_field='id')
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark_tag'
# unique_together = ('bookmark', 'tag')
#
# class FeedSubscription(BaseObject):
#     """ FeedSubscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
. Output only the next line. | model = Bookmark |
Based on the snippet: <|code_start|> """ Meta class of TagSerializer """
model = Tag
fields = ('id', 'tag')
class BookmarkSerializer(serializers.ModelSerializer):
""" Serializer for shiori.bookmark.models.Bookmark """
title = serializers.Field(source='title')
category = serializers.SlugRelatedField(many=False, slug_field='category')
category_id = serializers.Field(source='category.id')
tags = serializers.SlugRelatedField(many=True, slug_field='tag',
read_only=True)
owner = serializers.Field(source='owner.username')
class Meta(object):
""" Meta class of BookmarkSerializer """
model = Bookmark
fields = ('id', 'url', 'title', 'category', 'category_id',
'tags', 'registered_datetime', 'description',
'owner', 'is_hide')
class BookmarkTagSerializer(serializers.ModelSerializer):
""" Serializer for shiori.bookmark.models.BookmarkTag """
bookmark = serializers.SlugRelatedField(many=False, slug_field='url')
tag = serializers.SlugRelatedField(many=False, slug_field='tag')
class Meta(object):
""" Meta class of BookmarkTagSerializer """
<|code_end|>
, predict the immediate next line with the help of imports:
from rest_framework import serializers
from shiori.bookmark.models import (Category,
Tag,
Bookmark,
BookmarkTag,
FeedSubscription)
and context (classes, functions, sometimes code) from other files:
# Path: shiori/bookmark/models.py
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class BookmarkTag(models.Model):
# """ BookmarkTag model """
# id = models.AutoField(primary_key=True)
# bookmark = models.ForeignKey(Bookmark,
# db_column='bookmark_id',
# to_field='id')
# tag = models.ForeignKey(Tag,
# db_column='tag_id',
# to_field='id')
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark_tag'
# unique_together = ('bookmark', 'tag')
#
# class FeedSubscription(BaseObject):
#     """ FeedSubscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
. Output only the next line. | model = BookmarkTag |
Next line prediction: <|code_start|> class Meta(object):
""" Meta class of BookmarkSerializer """
model = Bookmark
fields = ('id', 'url', 'title', 'category', 'category_id',
'tags', 'registered_datetime', 'description',
'owner', 'is_hide')
class BookmarkTagSerializer(serializers.ModelSerializer):
""" Serializer for shiori.bookmark.models.BookmarkTag """
bookmark = serializers.SlugRelatedField(many=False, slug_field='url')
tag = serializers.SlugRelatedField(many=False, slug_field='tag')
class Meta(object):
""" Meta class of BookmarkTagSerializer """
model = BookmarkTag
fields = ('id', 'bookmark', 'tag')
class FeedSubscriptionSerializer(serializers.ModelSerializer):
""" Serializer for shiori.bookmark.models.FeedSubscription """
owner = serializers.Field(source='owner.username')
default_category = serializers.SlugRelatedField(many=False,
slug_field='category')
category_id = serializers.Field(source='category.id')
class Meta(object):
""" Meta class of FeedSubscriptionSerializer """
<|code_end|>
. Use current file imports:
(from rest_framework import serializers
from shiori.bookmark.models import (Category,
Tag,
Bookmark,
BookmarkTag,
FeedSubscription))
and context including class names, function names, or small code snippets from other files:
# Path: shiori/bookmark/models.py
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class Tag(BaseObject):
# """ Tag model """
# tag = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'tag'
#
# def __unicode__(self):
# return self.tag
#
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class BookmarkTag(models.Model):
# """ BookmarkTag model """
# id = models.AutoField(primary_key=True)
# bookmark = models.ForeignKey(Bookmark,
# db_column='bookmark_id',
# to_field='id')
# tag = models.ForeignKey(Tag,
# db_column='tag_id',
# to_field='id')
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark_tag'
# unique_together = ('bookmark', 'tag')
#
# class FeedSubscription(BaseObject):
#     """ FeedSubscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
. Output only the next line. | model = FeedSubscription |
Given snippet: <|code_start|> owner: feed subscriber user
Return:
List of Feeds data
"""
result = []
for entry in FeedParser(kwargs.get('url')).retrieve_items():
return_code, msg = add_item(url=entry.get('link'),
title=entry.get('title'),
category=kwargs.get('category'),
owner=kwargs.get('owner'))
if return_code is False and "already registered:" in msg:
break
result.append(dict(link=entry.get('link'),
rc=return_code,
msg=msg))
return result
def add_item(**kwargs):
""" Adding bookmark.
Arguments:
url: feed entry url
title: feed entry title
        category: feed entry category
owner: feed subscriber user
Return:
(Bool, error message)
"""
category = Category.objects.get(category=kwargs.get('category'))
owner = User.objects.get(username=kwargs.get('owner'))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.contrib.auth.models import User
from django.db import IntegrityError
from shiori.bookmark.models import (Bookmark,
Category,
FeedSubscription,
CrawlingHistory)
from shiori.bookmark.agents.feed_parser import FeedParser
import json
and context:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class FeedSubscription(BaseObject):
#     """ FeedSubscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
#
# class CrawlingHistory(BaseObject):
# """ Crawling History model """
# feed = models.ForeignKey(FeedSubscription)
# result = jsonfield.JSONField()
# update_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'crawling_history'
#
# def __unicode__(self):
# return self.id
#
# Path: shiori/bookmark/agents/feed_parser.py
# class FeedParser(object):
# """ FeedParser class """
#
# def __init__(self, url):
# try:
# response = requests.get(url, stream=True)
# except requests.ConnectionError as error:
# print(error)
# sys.exit(1)
# try:
# etree = lxml.fromstring(response.content)
# except XMLSyntaxError as error:
# print(error)
# sys.exit(1)
# self.nsmap = etree.nsmap
# self.namespace = get_ns(etree.nsmap, None)
#
# self.updated = get_updated(etree, self.nsmap)
# self.title = get_title(etree, self.namespace)
# self.items = get_items(etree, self.namespace)
# self.etree = etree
#
# def retrieve_items(self):
# """ retrieve feed items """
# return [dict(title=get_title(item, self.namespace),
# link=get_link(item, self.namespace),
# updated=get_updated(item, self.nsmap))
# for item in self.items]
which might include code, classes, or functions. Output only the next line. | bookmark = Bookmark(url=kwargs.get('url'), |
Predict the next line after this snippet: <|code_start|> url: feed entry url
category: feed entry category
owner: feed subscriber user
Return:
List of Feeds data
"""
result = []
for entry in FeedParser(kwargs.get('url')).retrieve_items():
return_code, msg = add_item(url=entry.get('link'),
title=entry.get('title'),
category=kwargs.get('category'),
owner=kwargs.get('owner'))
if return_code is False and "already registered:" in msg:
break
result.append(dict(link=entry.get('link'),
rc=return_code,
msg=msg))
return result
def add_item(**kwargs):
""" Adding bookmark.
Arguments:
url: feed entry url
title: feed entry title
        category: feed entry category
owner: feed subscriber user
Return:
(Bool, error message)
"""
<|code_end|>
using the current file's imports:
from django.contrib.auth.models import User
from django.db import IntegrityError
from shiori.bookmark.models import (Bookmark,
Category,
FeedSubscription,
CrawlingHistory)
from shiori.bookmark.agents.feed_parser import FeedParser
import json
and any relevant context from other files:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class FeedSubscription(BaseObject):
#     """ FeedSubscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
#
# class CrawlingHistory(BaseObject):
# """ Crawling History model """
# feed = models.ForeignKey(FeedSubscription)
# result = jsonfield.JSONField()
# update_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'crawling_history'
#
# def __unicode__(self):
# return self.id
#
# Path: shiori/bookmark/agents/feed_parser.py
# class FeedParser(object):
# """ FeedParser class """
#
# def __init__(self, url):
# try:
# response = requests.get(url, stream=True)
# except requests.ConnectionError as error:
# print(error)
# sys.exit(1)
# try:
# etree = lxml.fromstring(response.content)
# except XMLSyntaxError as error:
# print(error)
# sys.exit(1)
# self.nsmap = etree.nsmap
# self.namespace = get_ns(etree.nsmap, None)
#
# self.updated = get_updated(etree, self.nsmap)
# self.title = get_title(etree, self.namespace)
# self.items = get_items(etree, self.namespace)
# self.etree = etree
#
# def retrieve_items(self):
# """ retrieve feed items """
# return [dict(title=get_title(item, self.namespace),
# link=get_link(item, self.namespace),
# updated=get_updated(item, self.nsmap))
# for item in self.items]
. Output only the next line. | category = Category.objects.get(category=kwargs.get('category')) |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
""" Registre Bookmark for django celery task """
def register_bookmarks():
""" register bookmarks """
for feed in FeedSubscription.objects.all():
result = fetch_feeds(url=feed.url,
category=feed.default_category,
owner=feed.owner)
<|code_end|>
with the help of current file imports:
from django.contrib.auth.models import User
from django.db import IntegrityError
from shiori.bookmark.models import (Bookmark,
Category,
FeedSubscription,
CrawlingHistory)
from shiori.bookmark.agents.feed_parser import FeedParser
import json
and context from other files:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class FeedSubscription(BaseObject):
#     """ FeedSubscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
#
# class CrawlingHistory(BaseObject):
# """ Crawling History model """
# feed = models.ForeignKey(FeedSubscription)
# result = jsonfield.JSONField()
# update_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'crawling_history'
#
# def __unicode__(self):
# return self.id
#
# Path: shiori/bookmark/agents/feed_parser.py
# class FeedParser(object):
# """ FeedParser class """
#
# def __init__(self, url):
# try:
# response = requests.get(url, stream=True)
# except requests.ConnectionError as error:
# print(error)
# sys.exit(1)
# try:
# etree = lxml.fromstring(response.content)
# except XMLSyntaxError as error:
# print(error)
# sys.exit(1)
# self.nsmap = etree.nsmap
# self.namespace = get_ns(etree.nsmap, None)
#
# self.updated = get_updated(etree, self.nsmap)
# self.title = get_title(etree, self.namespace)
# self.items = get_items(etree, self.namespace)
# self.etree = etree
#
# def retrieve_items(self):
# """ retrieve feed items """
# return [dict(title=get_title(item, self.namespace),
# link=get_link(item, self.namespace),
# updated=get_updated(item, self.nsmap))
# for item in self.items]
, which may contain function names, class names, or code. Output only the next line. | CrawlingHistory(feed=feed, result=json.dumps(result)).save() |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
""" Registre Bookmark for django celery task """
def register_bookmarks():
""" register bookmarks """
for feed in FeedSubscription.objects.all():
result = fetch_feeds(url=feed.url,
category=feed.default_category,
owner=feed.owner)
CrawlingHistory(feed=feed, result=json.dumps(result)).save()
def fetch_feeds(**kwargs):
""" fetching feeds.
Arguments:
url: feed entry url
category: feed entry category
owner: feed subscriber user
Return:
List of Feeds data
"""
result = []
<|code_end|>
, predict the next line using imports from the current file:
from django.contrib.auth.models import User
from django.db import IntegrityError
from shiori.bookmark.models import (Bookmark,
Category,
FeedSubscription,
CrawlingHistory)
from shiori.bookmark.agents.feed_parser import FeedParser
import json
and context including class names, function names, and sometimes code from other files:
# Path: shiori/bookmark/models.py
# class Bookmark(BaseObject):
# """ Bookmark model """
# url = models.URLField()
# title = models.CharField(max_length=255)
# category = models.ForeignKey(Category)
# tags = models.ManyToManyField(Tag, through='BookmarkTag')
# registered_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
# description = models.TextField(blank=True)
# owner = models.ForeignKey(User)
# is_hide = models.BooleanField(default=False)
#
# class Meta(object):
# """ meta class """
# db_table = 'bookmark'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.title
#
# def get_absolute_url(self):
# """ bookmark permalink path """
# return "/shiori/b/%s" % self.id
#
# class Category(BaseObject):
# """ Category model """
# category = models.CharField(max_length=255, unique=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'category'
#
# def __unicode__(self):
# return self.category
#
# class FeedSubscription(BaseObject):
#     """ FeedSubscription model """
# url = models.URLField()
# name = models.CharField(max_length=255, editable=False)
# owner = models.ForeignKey(User)
# default_category = models.ForeignKey(Category)
#
# class Meta(object):
# """ meta class """
# db_table = 'feed_subscription'
# unique_together = ('url', 'owner')
#
# def __unicode__(self):
# return self.url
#
# class CrawlingHistory(BaseObject):
# """ Crawling History model """
# feed = models.ForeignKey(FeedSubscription)
# result = jsonfield.JSONField()
# update_datetime = models.DateTimeField(auto_now=True,
# auto_now_add=True)
#
# class Meta(object):
# """ meta class """
# db_table = 'crawling_history'
#
# def __unicode__(self):
# return self.id
#
# Path: shiori/bookmark/agents/feed_parser.py
# class FeedParser(object):
# """ FeedParser class """
#
# def __init__(self, url):
# try:
# response = requests.get(url, stream=True)
# except requests.ConnectionError as error:
# print(error)
# sys.exit(1)
# try:
# etree = lxml.fromstring(response.content)
# except XMLSyntaxError as error:
# print(error)
# sys.exit(1)
# self.nsmap = etree.nsmap
# self.namespace = get_ns(etree.nsmap, None)
#
# self.updated = get_updated(etree, self.nsmap)
# self.title = get_title(etree, self.namespace)
# self.items = get_items(etree, self.namespace)
# self.etree = etree
#
# def retrieve_items(self):
# """ retrieve feed items """
# return [dict(title=get_title(item, self.namespace),
# link=get_link(item, self.namespace),
# updated=get_updated(item, self.nsmap))
# for item in self.items]
. Output only the next line. | for entry in FeedParser(kwargs.get('url')).retrieve_items(): |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
""" routing of bookmark app """
urlpatterns = patterns('shiori.bookmark.views',
url(r'^$', 'index', name='index'),
url(r'^profile/$', 'profile'),
url(r'^add/$', 'add'),
url(r'^categories$', 'categories'),
url(r'^categories/(?P<category_id>[\w.]+)$',
'category'),
url(r'^tags$', 'tags'),
url(r'^tags/(?P<tag_id>[\w.]+)$', 'tag'),
<|code_end|>
with the help of current file imports:
from django.conf.urls import patterns, url
from shiori.bookmark.feed_generator import LatestEntries
and context from other files:
# Path: shiori/bookmark/feed_generator.py
# class LatestEntries(Feed):
# """ Feed generator class """
# title = 'Shiori new bookmarks'
# link = '/shiori/'
# description = 'Updates on changes and additions to Shiori.'
# description_template = 'feeds/latest_title.html'
#
# def items(self):
# """ Retrieve latest 5 bookmarks """
# return Bookmark.objects.order_by('-registered_datetime')[:5]
, which may contain function names, class names, or code. Output only the next line. | url(r'^feeds$', LatestEntries()), |
Using the snippet: <|code_start|>from __future__ import unicode_literals
"""
WSGI config for featuredjango project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
For products built with django-productline this can be done by refining
django.core.wsgi.get_wsgi_application directly.
"""
<|code_end|>
, determine the next line of code. You have imports:
from django_productline.startup import get_wsgi_application
and context (class names, function names, or code) available:
# Path: django_productline/startup.py
# def get_wsgi_application():
# """
# returns the wsgi application for the selected product
#
# this function is called by featuredjango.wsgi to get the wsgi
# application object
#
# if you need to refine the wsgi application object e.g. to add
# wsgi middleware please refine django.core.wsgi.get_wsgi_application directly.
# """
# # make sure the product is selected before importing and constructing wsgi app
# select_product()
# # return (possibly refined) wsgi application
# from django.core.wsgi import get_wsgi_application
# return get_wsgi_application()
. Output only the next line. | application = get_wsgi_application() |
Predict the next line for this snippet: <|code_start|>from __future__ import unicode_literals, print_function
__all__ = ['UtilsTestCase']
class UtilsTestCase(NoMigrationsTestCase):
def test_zipdir(self):
"""
Tests the zipdir function
:return:
"""
testdatadir = os.path.join(os.path.dirname(__file__), '__testdata__')
zipthisfolder = os.path.join(testdatadir, 'zipthisfolder')
os.makedirs(zipthisfolder)
os.mknod(os.path.join(zipthisfolder, 'file'))
src_path = os.path.join(testdatadir, 'zipthisfolder')
target_path = os.path.join(testdatadir, 'result.zip')
<|code_end|>
with the help of current file imports:
import os
import os.path
import shutil
from django_productline import utils
from django_productline.testingutils import NoMigrationsTestCase
and context from other files:
# Path: django_productline/utils.py
# def create_or_append_to_zip(file_handle, zip_path, arc_name=None):
# def zipdir(src_path, target_path, wrapdir=''):
# def compare_version(version1, version2):
# def normalize(v):
#
# Path: django_productline/testingutils.py
# class NoMigrationsTestCase(DjangoTestCase):
# """
# Extend your test cases from this class an migrations will be disabled.
# """
#
# def __init__(self, *args, **kw):
# from django.conf import settings
# settings.MIGRATION_MODULES = DisableMigrations()
# super(DjangoTestCase, self).__init__(*args, **kw)
, which may contain function names, class names, or code. Output only the next line. | utils.zipdir(src_path, target_path) |
Given the following code snippet before the placeholder: <|code_start|>from __future__ import unicode_literals
"""
django-productline root urlconf
urlpatterns are constructed by refining django_productline.urls.get_urls.
Here, get_urls is called to get the (composed) urlpatterns.
Django uses these to construct the root RegexUrlResolver.
"""
<|code_end|>
, predict the next line using imports from the current file:
from django_productline import urls
and context including class names, function names, and sometimes code from other files:
# Path: django_productline/urls.py
# def get_urls():
# def get_fallback_urls():
. Output only the next line. | urlpatterns = urls.get_urls() + urls.get_fallback_urls() |
Given snippet: <|code_start|>__author__ = 'robert'
try:
except ImportError:
sys.path.append('/media/data/PYTHON_WORKSPACE/pypet-project')
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pypet
import sys
from pypet.tests.testutils.ioutils import discover_tests, parse_args, run_suite
from pypet.tests.integration.environment_scoop_test import scoop_not_functional_check
and context:
# Path: pypet/tests/testutils/ioutils.py
# def discover_tests(predicate=None):
# """Builds a LambdaTestLoader and discovers tests according to `predicate`."""
# loader = LambdaTestDiscoverer(predicate)
# start_dir = os.path.dirname(os.path.abspath(__file__))
# start_dir = os.path.abspath(os.path.join(start_dir, '..'))
# suite = loader.discover(start_dir=start_dir, pattern='*test.py')
# return suite
#
# def parse_args():
# """Parses arguments and returns a dictionary"""
# opt_list, _ = getopt.getopt(sys.argv[1:],'k',['folder=', 'suite='])
# opt_dict = {}
#
# for opt, arg in opt_list:
# if opt == '-k':
# opt_dict['remove'] = False
# errwrite('I will keep all files.')
#
# if opt == '--folder':
# opt_dict['folder'] = arg
# errwrite('I will put all data into folder `%s`.' % arg)
#
# if opt == '--suite':
# opt_dict['suite_no'] = arg
# errwrite('I will run suite `%s`.' % arg)
#
# sys.argv = [sys.argv[0]]
# return opt_dict
#
# def run_suite(remove=None, folder=None, suite=None):
# """Runs a particular test suite or simply unittest.main.
#
# Takes care that all temporary data in `folder` is removed if `remove=True`.
#
# """
# if remove is not None:
# testParams['remove'] = remove
#
# testParams['user_tempdir'] = folder
#
# prepare_log_config()
#
# # Just signal if make_temp_dir works
# make_temp_dir('tmp.txt', signal=True)
#
# success = False
# try:
# if suite is None:
# unittest.main(verbosity=2)
# else:
# runner = unittest.TextTestRunner(verbosity=2)
# result = runner.run(suite)
# success = result.wasSuccessful()
# finally:
# remove_data()
#
# if not success:
# # Exit with 1 if tests were not successful
# sys.exit(1)
#
# Path: pypet/tests/integration/environment_scoop_test.py
# def scoop_not_functional_check():
# if scoop is not None and scoop.IS_RUNNING:
# print('SCOOP mode functional!')
# return False
# else:
# print('SCOOP NOT running!')
# return True
which might include code, classes, or functions. Output only the next line. | scoop_suite = discover_tests(lambda class_name, test_name, tags: 'scoop' in tags) |
Based on the snippet: <|code_start|>__author__ = 'robert'
try:
except ImportError:
sys.path.append('/media/data/PYTHON_WORKSPACE/pypet-project')
scoop_suite = discover_tests(lambda class_name, test_name, tags: 'scoop' in tags)
if __name__ == '__main__':
if scoop_not_functional_check():
raise RuntimeError('Not running in SCOOP mode!')
<|code_end|>
, predict the immediate next line with the help of imports:
import pypet
import sys
from pypet.tests.testutils.ioutils import discover_tests, parse_args, run_suite
from pypet.tests.integration.environment_scoop_test import scoop_not_functional_check
and context (classes, functions, sometimes code) from other files:
# Path: pypet/tests/testutils/ioutils.py
# def discover_tests(predicate=None):
# """Builds a LambdaTestLoader and discovers tests according to `predicate`."""
# loader = LambdaTestDiscoverer(predicate)
# start_dir = os.path.dirname(os.path.abspath(__file__))
# start_dir = os.path.abspath(os.path.join(start_dir, '..'))
# suite = loader.discover(start_dir=start_dir, pattern='*test.py')
# return suite
#
# def parse_args():
# """Parses arguments and returns a dictionary"""
# opt_list, _ = getopt.getopt(sys.argv[1:],'k',['folder=', 'suite='])
# opt_dict = {}
#
# for opt, arg in opt_list:
# if opt == '-k':
# opt_dict['remove'] = False
# errwrite('I will keep all files.')
#
# if opt == '--folder':
# opt_dict['folder'] = arg
# errwrite('I will put all data into folder `%s`.' % arg)
#
# if opt == '--suite':
# opt_dict['suite_no'] = arg
# errwrite('I will run suite `%s`.' % arg)
#
# sys.argv = [sys.argv[0]]
# return opt_dict
#
# def run_suite(remove=None, folder=None, suite=None):
# """Runs a particular test suite or simply unittest.main.
#
# Takes care that all temporary data in `folder` is removed if `remove=True`.
#
# """
# if remove is not None:
# testParams['remove'] = remove
#
# testParams['user_tempdir'] = folder
#
# prepare_log_config()
#
# # Just signal if make_temp_dir works
# make_temp_dir('tmp.txt', signal=True)
#
# success = False
# try:
# if suite is None:
# unittest.main(verbosity=2)
# else:
# runner = unittest.TextTestRunner(verbosity=2)
# result = runner.run(suite)
# success = result.wasSuccessful()
# finally:
# remove_data()
#
# if not success:
# # Exit with 1 if tests were not successful
# sys.exit(1)
#
# Path: pypet/tests/integration/environment_scoop_test.py
# def scoop_not_functional_check():
# if scoop is not None and scoop.IS_RUNNING:
# print('SCOOP mode functional!')
# return False
# else:
# print('SCOOP NOT running!')
# return True
. Output only the next line. | opt_dict = parse_args() |
Here is a snippet: <|code_start|>__author__ = 'robert'
try:
import pypet
except ImportError:
sys.path.append('/media/data/PYTHON_WORKSPACE/pypet-project')
scoop_suite = discover_tests(lambda class_name, test_name, tags: 'scoop' in tags)
if __name__ == '__main__':
if scoop_not_functional_check():
raise RuntimeError('Not running in SCOOP mode!')
opt_dict = parse_args()
<|code_end|>
. Write the next line using the current file imports:
import pypet
import sys
from pypet.tests.testutils.ioutils import discover_tests, parse_args, run_suite
from pypet.tests.integration.environment_scoop_test import scoop_not_functional_check
and context from other files:
# Path: pypet/tests/testutils/ioutils.py
# def discover_tests(predicate=None):
# """Builds a LambdaTestLoader and discovers tests according to `predicate`."""
# loader = LambdaTestDiscoverer(predicate)
# start_dir = os.path.dirname(os.path.abspath(__file__))
# start_dir = os.path.abspath(os.path.join(start_dir, '..'))
# suite = loader.discover(start_dir=start_dir, pattern='*test.py')
# return suite
#
# def parse_args():
# """Parses arguments and returns a dictionary"""
# opt_list, _ = getopt.getopt(sys.argv[1:],'k',['folder=', 'suite='])
# opt_dict = {}
#
# for opt, arg in opt_list:
# if opt == '-k':
# opt_dict['remove'] = False
# errwrite('I will keep all files.')
#
# if opt == '--folder':
# opt_dict['folder'] = arg
# errwrite('I will put all data into folder `%s`.' % arg)
#
# if opt == '--suite':
# opt_dict['suite_no'] = arg
# errwrite('I will run suite `%s`.' % arg)
#
# sys.argv = [sys.argv[0]]
# return opt_dict
#
# def run_suite(remove=None, folder=None, suite=None):
# """Runs a particular test suite or simply unittest.main.
#
# Takes care that all temporary data in `folder` is removed if `remove=True`.
#
# """
# if remove is not None:
# testParams['remove'] = remove
#
# testParams['user_tempdir'] = folder
#
# prepare_log_config()
#
# # Just signal if make_temp_dir works
# make_temp_dir('tmp.txt', signal=True)
#
# success = False
# try:
# if suite is None:
# unittest.main(verbosity=2)
# else:
# runner = unittest.TextTestRunner(verbosity=2)
# result = runner.run(suite)
# success = result.wasSuccessful()
# finally:
# remove_data()
#
# if not success:
# # Exit with 1 if tests were not successful
# sys.exit(1)
#
# Path: pypet/tests/integration/environment_scoop_test.py
# def scoop_not_functional_check():
# if scoop is not None and scoop.IS_RUNNING:
# print('SCOOP mode functional!')
# return False
# else:
# print('SCOOP NOT running!')
# return True
, which may include functions, classes, or code. Output only the next line. | run_suite(suite=scoop_suite, **opt_dict) |
Predict the next line for this snippet: <|code_start|>__author__ = 'robert'
try:
import pypet
except ImportError:
sys.path.append('/media/data/PYTHON_WORKSPACE/pypet-project')
scoop_suite = discover_tests(lambda class_name, test_name, tags: 'scoop' in tags)
if __name__ == '__main__':
<|code_end|>
with the help of current file imports:
import pypet
import sys
from pypet.tests.testutils.ioutils import discover_tests, parse_args, run_suite
from pypet.tests.integration.environment_scoop_test import scoop_not_functional_check
and context from other files:
# Path: pypet/tests/testutils/ioutils.py
# def discover_tests(predicate=None):
# """Builds a LambdaTestLoader and discovers tests according to `predicate`."""
# loader = LambdaTestDiscoverer(predicate)
# start_dir = os.path.dirname(os.path.abspath(__file__))
# start_dir = os.path.abspath(os.path.join(start_dir, '..'))
# suite = loader.discover(start_dir=start_dir, pattern='*test.py')
# return suite
#
# def parse_args():
# """Parses arguments and returns a dictionary"""
# opt_list, _ = getopt.getopt(sys.argv[1:],'k',['folder=', 'suite='])
# opt_dict = {}
#
# for opt, arg in opt_list:
# if opt == '-k':
# opt_dict['remove'] = False
# errwrite('I will keep all files.')
#
# if opt == '--folder':
# opt_dict['folder'] = arg
# errwrite('I will put all data into folder `%s`.' % arg)
#
# if opt == '--suite':
# opt_dict['suite_no'] = arg
# errwrite('I will run suite `%s`.' % arg)
#
# sys.argv = [sys.argv[0]]
# return opt_dict
#
# def run_suite(remove=None, folder=None, suite=None):
# """Runs a particular test suite or simply unittest.main.
#
# Takes care that all temporary data in `folder` is removed if `remove=True`.
#
# """
# if remove is not None:
# testParams['remove'] = remove
#
# testParams['user_tempdir'] = folder
#
# prepare_log_config()
#
# # Just signal if make_temp_dir works
# make_temp_dir('tmp.txt', signal=True)
#
# success = False
# try:
# if suite is None:
# unittest.main(verbosity=2)
# else:
# runner = unittest.TextTestRunner(verbosity=2)
# result = runner.run(suite)
# success = result.wasSuccessful()
# finally:
# remove_data()
#
# if not success:
# # Exit with 1 if tests were not successful
# sys.exit(1)
#
# Path: pypet/tests/integration/environment_scoop_test.py
# def scoop_not_functional_check():
# if scoop is not None and scoop.IS_RUNNING:
# print('SCOOP mode functional!')
# return False
# else:
# print('SCOOP NOT running!')
# return True
, which may contain function names, class names, or code. Output only the next line. | if scoop_not_functional_check(): |
Continue the code snippet: <|code_start|>"""Module for easy compartmental implementation of a BRIAN2 network.
Build parts of a network via subclassing :class:`~pypet.brian2.network.NetworkComponent` and
:class:`~pypet.brian2.network.NetworkAnalyser` for recording and statistical analysis.
Specify a :class:`~pypet.brian2.network.NetworkRunner` (subclassing optionally) that handles
the execution of your experiment in different subruns. Subruns can be defined
as :class:`~pypet.brian2.parameter.Brian2Parameter` instances in a particular
trajectory group. You must add to every parameter's :class:`~pypet.annotations.Annotations` the
attribute `order`. This order must be an integer specifying the index or order
the subrun should about to be executed in.
The creation and management of a BRIAN2 network is handled by the
:class:`~pypet.brian2.network.NetworkManager` (no need for subclassing). Pass your
components, analyser and your runner to the manager.
Pass the :func:`~pypet.brian2.network.run_network` function together with a
:class:`~pypet.brian2.network.NetworkManager` to your main environment function
:func:`~pypet.environment.Environment.run` to start a simulation and parallel
parameter exploration. Be aware that in case of a *pre-built* network,
successful parameter exploration
requires parallel processing (see :class:`~pypet.brian2.network.NetworkManager`).
"""
__author__ = 'Robert Meyer'
<|code_end|>
. Use current file imports:
from brian2 import Network, second
from pypet.pypetlogging import HasLogger
and context (classes, functions, or code) from other files:
# Path: pypet/pypetlogging.py
# class HasLogger(HasSlots):
# """Abstract super class that automatically adds a logger to a class.
#
# To add a logger to a sub-class of yours simply call ``myobj._set_logger(name)``.
# If ``name=None`` the logger is chosen as follows:
#
# ``self._logger = logging.getLogger(self.__class.__.__module__ + '.' + self.__class__.__name__)``
#
# The logger can be accessed via ``myobj._logger``.
#
# """
#
# __slots__ = ('_logger',)
#
# def __getstate__(self):
# """Called for pickling.
#
# Removes the logger to allow pickling and returns a copy of `__dict__`.
#
# """
# state_dict = super(HasLogger, self).__getstate__()
# if '_logger' in state_dict:
# # Pickling does not work with loggers objects,
# # so we just keep the logger's name:
# state_dict['_logger'] = self._logger.name
# return state_dict
#
# def __setstate__(self, statedict):
# """Called after loading a pickle dump.
#
# Restores `__dict__` from `statedict` and adds a new logger.
#
# """
# super(HasLogger, self).__setstate__(statedict)
# if '_logger' in statedict:
# # If we re-instantiate the component the
# # logger attribute only contains a name,
# # so we also need to re-create the logger:
# self._set_logger(statedict['_logger'])
#
# def _set_logger(self, name=None):
# """Adds a logger with a given `name`.
#
# If no name is given, name is constructed as
# `type(self).__name__`.
#
# """
# if name is None:
# cls = self.__class__
# name = '%s.%s' % (cls.__module__, cls.__name__)
# self._logger = logging.getLogger(name)
. Output only the next line. | class NetworkComponent(HasLogger): |
Predict the next line after this snippet: <|code_start|>
class MultiprocWrapper(object):
"""Abstract class definition of a Wrapper.
Note that only storing is required, loading is optional.
ABSTRACT: Needs to be defined in subclass
"""
@property
def is_open(self):
""" Normally the file is opened and closed after each insertion.
However, the storage service may provide to keep the store open and signals
this via this property.
"""
return False
@property
def multiproc_safe(self):
"""This wrapper guarantees multiprocessing safety"""
return True
def store(self, *args, **kwargs):
raise NotImplementedError('Implement this!')
<|code_end|>
using the current file's imports:
from threading import ThreadError
from collections import deque
from threading import Thread
from pypet.pypetlogging import HasLogger
from pypet.utils.decorators import retry
from pypet.utils.helpful_functions import is_ipv6
import queue
import pickle
import zmq
import copy as cp
import gc
import sys
import time
import os
import socket
import pypet.pypetconstants as pypetconstants
and any relevant context from other files:
# Path: pypet/pypetlogging.py
# class HasLogger(HasSlots):
# """Abstract super class that automatically adds a logger to a class.
#
# To add a logger to a sub-class of yours simply call ``myobj._set_logger(name)``.
# If ``name=None`` the logger is chosen as follows:
#
# ``self._logger = logging.getLogger(self.__class.__.__module__ + '.' + self.__class__.__name__)``
#
# The logger can be accessed via ``myobj._logger``.
#
# """
#
# __slots__ = ('_logger',)
#
# def __getstate__(self):
# """Called for pickling.
#
# Removes the logger to allow pickling and returns a copy of `__dict__`.
#
# """
# state_dict = super(HasLogger, self).__getstate__()
# if '_logger' in state_dict:
# # Pickling does not work with loggers objects,
# # so we just keep the logger's name:
# state_dict['_logger'] = self._logger.name
# return state_dict
#
# def __setstate__(self, statedict):
# """Called after loading a pickle dump.
#
# Restores `__dict__` from `statedict` and adds a new logger.
#
# """
# super(HasLogger, self).__setstate__(statedict)
# if '_logger' in statedict:
# # If we re-instantiate the component the
# # logger attribute only contains a name,
# # so we also need to re-create the logger:
# self._set_logger(statedict['_logger'])
#
# def _set_logger(self, name=None):
# """Adds a logger with a given `name`.
#
# If no name is given, name is constructed as
# `type(self).__name__`.
#
# """
# if name is None:
# cls = self.__class__
# name = '%s.%s' % (cls.__module__, cls.__name__)
# self._logger = logging.getLogger(name)
#
# Path: pypet/utils/decorators.py
# def retry(n, errors, wait=0.0, logger_name=None):
# """This is a decorator that retries a function.
#
# Tries `n` times and catches a given tuple of `errors`.
#
# If the `n` retries are not enough, the error is reraised.
#
# If desired `waits` some seconds.
#
# Optionally takes a 'logger_name' of a given logger to print the caught error.
#
# """
#
# def wrapper(func):
# @functools.wraps(func)
# def new_func(*args, **kwargs):
# retries = 0
# while True:
# try:
# result = func(*args, **kwargs)
# if retries and logger_name:
# logger = logging.getLogger(logger_name)
# logger.debug('Retry of `%s` successful' % func.__name__)
# return result
# except errors:
# if retries >= n:
# if logger_name:
# logger = logging.getLogger(logger_name)
# logger.exception('I could not execute `%s` with args %s and kwargs %s, '
# 'starting next try. ' % (func.__name__,
# str(args),
# str(kwargs)))
# raise
# elif logger_name:
# logger = logging.getLogger(logger_name)
# logger.debug('I could not execute `%s` with args %s and kwargs %s, '
# 'starting next try. ' % (func.__name__,
# str(args),
# str(kwargs)))
# retries += 1
# if wait:
# time.sleep(wait)
# return new_func
#
# return wrapper
#
# Path: pypet/utils/helpful_functions.py
# def is_ipv6(url):
# return '[' in url
. Output only the next line. | class ZMQServer(HasLogger): |
Here is a snippet: <|code_start|> super(ForkAwareLockerClient, self).start(test_connection)
class QueueStorageServiceSender(MultiprocWrapper, HasLogger):
""" For multiprocessing with :const:`~pypet.pypetconstants.WRAP_MODE_QUEUE`, replaces the
original storage service.
All storage requests are send over a queue to the process running the
:class:`~pypet.storageservice.QueueStorageServiceWriter`.
Does not support loading of data!
"""
def __init__(self, storage_queue=None):
self.queue = storage_queue
self.pickle_queue = True
self._set_logger()
def __getstate__(self):
result = super(QueueStorageServiceSender, self).__getstate__()
if not self.pickle_queue:
result['queue'] = None
return result
def load(self, *args, **kwargs):
raise NotImplementedError('Queue wrapping does not support loading. If you want to '
'load data in a multiprocessing environment, use a Lock '
'wrapping.')
<|code_end|>
. Write the next line using the current file imports:
from threading import ThreadError
from collections import deque
from threading import Thread
from pypet.pypetlogging import HasLogger
from pypet.utils.decorators import retry
from pypet.utils.helpful_functions import is_ipv6
import queue
import pickle
import zmq
import copy as cp
import gc
import sys
import time
import os
import socket
import pypet.pypetconstants as pypetconstants
and context from other files:
# Path: pypet/pypetlogging.py
# class HasLogger(HasSlots):
# """Abstract super class that automatically adds a logger to a class.
#
# To add a logger to a sub-class of yours simply call ``myobj._set_logger(name)``.
# If ``name=None`` the logger is chosen as follows:
#
# ``self._logger = logging.getLogger(self.__class.__.__module__ + '.' + self.__class__.__name__)``
#
# The logger can be accessed via ``myobj._logger``.
#
# """
#
# __slots__ = ('_logger',)
#
# def __getstate__(self):
# """Called for pickling.
#
# Removes the logger to allow pickling and returns a copy of `__dict__`.
#
# """
# state_dict = super(HasLogger, self).__getstate__()
# if '_logger' in state_dict:
# # Pickling does not work with loggers objects,
# # so we just keep the logger's name:
# state_dict['_logger'] = self._logger.name
# return state_dict
#
# def __setstate__(self, statedict):
# """Called after loading a pickle dump.
#
# Restores `__dict__` from `statedict` and adds a new logger.
#
# """
# super(HasLogger, self).__setstate__(statedict)
# if '_logger' in statedict:
# # If we re-instantiate the component the
# # logger attribute only contains a name,
# # so we also need to re-create the logger:
# self._set_logger(statedict['_logger'])
#
# def _set_logger(self, name=None):
# """Adds a logger with a given `name`.
#
# If no name is given, name is constructed as
# `type(self).__name__`.
#
# """
# if name is None:
# cls = self.__class__
# name = '%s.%s' % (cls.__module__, cls.__name__)
# self._logger = logging.getLogger(name)
#
# Path: pypet/utils/decorators.py
# def retry(n, errors, wait=0.0, logger_name=None):
# """This is a decorator that retries a function.
#
# Tries `n` times and catches a given tuple of `errors`.
#
# If the `n` retries are not enough, the error is reraised.
#
# If desired `waits` some seconds.
#
# Optionally takes a 'logger_name' of a given logger to print the caught error.
#
# """
#
# def wrapper(func):
# @functools.wraps(func)
# def new_func(*args, **kwargs):
# retries = 0
# while True:
# try:
# result = func(*args, **kwargs)
# if retries and logger_name:
# logger = logging.getLogger(logger_name)
# logger.debug('Retry of `%s` successful' % func.__name__)
# return result
# except errors:
# if retries >= n:
# if logger_name:
# logger = logging.getLogger(logger_name)
# logger.exception('I could not execute `%s` with args %s and kwargs %s, '
# 'starting next try. ' % (func.__name__,
# str(args),
# str(kwargs)))
# raise
# elif logger_name:
# logger = logging.getLogger(logger_name)
# logger.debug('I could not execute `%s` with args %s and kwargs %s, '
# 'starting next try. ' % (func.__name__,
# str(args),
# str(kwargs)))
# retries += 1
# if wait:
# time.sleep(wait)
# return new_func
#
# return wrapper
#
# Path: pypet/utils/helpful_functions.py
# def is_ipv6(url):
# return '[' in url
, which may include functions, classes, or code. Output only the next line. | @retry(9, Exception, 0.01, 'pypet.retry') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.