Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Predict the next line after this snippet: <|code_start|># coding: utf8
from __future__ import unicode_literals
class ProjectApplyForm(forms.ModelForm):
class Meta:
<|code_end|>
using the current file's imports:
from django import forms
from .models import ProjectApply
and any relevant context from other files:
# Path: brasilcomvc/projects/models.py
# class ProjectApply(models.Model):
#
# volunteer = models.ForeignKey(
# settings.AUTH_USER_MODEL, related_name='applications', editable=False)
# project = models.ForeignKey(
# 'Project', related_name='applications', editable=False)
# message = models.TextField()
# created = models.DateTimeField(auto_now_add=True)
#
# volunteer.verbose_name = 'voluntário'
# project.verbose_name = 'projeto'
# message.verbose_name = 'mensagem ao organizador'
# message.help_text = 'Conte-nos brevemente como você pode ajudar.'
# created.verbose_name = 'hora do registro'
#
# class Meta:
# unique_together = ('project', 'volunteer',)
# verbose_name = 'inscrição em projeto'
# verbose_name_plural = 'inscrições em projetos'
#
# def __str__(self):
# return '{}: {}'.format(self.project.name, self.volunteer.full_name)
#
# def _get_email_context(self):
# return {
# attr: getattr(self, attr)
# for attr in ('message', 'project', 'volunteer',)}
#
# def send_owner_email(self):
# send_template_email(
# subject='Alguém se inscreveu no seu projeto!',
# to=self.project.owner.email,
# template_name='emails/project_apply_owner.html',
# context=self._get_email_context())
#
# def send_volunteer_email(self):
# send_template_email(
# subject='Você se inscreveu num projeto!',
# to=self.volunteer.email,
# template_name='emails/project_apply_volunteer.html',
# context=self._get_email_context())
. Output only the next line. | model = ProjectApply |
Given snippet: <|code_start|># coding: utf-8
from __future__ import unicode_literals
class AccountsAppConfig(AppConfig):
name = 'brasilcomvc.accounts'
verbose_name = 'Autenticação e Autorização'
def ready(self):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.apps import AppConfig
from cities_light.signals import city_items_pre_import
from .signals import (
filter_city_import,
)
and context:
# Path: brasilcomvc/accounts/signals.py
# def filter_city_import(sender, items, **kwargs):
# if items[8] != 'BR':
# raise InvalidItems()
which might include code, classes, or functions. Output only the next line. | city_items_pre_import.connect(filter_city_import) |
Given the code snippet: <|code_start|> date_updated = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(editable=False, default=True)
is_staff = models.BooleanField(editable=False, default=False)
# Notifications
email_newsletter = models.BooleanField(default=True)
# Verbose names
email.verbose_name = 'e-mail'
full_name.verbose_name = 'nome completo'
username.verbose_name = 'nome de usuário'
picture.verbose_name = 'foto do usuário'
job_title.verbose_name = 'profissão'
bio.verbose_name = 'biografia'
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ('full_name',)
objects = UserManager()
class Meta:
verbose_name = 'usuário'
def get_short_name(self):
return self.full_name.split()[0] if self.full_name else ''
def get_full_name(self):
return self.full_name
def send_welcome_email(self):
<|code_end|>
, generate the next line using the imports in this file:
from django.contrib.auth.models import (
AbstractBaseUser,
BaseUserManager,
PermissionsMixin,
)
from django.db import models
from imagekit.models import ImageSpecField, ProcessedImageField
from imagekit.processors import ResizeToFill
from brasilcomvc.common.email import send_template_email
and context (functions, classes, or occasionally code) from other files:
# Path: brasilcomvc/common/email.py
# def send_template_email(subject, to, template_name, context=None):
# '''
# Render a template into an email body and send it through Django's send_mail
#
# - The `to` parameter must be a single email address.
# - This function omits `from_email` because it expects it to exist from an
# environment variable.
# - Other parameters are omitted as well because there are no
# use for them in the current use case.
# '''
# body = render_to_string(template_name, context or {}, Context({
# 'mailing_address': settings.MAILING_ADDRESS,
# 'site_url': settings.BASE_URL,
# 'sns_facebook': settings.SNS_FACEBOOK,
# 'sns_googleplus': settings.SNS_GOOGLEPLUS,
# 'sns_twitter': settings.SNS_TWITTER,
# 'subject': subject,
# }))
# plain_body = strip_tags(body)
#
# email = EmailMultiAlternatives(
# subject=subject,
# body=plain_body,
# to=(to,))
# email.attach_alternative(body, 'text/html')
# email.send()
. Output only the next line. | send_template_email( |
Here is a snippet: <|code_start|>
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
twisted.internet.base.DelayedCall.debug = True
class NetworkAddressTestCase(unittest.TestCase):
def testGetNetworkAddress(self):
d = getNetworkAddress()
d.addCallback(self._testGetNetworkAddressCallback)
return d
def _testGetNetworkAddressCallback(self, result):
if "public_ip" in result:
self._checkIP(result["public_ip"])
if "local_ip" in result:
self._checkIP(result["local_ip"])
if "public_ip" not in result and "local_ip" not in result:
raise Exception("Could not find a local or public IP.")
def testNetworkAddressGetter(self):
<|code_end|>
. Write the next line using the current file imports:
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from awspider.networkaddress import NetworkAddressGetter, getNetworkAddress
import os
import sys
import twisted
import re
and context from other files:
# Path: awspider/networkaddress.py
# class NetworkAddressGetter():
#
# local_ip = None
# public_ip = None
#
# def __init__( self ):
# self.ip_functions = [self.getDomaintools, self.getIPPages]
# random.shuffle(self.ip_functions)
#
# def __call__( self ):
# d = self.getAmazonIPs()
# d.addCallback( self._getAmazonIPsCallback )
# d.addErrback( self.getPublicIP )
# return d
#
# def getAmazonIPs( self ):
# logger.debug( "Getting local IP from Amazon." )
# a = getPage( "http://169.254.169.254/2009-04-04/meta-data/local-ipv4", timeout=5 )
#
# logger.debug( "Getting public IP from Amazon." )
# b = getPage( "http://169.254.169.254/2009-04-04/meta-data/public-ipv4", timeout=5 )
#
# d = DeferredList([a,b], consumeErrors=True)
# return d
#
# def _getAmazonIPsCallback( self, data ):
#
# if data[0][0] == True:
# self.local_ip = data[0][1]
# logger.debug( "Got local IP %s from Amazon." % self.local_ip )
# else:
# logger.debug( "Could not get local IP from Amazon." )
#
# if data[1][0] == True:
# public_ip = data[1][1]
# logger.debug( "Got public IP %s from Amazon." % public_ip )
#
# response = {}
# if self.local_ip is not None:
# response["local_ip"] = self.local_ip
# response["public_ip"] = public_ip
#
# return response
#
# else:
# logger.debug( "Could not get public IP from Amazon." )
# raise Exception( "Could not get public IP from Amazon." )
#
#
# def getPublicIP( self, error=None ):
#
# if len(self.ip_functions) > 0:
# func = self.ip_functions.pop()
# d = func()
# d.addCallback( self._getPublicIPCallback )
# d.addErrback( self.getPublicIP )
# return d
# else:
# logger.error( "Unable to get public IP address. Check your network connection" )
# response = {}
# if self.local_ip is not None:
# response["local_ip"] = self.local_ip
# else:
# response["local_ip"] = socket.gethostbyname(socket.gethostname())
# return response
#
# def _getPublicIPCallback( self, public_ip ):
# response = {}
# response["public_ip"] = public_ip
# if self.local_ip is not None:
# response["local_ip"] = self.local_ip
# else:
# response["local_ip"] = socket.gethostbyname(socket.gethostname())
# return response
#
# def getIPPages(self):
# logger.debug( "Getting public IP from ippages.com." )
# d = getPage( "http://www.ippages.com/xml/", timeout=5 )
# d.addCallback( self._getIPPagesCallback )
# return d
#
# def _getIPPagesCallback(self, data ):
# domaintools_xml = ET.XML( data )
# public_ip = domaintools_xml.find("ip").text
# logger.debug( "Got public IP %s from ippages.com." % public_ip )
# return public_ip
#
# def getDomaintools(self):
# logger.debug( "Getting public IP from domaintools.com." )
# d = getPage( "http://ip-address.domaintools.com/myip.xml", timeout=5 )
# d.addCallback( self._getDomaintoolsCallback )
# return d
#
# def _getDomaintoolsCallback(self, data):
# domaintools_xml = ET.XML( data )
# public_ip = domaintools_xml.find("ip_address").text
# logger.debug( "Got public IP %s from domaintools.com." % public_ip )
# return public_ip
#
# def getNetworkAddress():
# n = NetworkAddressGetter()
# d = n()
# d.addCallback( _getNetworkAddressCallback )
# return d
, which may include functions, classes, or code. Output only the next line. | n = NetworkAddressGetter() |
Given the following code snippet before the placeholder: <|code_start|>
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
twisted.internet.base.DelayedCall.debug = True
class NetworkAddressTestCase(unittest.TestCase):
def testGetNetworkAddress(self):
<|code_end|>
, predict the next line using imports from the current file:
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from awspider.networkaddress import NetworkAddressGetter, getNetworkAddress
import os
import sys
import twisted
import re
and context including class names, function names, and sometimes code from other files:
# Path: awspider/networkaddress.py
# class NetworkAddressGetter():
#
# local_ip = None
# public_ip = None
#
# def __init__( self ):
# self.ip_functions = [self.getDomaintools, self.getIPPages]
# random.shuffle(self.ip_functions)
#
# def __call__( self ):
# d = self.getAmazonIPs()
# d.addCallback( self._getAmazonIPsCallback )
# d.addErrback( self.getPublicIP )
# return d
#
# def getAmazonIPs( self ):
# logger.debug( "Getting local IP from Amazon." )
# a = getPage( "http://169.254.169.254/2009-04-04/meta-data/local-ipv4", timeout=5 )
#
# logger.debug( "Getting public IP from Amazon." )
# b = getPage( "http://169.254.169.254/2009-04-04/meta-data/public-ipv4", timeout=5 )
#
# d = DeferredList([a,b], consumeErrors=True)
# return d
#
# def _getAmazonIPsCallback( self, data ):
#
# if data[0][0] == True:
# self.local_ip = data[0][1]
# logger.debug( "Got local IP %s from Amazon." % self.local_ip )
# else:
# logger.debug( "Could not get local IP from Amazon." )
#
# if data[1][0] == True:
# public_ip = data[1][1]
# logger.debug( "Got public IP %s from Amazon." % public_ip )
#
# response = {}
# if self.local_ip is not None:
# response["local_ip"] = self.local_ip
# response["public_ip"] = public_ip
#
# return response
#
# else:
# logger.debug( "Could not get public IP from Amazon." )
# raise Exception( "Could not get public IP from Amazon." )
#
#
# def getPublicIP( self, error=None ):
#
# if len(self.ip_functions) > 0:
# func = self.ip_functions.pop()
# d = func()
# d.addCallback( self._getPublicIPCallback )
# d.addErrback( self.getPublicIP )
# return d
# else:
# logger.error( "Unable to get public IP address. Check your network connection" )
# response = {}
# if self.local_ip is not None:
# response["local_ip"] = self.local_ip
# else:
# response["local_ip"] = socket.gethostbyname(socket.gethostname())
# return response
#
# def _getPublicIPCallback( self, public_ip ):
# response = {}
# response["public_ip"] = public_ip
# if self.local_ip is not None:
# response["local_ip"] = self.local_ip
# else:
# response["local_ip"] = socket.gethostbyname(socket.gethostname())
# return response
#
# def getIPPages(self):
# logger.debug( "Getting public IP from ippages.com." )
# d = getPage( "http://www.ippages.com/xml/", timeout=5 )
# d.addCallback( self._getIPPagesCallback )
# return d
#
# def _getIPPagesCallback(self, data ):
# domaintools_xml = ET.XML( data )
# public_ip = domaintools_xml.find("ip").text
# logger.debug( "Got public IP %s from ippages.com." % public_ip )
# return public_ip
#
# def getDomaintools(self):
# logger.debug( "Getting public IP from domaintools.com." )
# d = getPage( "http://ip-address.domaintools.com/myip.xml", timeout=5 )
# d.addCallback( self._getDomaintoolsCallback )
# return d
#
# def _getDomaintoolsCallback(self, data):
# domaintools_xml = ET.XML( data )
# public_ip = domaintools_xml.find("ip_address").text
# logger.debug( "Got public IP %s from domaintools.com." % public_ip )
# return public_ip
#
# def getNetworkAddress():
# n = NetworkAddressGetter()
# d = n()
# d.addCallback( _getNetworkAddressCallback )
# return d
. Output only the next line. | d = getNetworkAddress() |
Based on the snippet: <|code_start|>
**Arguments:**
* *url* -- URL for the request.
**Keyword arguments:**
* *last_modified* -- Last modified date string to send as a request
header. (Default ``None``)
* *etag* -- Etag string to send as a request header. (Default
``None``)
* *method* -- HTTP request method. (Default ``'GET'``)
* *postdata* -- Dictionary of strings to post with the request.
(Default ``None``)
* *headers* -- Dictionary of strings to send as request headers.
(Default ``None``)
* *agent* -- User agent to send with request. (Default
``'RequestQueuer'``)
* *timeout* -- Request timeout, in seconds. (Default ``60``)
* *cookies* -- Dictionary of strings to send as request cookies.
(Default ``None``).
* *follow_redirect* -- Boolean switch to follow HTTP redirects.
(Default ``True``)
* *prioritize* -- Move this request to the front of the request
queue. (Default ``False``)
"""
if headers is None:
headers={}
if postdata is not None:
if isinstance(postdata, dict):
for key in postdata:
<|code_end|>
, predict the immediate next line with the help of imports:
import urllib
import time
import dateutil.parser
import logging
from twisted.internet.defer import Deferred
from twisted.internet import reactor, ssl
from twisted.web.client import HTTPClientFactory, _parse
from .unicodeconverter import convertToUTF8
from OpenSSL import SSL
and context (classes, functions, sometimes code) from other files:
# Path: awspider/unicodeconverter.py
# def convertToUTF8( s ):
# s = UnicodeConverter( s ).unicode
# if isinstance( s, unicode ):
# return s.encode("utf-8")
# else:
# return None
. Output only the next line. | postdata[key] = convertToUTF8(postdata[key]) |
Predict the next line after this snippet: <|code_start|>class ValueIteration(object):
"""
Implementation of the enumerative Value Iteration algorithm.
It performs successive, synchronous Bellman backups until
convergence is achieved for the given error epsilon for the
infinite-horizon MDP with discount factor gamma.
:param mdp: MDP representation
:type mdp: mdpproblog.MDP
"""
def __init__(self, mdp):
self._mdp = mdp
def run(self, gamma=0.9, epsilon=0.1):
"""
Execute value iteration until convergence.
Return optimal value function, greedy policy and number
of iterations.
:param gamma: discount factor
:type gamma: float
:param epsilon: maximum error
:type epsilon: float
:rtype: triple (dict(state, value), dict(policy, action), float)
"""
V = {}
policy = {}
actions = ActionSpace(self._mdp.actions())
<|code_end|>
using the current file's imports:
import sys
from mdpproblog.fluent import StateSpace, ActionSpace
and any relevant context from other files:
# Path: mdpproblog/fluent.py
# class StateSpace(object):
# """
# Iterator class for looping over vector representations of
# states in a factored MDP defined by `state_fluents`. Each state
# is implemented by an OrderedDict of (problog.logic.Term, 0/1).
#
# :param state_fluents: predicates defining a state in a given timestep
# :type state_fluents: list of problog.logic.Term
# """
#
# def __init__(self, state_fluents):
# self.__state_fluents = state_fluents
# self.__state_space_size = 2**len(self.__state_fluents)
#
# def __len__(self):
# """ Return the number of states of the state space. """
# return self.__state_space_size
#
# def __iter__(self):
# """ Return an iterator over the state space. """
# self.__state_number = 0
# self.__state = OrderedDict([ (fluent, 1) for fluent in self.__state_fluents ])
# return self
#
# def __next__(self):
# """ Return representation of next state in the sequence. """
# if self.__state_number == self.__state_space_size:
# raise StopIteration
#
# for fluent, value in self.__state.items():
# if value == 1:
# self.__state[fluent] = 0
# else:
# self.__state[fluent] = 1
# break
#
# self.__state_number += 1
# return self.__state
#
# def __getitem__(self, index):
# """
# Return the state representation with given `index`.
#
# :param index: state index in state space
# :type index: int
# """
# state = []
# for fluent in self.__state_fluents:
# value = index % 2
# index //= 2
# state.append((fluent, value))
# return tuple(state)
#
# @classmethod
# def state(cls, valuation):
# """
# Return the state representation of a `valuation` of fluents.
#
# :param valuation: mapping from fluent to boolean value
# :type valuation: list of pairs (Fluent, bool)
# :rtype: OrderedDict
# """
# return OrderedDict(valuation)
#
# @classmethod
# def index(cls, state):
# """
# Return the `state` index in the state space.
#
# :param state: state representation
# :type state: OrderedDict
# :rtype: int
# """
# i = 0
# index = 0
# for _, value in state.items():
# index += value * 2 ** i
# i += 1
# return index
#
# class ActionSpace(object):
# """
# Iterator class for looping over vector representations of
# `actions` in a factored MDP. Each action is implemented by
# an OrderedDict of (problog.logic.Term, 0/1).
#
# :param actions: predicates listing possible actions
# :type actions: list of problog.logic.Term
# """
#
# def __init__(self, actions):
# self.__actions = actions
# self.__action_space_size = len(self.__actions)
#
# def __len__(self):
# """ Return the number of actions of the action space. """
# return self.__action_space_size
#
# def __iter__(self):
# """ Return an iterator over the action space. """
# self.__action_number = 0
# self.__action = OrderedDict([ (action, 0) for action in self.__actions ])
# self.__action[self.__actions[-1]] = 1
# return self
#
# def __next__(self):
# """ Return representation of next action in the sequence. """
# if self.__action_number == self.__action_space_size:
# raise StopIteration
#
# self.__action[self.__actions[self.__action_number - 1]] = 0
# self.__action[self.__actions[self.__action_number]] = 1
#
# self.__action_number += 1
# return self.__action
#
# def __getitem__(self, index):
# """
# Return the action representation with given `index`.
#
# :param index: action index in action space
# :type index: int
# """
# return self.__actions[index]
#
# @classmethod
# def index(cls, action):
# """
# Return action index in the action space.
#
# :param action: action representation
# :type action: OrderedDict
# :rtype: int
# """
# for index, fluent in enumerate(action):
# if action[fluent] == 1:
# return index
. Output only the next line. | states = StateSpace(self._mdp.current_state_fluents()) |
Given the code snippet: <|code_start|>
class ValueIteration(object):
"""
Implementation of the enumerative Value Iteration algorithm.
It performs successive, synchronous Bellman backups until
convergence is achieved for the given error epsilon for the
infinite-horizon MDP with discount factor gamma.
:param mdp: MDP representation
:type mdp: mdpproblog.MDP
"""
def __init__(self, mdp):
self._mdp = mdp
def run(self, gamma=0.9, epsilon=0.1):
"""
Execute value iteration until convergence.
Return optimal value function, greedy policy and number
of iterations.
:param gamma: discount factor
:type gamma: float
:param epsilon: maximum error
:type epsilon: float
:rtype: triple (dict(state, value), dict(policy, action), float)
"""
V = {}
policy = {}
<|code_end|>
, generate the next line using the imports in this file:
import sys
from mdpproblog.fluent import StateSpace, ActionSpace
and context (functions, classes, or occasionally code) from other files:
# Path: mdpproblog/fluent.py
# class StateSpace(object):
# """
# Iterator class for looping over vector representations of
# states in a factored MDP defined by `state_fluents`. Each state
# is implemented by an OrderedDict of (problog.logic.Term, 0/1).
#
# :param state_fluents: predicates defining a state in a given timestep
# :type state_fluents: list of problog.logic.Term
# """
#
# def __init__(self, state_fluents):
# self.__state_fluents = state_fluents
# self.__state_space_size = 2**len(self.__state_fluents)
#
# def __len__(self):
# """ Return the number of states of the state space. """
# return self.__state_space_size
#
# def __iter__(self):
# """ Return an iterator over the state space. """
# self.__state_number = 0
# self.__state = OrderedDict([ (fluent, 1) for fluent in self.__state_fluents ])
# return self
#
# def __next__(self):
# """ Return representation of next state in the sequence. """
# if self.__state_number == self.__state_space_size:
# raise StopIteration
#
# for fluent, value in self.__state.items():
# if value == 1:
# self.__state[fluent] = 0
# else:
# self.__state[fluent] = 1
# break
#
# self.__state_number += 1
# return self.__state
#
# def __getitem__(self, index):
# """
# Return the state representation with given `index`.
#
# :param index: state index in state space
# :type index: int
# """
# state = []
# for fluent in self.__state_fluents:
# value = index % 2
# index //= 2
# state.append((fluent, value))
# return tuple(state)
#
# @classmethod
# def state(cls, valuation):
# """
# Return the state representation of a `valuation` of fluents.
#
# :param valuation: mapping from fluent to boolean value
# :type valuation: list of pairs (Fluent, bool)
# :rtype: OrderedDict
# """
# return OrderedDict(valuation)
#
# @classmethod
# def index(cls, state):
# """
# Return the `state` index in the state space.
#
# :param state: state representation
# :type state: OrderedDict
# :rtype: int
# """
# i = 0
# index = 0
# for _, value in state.items():
# index += value * 2 ** i
# i += 1
# return index
#
# class ActionSpace(object):
# """
# Iterator class for looping over vector representations of
# `actions` in a factored MDP. Each action is implemented by
# an OrderedDict of (problog.logic.Term, 0/1).
#
# :param actions: predicates listing possible actions
# :type actions: list of problog.logic.Term
# """
#
# def __init__(self, actions):
# self.__actions = actions
# self.__action_space_size = len(self.__actions)
#
# def __len__(self):
# """ Return the number of actions of the action space. """
# return self.__action_space_size
#
# def __iter__(self):
# """ Return an iterator over the action space. """
# self.__action_number = 0
# self.__action = OrderedDict([ (action, 0) for action in self.__actions ])
# self.__action[self.__actions[-1]] = 1
# return self
#
# def __next__(self):
# """ Return representation of next action in the sequence. """
# if self.__action_number == self.__action_space_size:
# raise StopIteration
#
# self.__action[self.__actions[self.__action_number - 1]] = 0
# self.__action[self.__actions[self.__action_number]] = 1
#
# self.__action_number += 1
# return self.__action
#
# def __getitem__(self, index):
# """
# Return the action representation with given `index`.
#
# :param index: action index in action space
# :type index: int
# """
# return self.__actions[index]
#
# @classmethod
# def index(cls, action):
# """
# Return action index in the action space.
#
# :param action: action representation
# :type action: OrderedDict
# :rtype: int
# """
# for index, fluent in enumerate(action):
# if action[fluent] == 1:
# return index
. Output only the next line. | actions = ActionSpace(self._mdp.actions()) |
Given the following code snippet before the placeholder: <|code_start|># GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with MDP-ProbLog. If not, see <http://www.gnu.org/licenses/>.
class MDP(object):
"""
Representation of an MDP and its components. Implemented as a bridge
class to the ProbLog programs specifying the MDP domain and problems.
:param model: a valid MDP-ProbLog program
:type model: str
"""
def __init__(self, model):
self._model = model
self._engine = eng.Engine(model)
self.__transition_cache = {}
self.__reward_cache = {}
self.__prepare()
def __prepare(self):
""" Prepare the mdp-problog knowledge database to accept queries. """
# add dummy current state fluents probabilistic facts
for term in self.state_fluents():
<|code_end|>
, predict the next line using imports from the current file:
import mdpproblog.engine as eng
from mdpproblog.fluent import Fluent, StateSpace, ActionSpace
and context including class names, function names, and sometimes code from other files:
# Path: mdpproblog/fluent.py
# class Fluent(object):
# """
# Factory class for building fluent terms. A fluent term is a
# problog.logic.Term with a problog.logic.Constant as last argument
# representing its timestep.
# """
#
# @classmethod
# def create_fluent(cls, term, timestep):
# """"
# Return a new fluent made from `term` with given `timestep`.
#
# :param term: any problog term
# :type term: problog.logic.Term
# :param timestep: timestep numeric value
# :type timestep: int
# :rtype: problog.logic.Term
# """
# args = term.args + (Constant(timestep),)
# return term.with_args(*args)
#
# class StateSpace(object):
# """
# Iterator class for looping over vector representations of
# states in a factored MDP defined by `state_fluents`. Each state
# is implemented by an OrderedDict of (problog.logic.Term, 0/1).
#
# :param state_fluents: predicates defining a state in a given timestep
# :type state_fluents: list of problog.logic.Term
# """
#
# def __init__(self, state_fluents):
# self.__state_fluents = state_fluents
# self.__state_space_size = 2**len(self.__state_fluents)
#
# def __len__(self):
# """ Return the number of states of the state space. """
# return self.__state_space_size
#
# def __iter__(self):
# """ Return an iterator over the state space. """
# self.__state_number = 0
# self.__state = OrderedDict([ (fluent, 1) for fluent in self.__state_fluents ])
# return self
#
# def __next__(self):
# """ Return representation of next state in the sequence. """
# if self.__state_number == self.__state_space_size:
# raise StopIteration
#
# for fluent, value in self.__state.items():
# if value == 1:
# self.__state[fluent] = 0
# else:
# self.__state[fluent] = 1
# break
#
# self.__state_number += 1
# return self.__state
#
# def __getitem__(self, index):
# """
# Return the state representation with given `index`.
#
# :param index: state index in state space
# :type index: int
# """
# state = []
# for fluent in self.__state_fluents:
# value = index % 2
# index //= 2
# state.append((fluent, value))
# return tuple(state)
#
# @classmethod
# def state(cls, valuation):
# """
# Return the state representation of a `valuation` of fluents.
#
# :param valuation: mapping from fluent to boolean value
# :type valuation: list of pairs (Fluent, bool)
# :rtype: OrderedDict
# """
# return OrderedDict(valuation)
#
# @classmethod
# def index(cls, state):
# """
# Return the `state` index in the state space.
#
# :param state: state representation
# :type state: OrderedDict
# :rtype: int
# """
# i = 0
# index = 0
# for _, value in state.items():
# index += value * 2 ** i
# i += 1
# return index
#
# class ActionSpace(object):
# """
# Iterator class for looping over vector representations of
# `actions` in a factored MDP. Each action is implemented by
# an OrderedDict of (problog.logic.Term, 0/1).
#
# :param actions: predicates listing possible actions
# :type actions: list of problog.logic.Term
# """
#
# def __init__(self, actions):
# self.__actions = actions
# self.__action_space_size = len(self.__actions)
#
# def __len__(self):
# """ Return the number of actions of the action space. """
# return self.__action_space_size
#
# def __iter__(self):
# """ Return an iterator over the action space. """
# self.__action_number = 0
# self.__action = OrderedDict([ (action, 0) for action in self.__actions ])
# self.__action[self.__actions[-1]] = 1
# return self
#
# def __next__(self):
# """ Return representation of next action in the sequence. """
# if self.__action_number == self.__action_space_size:
# raise StopIteration
#
# self.__action[self.__actions[self.__action_number - 1]] = 0
# self.__action[self.__actions[self.__action_number]] = 1
#
# self.__action_number += 1
# return self.__action
#
# def __getitem__(self, index):
# """
# Return the action representation with given `index`.
#
# :param index: action index in action space
# :type index: int
# """
# return self.__actions[index]
#
# @classmethod
# def index(cls, action):
# """
# Return action index in the action space.
#
# :param action: action representation
# :type action: OrderedDict
# :rtype: int
# """
# for index, fluent in enumerate(action):
# if action[fluent] == 1:
# return index
. Output only the next line. | self._engine.add_fact(Fluent.create_fluent(term, 0), 0.5) |
Predict the next line after this snippet: <|code_start|> return self.__transition(state, action)
transition = self.__transition_cache.get(cache, None)
if transition is None:
transition = self.__transition(state, action)
self.__transition_cache[cache] = transition
return transition
def __transition(self, state, action):
"""
Return the probabilities of next state fluents given current
`state` and `action`.
:param state: state vector representation of current state fluents
:type state: list of 0/1 according to state fluents order
:param action: action vector representation
:type action: one-hot vector encoding of action as a list of 0/1
:rtype: list of pairs (problog.logic.Term, float)
"""
evidence = state.copy()
evidence.update(action)
return self._engine.evaluate(self.__next_state_queries, evidence)
def transition_model(self):
"""
Return the transition model of all valid transitions.
:rtype: dict of ((state,action), list of probabilities)
"""
transitions = {}
<|code_end|>
using the current file's imports:
import mdpproblog.engine as eng
from mdpproblog.fluent import Fluent, StateSpace, ActionSpace
and any relevant context from other files:
# Path: mdpproblog/fluent.py
# class Fluent(object):
# """
# Factory class for building fluent terms. A fluent term is a
# problog.logic.Term with a problog.logic.Constant as last argument
# representing its timestep.
# """
#
# @classmethod
# def create_fluent(cls, term, timestep):
# """"
# Return a new fluent made from `term` with given `timestep`.
#
# :param term: any problog term
# :type term: problog.logic.Term
# :param timestep: timestep numeric value
# :type timestep: int
# :rtype: problog.logic.Term
# """
# args = term.args + (Constant(timestep),)
# return term.with_args(*args)
#
# class StateSpace(object):
# """
# Iterator class for looping over vector representations of
# states in a factored MDP defined by `state_fluents`. Each state
# is implemented by an OrderedDict of (problog.logic.Term, 0/1).
#
# :param state_fluents: predicates defining a state in a given timestep
# :type state_fluents: list of problog.logic.Term
# """
#
# def __init__(self, state_fluents):
# self.__state_fluents = state_fluents
# self.__state_space_size = 2**len(self.__state_fluents)
#
# def __len__(self):
# """ Return the number of states of the state space. """
# return self.__state_space_size
#
# def __iter__(self):
# """ Return an iterator over the state space. """
# self.__state_number = 0
# self.__state = OrderedDict([ (fluent, 1) for fluent in self.__state_fluents ])
# return self
#
# def __next__(self):
# """ Return representation of next state in the sequence. """
# if self.__state_number == self.__state_space_size:
# raise StopIteration
#
# for fluent, value in self.__state.items():
# if value == 1:
# self.__state[fluent] = 0
# else:
# self.__state[fluent] = 1
# break
#
# self.__state_number += 1
# return self.__state
#
# def __getitem__(self, index):
# """
# Return the state representation with given `index`.
#
# :param index: state index in state space
# :type index: int
# """
# state = []
# for fluent in self.__state_fluents:
# value = index % 2
# index //= 2
# state.append((fluent, value))
# return tuple(state)
#
# @classmethod
# def state(cls, valuation):
# """
# Return the state representation of a `valuation` of fluents.
#
# :param valuation: mapping from fluent to boolean value
# :type valuation: list of pairs (Fluent, bool)
# :rtype: OrderedDict
# """
# return OrderedDict(valuation)
#
# @classmethod
# def index(cls, state):
# """
# Return the `state` index in the state space.
#
# :param state: state representation
# :type state: OrderedDict
# :rtype: int
# """
# i = 0
# index = 0
# for _, value in state.items():
# index += value * 2 ** i
# i += 1
# return index
#
# class ActionSpace(object):
# """
# Iterator class for looping over vector representations of
# `actions` in a factored MDP. Each action is implemented by
# an OrderedDict of (problog.logic.Term, 0/1).
#
# :param actions: predicates listing possible actions
# :type actions: list of problog.logic.Term
# """
#
# def __init__(self, actions):
# self.__actions = actions
# self.__action_space_size = len(self.__actions)
#
# def __len__(self):
# """ Return the number of actions of the action space. """
# return self.__action_space_size
#
# def __iter__(self):
# """ Return an iterator over the action space. """
# self.__action_number = 0
# self.__action = OrderedDict([ (action, 0) for action in self.__actions ])
# self.__action[self.__actions[-1]] = 1
# return self
#
# def __next__(self):
# """ Return representation of next action in the sequence. """
# if self.__action_number == self.__action_space_size:
# raise StopIteration
#
# self.__action[self.__actions[self.__action_number - 1]] = 0
# self.__action[self.__actions[self.__action_number]] = 1
#
# self.__action_number += 1
# return self.__action
#
# def __getitem__(self, index):
# """
# Return the action representation with given `index`.
#
# :param index: action index in action space
# :type index: int
# """
# return self.__actions[index]
#
# @classmethod
# def index(cls, action):
# """
# Return action index in the action space.
#
# :param action: action representation
# :type action: OrderedDict
# :rtype: int
# """
# for index, fluent in enumerate(action):
# if action[fluent] == 1:
# return index
. Output only the next line. | states = StateSpace(self.current_state_fluents()) |
Predict the next line for this snippet: <|code_start|>
transition = self.__transition_cache.get(cache, None)
if transition is None:
transition = self.__transition(state, action)
self.__transition_cache[cache] = transition
return transition
def __transition(self, state, action):
"""
Return the probabilities of next state fluents given current
`state` and `action`.
:param state: state vector representation of current state fluents
:type state: list of 0/1 according to state fluents order
:param action: action vector representation
:type action: one-hot vector encoding of action as a list of 0/1
:rtype: list of pairs (problog.logic.Term, float)
"""
evidence = state.copy()
evidence.update(action)
return self._engine.evaluate(self.__next_state_queries, evidence)
def transition_model(self):
"""
Return the transition model of all valid transitions.
:rtype: dict of ((state,action), list of probabilities)
"""
transitions = {}
states = StateSpace(self.current_state_fluents())
<|code_end|>
with the help of current file imports:
import mdpproblog.engine as eng
from mdpproblog.fluent import Fluent, StateSpace, ActionSpace
and context from other files:
# Path: mdpproblog/fluent.py
# class Fluent(object):
# """
# Factory class for building fluent terms. A fluent term is a
# problog.logic.Term with a problog.logic.Constant as last argument
# representing its timestep.
# """
#
# @classmethod
# def create_fluent(cls, term, timestep):
# """"
# Return a new fluent made from `term` with given `timestep`.
#
# :param term: any problog term
# :type term: problog.logic.Term
# :param timestep: timestep numeric value
# :type timestep: int
# :rtype: problog.logic.Term
# """
# args = term.args + (Constant(timestep),)
# return term.with_args(*args)
#
# class StateSpace(object):
# """
# Iterator class for looping over vector representations of
# states in a factored MDP defined by `state_fluents`. Each state
# is implemented by an OrderedDict of (problog.logic.Term, 0/1).
#
# :param state_fluents: predicates defining a state in a given timestep
# :type state_fluents: list of problog.logic.Term
# """
#
# def __init__(self, state_fluents):
# self.__state_fluents = state_fluents
# self.__state_space_size = 2**len(self.__state_fluents)
#
# def __len__(self):
# """ Return the number of states of the state space. """
# return self.__state_space_size
#
# def __iter__(self):
# """ Return an iterator over the state space. """
# self.__state_number = 0
# self.__state = OrderedDict([ (fluent, 1) for fluent in self.__state_fluents ])
# return self
#
# def __next__(self):
# """ Return representation of next state in the sequence. """
# if self.__state_number == self.__state_space_size:
# raise StopIteration
#
# for fluent, value in self.__state.items():
# if value == 1:
# self.__state[fluent] = 0
# else:
# self.__state[fluent] = 1
# break
#
# self.__state_number += 1
# return self.__state
#
# def __getitem__(self, index):
# """
# Return the state representation with given `index`.
#
# :param index: state index in state space
# :type index: int
# """
# state = []
# for fluent in self.__state_fluents:
# value = index % 2
# index //= 2
# state.append((fluent, value))
# return tuple(state)
#
# @classmethod
# def state(cls, valuation):
# """
# Return the state representation of a `valuation` of fluents.
#
# :param valuation: mapping from fluent to boolean value
# :type valuation: list of pairs (Fluent, bool)
# :rtype: OrderedDict
# """
# return OrderedDict(valuation)
#
# @classmethod
# def index(cls, state):
# """
# Return the `state` index in the state space.
#
# :param state: state representation
# :type state: OrderedDict
# :rtype: int
# """
# i = 0
# index = 0
# for _, value in state.items():
# index += value * 2 ** i
# i += 1
# return index
#
# class ActionSpace(object):
# """
# Iterator class for looping over vector representations of
# `actions` in a factored MDP. Each action is implemented by
# an OrderedDict of (problog.logic.Term, 0/1).
#
# :param actions: predicates listing possible actions
# :type actions: list of problog.logic.Term
# """
#
# def __init__(self, actions):
# self.__actions = actions
# self.__action_space_size = len(self.__actions)
#
# def __len__(self):
# """ Return the number of actions of the action space. """
# return self.__action_space_size
#
# def __iter__(self):
# """ Return an iterator over the action space. """
# self.__action_number = 0
# self.__action = OrderedDict([ (action, 0) for action in self.__actions ])
# self.__action[self.__actions[-1]] = 1
# return self
#
# def __next__(self):
# """ Return representation of next action in the sequence. """
# if self.__action_number == self.__action_space_size:
# raise StopIteration
#
# self.__action[self.__actions[self.__action_number - 1]] = 0
# self.__action[self.__actions[self.__action_number]] = 1
#
# self.__action_number += 1
# return self.__action
#
# def __getitem__(self, index):
# """
# Return the action representation with given `index`.
#
# :param index: action index in action space
# :type index: int
# """
# return self.__actions[index]
#
# @classmethod
# def index(cls, action):
# """
# Return action index in the action space.
#
# :param action: action representation
# :type action: OrderedDict
# :rtype: int
# """
# for index, fluent in enumerate(action):
# if action[fluent] == 1:
# return index
, which may contain function names, class names, or code. Output only the next line. | actions = ActionSpace(self.actions()) |
Based on the snippet: <|code_start|># This file is part of MDP-ProbLog.
# MDP-ProbLog is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# MDP-ProbLog is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with MDP-ProbLog. If not, see <http://www.gnu.org/licenses/>.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../mdpproblog'))
class TestFluent(unittest.TestCase):
def test_fluent(self):
terms = [
Term('t0'),
Term('t1', args=(Constant('c1'),)),
Term('t2', args=(Constant('c1'), Constant('c2')))
]
for term in terms:
for timestep in range(2):
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import sys
import unittest
from mdpproblog.fluent import Fluent, StateSpace, ActionSpace
from problog.logic import Term, Constant
and context (classes, functions, sometimes code) from other files:
# Path: mdpproblog/fluent.py
# class Fluent(object):
# """
# Factory class for building fluent terms. A fluent term is a
# problog.logic.Term with a problog.logic.Constant as last argument
# representing its timestep.
# """
#
# @classmethod
# def create_fluent(cls, term, timestep):
# """"
# Return a new fluent made from `term` with given `timestep`.
#
# :param term: any problog term
# :type term: problog.logic.Term
# :param timestep: timestep numeric value
# :type timestep: int
# :rtype: problog.logic.Term
# """
# args = term.args + (Constant(timestep),)
# return term.with_args(*args)
#
# class StateSpace(object):
# """
# Iterator class for looping over vector representations of
# states in a factored MDP defined by `state_fluents`. Each state
# is implemented by an OrderedDict of (problog.logic.Term, 0/1).
#
# :param state_fluents: predicates defining a state in a given timestep
# :type state_fluents: list of problog.logic.Term
# """
#
# def __init__(self, state_fluents):
# self.__state_fluents = state_fluents
# self.__state_space_size = 2**len(self.__state_fluents)
#
# def __len__(self):
# """ Return the number of states of the state space. """
# return self.__state_space_size
#
# def __iter__(self):
# """ Return an iterator over the state space. """
# self.__state_number = 0
# self.__state = OrderedDict([ (fluent, 1) for fluent in self.__state_fluents ])
# return self
#
# def __next__(self):
# """ Return representation of next state in the sequence. """
# if self.__state_number == self.__state_space_size:
# raise StopIteration
#
# for fluent, value in self.__state.items():
# if value == 1:
# self.__state[fluent] = 0
# else:
# self.__state[fluent] = 1
# break
#
# self.__state_number += 1
# return self.__state
#
# def __getitem__(self, index):
# """
# Return the state representation with given `index`.
#
# :param index: state index in state space
# :type index: int
# """
# state = []
# for fluent in self.__state_fluents:
# value = index % 2
# index //= 2
# state.append((fluent, value))
# return tuple(state)
#
# @classmethod
# def state(cls, valuation):
# """
# Return the state representation of a `valuation` of fluents.
#
# :param valuation: mapping from fluent to boolean value
# :type valuation: list of pairs (Fluent, bool)
# :rtype: OrderedDict
# """
# return OrderedDict(valuation)
#
# @classmethod
# def index(cls, state):
# """
# Return the `state` index in the state space.
#
# :param state: state representation
# :type state: OrderedDict
# :rtype: int
# """
# i = 0
# index = 0
# for _, value in state.items():
# index += value * 2 ** i
# i += 1
# return index
#
# class ActionSpace(object):
# """
# Iterator class for looping over vector representations of
# `actions` in a factored MDP. Each action is implemented by
# an OrderedDict of (problog.logic.Term, 0/1).
#
# :param actions: predicates listing possible actions
# :type actions: list of problog.logic.Term
# """
#
# def __init__(self, actions):
# self.__actions = actions
# self.__action_space_size = len(self.__actions)
#
# def __len__(self):
# """ Return the number of actions of the action space. """
# return self.__action_space_size
#
# def __iter__(self):
# """ Return an iterator over the action space. """
# self.__action_number = 0
# self.__action = OrderedDict([ (action, 0) for action in self.__actions ])
# self.__action[self.__actions[-1]] = 1
# return self
#
# def __next__(self):
# """ Return representation of next action in the sequence. """
# if self.__action_number == self.__action_space_size:
# raise StopIteration
#
# self.__action[self.__actions[self.__action_number - 1]] = 0
# self.__action[self.__actions[self.__action_number]] = 1
#
# self.__action_number += 1
# return self.__action
#
# def __getitem__(self, index):
# """
# Return the action representation with given `index`.
#
# :param index: action index in action space
# :type index: int
# """
# return self.__actions[index]
#
# @classmethod
# def index(cls, action):
# """
# Return action index in the action space.
#
# :param action: action representation
# :type action: OrderedDict
# :rtype: int
# """
# for index, fluent in enumerate(action):
# if action[fluent] == 1:
# return index
. Output only the next line. | fluent = Fluent.create_fluent(term, timestep) |
Given the following code snippet before the placeholder: <|code_start|> reward = self.__collect_reward(state, action)
state = self.__sample_next_state(state, action)
total += discount * reward
path.extend([action, state])
discount *= gamma
return total, path
def __select_action(self, state):
"""
Return the action prescribed by its policy for the given `state`.
:param state: state represented as a valuation over fluents
:type state: tuple of pairs (str, bool)
:rtype: str
"""
a = self._policy[state]
for action in self.__actions:
if action[a] == 1:
return action
def __collect_reward(self, state, action):
"""
Return the reward for applying `action` to `state`.
:param state: state represented as a valuation over fluents
:type state: tuple of pairs (str, bool)
:param action: action represented as a valuation over fluents
:type action: tuple of pairs (str, bool)
:rtype: float
"""
<|code_end|>
, predict the next line using imports from the current file:
import random
from mdpproblog.fluent import StateSpace, ActionSpace
and context including class names, function names, and sometimes code from other files:
# Path: mdpproblog/fluent.py
# class StateSpace(object):
# """
# Iterator class for looping over vector representations of
# states in a factored MDP defined by `state_fluents`. Each state
# is implemented by an OrderedDict of (problog.logic.Term, 0/1).
#
# :param state_fluents: predicates defining a state in a given timestep
# :type state_fluents: list of problog.logic.Term
# """
#
# def __init__(self, state_fluents):
# self.__state_fluents = state_fluents
# self.__state_space_size = 2**len(self.__state_fluents)
#
# def __len__(self):
# """ Return the number of states of the state space. """
# return self.__state_space_size
#
# def __iter__(self):
# """ Return an iterator over the state space. """
# self.__state_number = 0
# self.__state = OrderedDict([ (fluent, 1) for fluent in self.__state_fluents ])
# return self
#
# def __next__(self):
# """ Return representation of next state in the sequence. """
# if self.__state_number == self.__state_space_size:
# raise StopIteration
#
# for fluent, value in self.__state.items():
# if value == 1:
# self.__state[fluent] = 0
# else:
# self.__state[fluent] = 1
# break
#
# self.__state_number += 1
# return self.__state
#
# def __getitem__(self, index):
# """
# Return the state representation with given `index`.
#
# :param index: state index in state space
# :type index: int
# """
# state = []
# for fluent in self.__state_fluents:
# value = index % 2
# index //= 2
# state.append((fluent, value))
# return tuple(state)
#
# @classmethod
# def state(cls, valuation):
# """
# Return the state representation of a `valuation` of fluents.
#
# :param valuation: mapping from fluent to boolean value
# :type valuation: list of pairs (Fluent, bool)
# :rtype: OrderedDict
# """
# return OrderedDict(valuation)
#
# @classmethod
# def index(cls, state):
# """
# Return the `state` index in the state space.
#
# :param state: state representation
# :type state: OrderedDict
# :rtype: int
# """
# i = 0
# index = 0
# for _, value in state.items():
# index += value * 2 ** i
# i += 1
# return index
#
# class ActionSpace(object):
# """
# Iterator class for looping over vector representations of
# `actions` in a factored MDP. Each action is implemented by
# an OrderedDict of (problog.logic.Term, 0/1).
#
# :param actions: predicates listing possible actions
# :type actions: list of problog.logic.Term
# """
#
# def __init__(self, actions):
# self.__actions = actions
# self.__action_space_size = len(self.__actions)
#
# def __len__(self):
# """ Return the number of actions of the action space. """
# return self.__action_space_size
#
# def __iter__(self):
# """ Return an iterator over the action space. """
# self.__action_number = 0
# self.__action = OrderedDict([ (action, 0) for action in self.__actions ])
# self.__action[self.__actions[-1]] = 1
# return self
#
# def __next__(self):
# """ Return representation of next action in the sequence. """
# if self.__action_number == self.__action_space_size:
# raise StopIteration
#
# self.__action[self.__actions[self.__action_number - 1]] = 0
# self.__action[self.__actions[self.__action_number]] = 1
#
# self.__action_number += 1
# return self.__action
#
# def __getitem__(self, index):
# """
# Return the action representation with given `index`.
#
# :param index: action index in action space
# :type index: int
# """
# return self.__actions[index]
#
# @classmethod
# def index(cls, action):
# """
# Return action index in the action space.
#
# :param action: action representation
# :type action: OrderedDict
# :rtype: int
# """
# for index, fluent in enumerate(action):
# if action[fluent] == 1:
# return index
. Output only the next line. | state = StateSpace.state(state) |
Based on the snippet: <|code_start|># (at your option) any later version.
# MDP-ProbLog is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with MDP-ProbLog. If not, see <http://www.gnu.org/licenses/>.
class Simulator(object):
"""
Simulator class for MDPs. Given an `mdp` and a `policy`,
it generates histories and its corresponding
expected cummulative discounted rewards.
:param mdp: an MDP formulation
:type mdp: mdpproblog.mdp.MDP object
:param policy: mapping from state to action
:type policy: dict of (tuple, str)
"""
def __init__(self, mdp, policy):
self._mdp = mdp
self._policy = policy
self.__current_state_fluents = mdp.current_state_fluents()
<|code_end|>
, predict the immediate next line with the help of imports:
import random
from mdpproblog.fluent import StateSpace, ActionSpace
and context (classes, functions, sometimes code) from other files:
# Path: mdpproblog/fluent.py
# class StateSpace(object):
# """
# Iterator class for looping over vector representations of
# states in a factored MDP defined by `state_fluents`. Each state
# is implemented by an OrderedDict of (problog.logic.Term, 0/1).
#
# :param state_fluents: predicates defining a state in a given timestep
# :type state_fluents: list of problog.logic.Term
# """
#
# def __init__(self, state_fluents):
# self.__state_fluents = state_fluents
# self.__state_space_size = 2**len(self.__state_fluents)
#
# def __len__(self):
# """ Return the number of states of the state space. """
# return self.__state_space_size
#
# def __iter__(self):
# """ Return an iterator over the state space. """
# self.__state_number = 0
# self.__state = OrderedDict([ (fluent, 1) for fluent in self.__state_fluents ])
# return self
#
# def __next__(self):
# """ Return representation of next state in the sequence. """
# if self.__state_number == self.__state_space_size:
# raise StopIteration
#
# for fluent, value in self.__state.items():
# if value == 1:
# self.__state[fluent] = 0
# else:
# self.__state[fluent] = 1
# break
#
# self.__state_number += 1
# return self.__state
#
# def __getitem__(self, index):
# """
# Return the state representation with given `index`.
#
# :param index: state index in state space
# :type index: int
# """
# state = []
# for fluent in self.__state_fluents:
# value = index % 2
# index //= 2
# state.append((fluent, value))
# return tuple(state)
#
# @classmethod
# def state(cls, valuation):
# """
# Return the state representation of a `valuation` of fluents.
#
# :param valuation: mapping from fluent to boolean value
# :type valuation: list of pairs (Fluent, bool)
# :rtype: OrderedDict
# """
# return OrderedDict(valuation)
#
# @classmethod
# def index(cls, state):
# """
# Return the `state` index in the state space.
#
# :param state: state representation
# :type state: OrderedDict
# :rtype: int
# """
# i = 0
# index = 0
# for _, value in state.items():
# index += value * 2 ** i
# i += 1
# return index
#
# class ActionSpace(object):
# """
# Iterator class for looping over vector representations of
# `actions` in a factored MDP. Each action is implemented by
# an OrderedDict of (problog.logic.Term, 0/1).
#
# :param actions: predicates listing possible actions
# :type actions: list of problog.logic.Term
# """
#
# def __init__(self, actions):
# self.__actions = actions
# self.__action_space_size = len(self.__actions)
#
# def __len__(self):
# """ Return the number of actions of the action space. """
# return self.__action_space_size
#
# def __iter__(self):
# """ Return an iterator over the action space. """
# self.__action_number = 0
# self.__action = OrderedDict([ (action, 0) for action in self.__actions ])
# self.__action[self.__actions[-1]] = 1
# return self
#
# def __next__(self):
# """ Return representation of next action in the sequence. """
# if self.__action_number == self.__action_space_size:
# raise StopIteration
#
# self.__action[self.__actions[self.__action_number - 1]] = 0
# self.__action[self.__actions[self.__action_number]] = 1
#
# self.__action_number += 1
# return self.__action
#
# def __getitem__(self, index):
# """
# Return the action representation with given `index`.
#
# :param index: action index in action space
# :type index: int
# """
# return self.__actions[index]
#
# @classmethod
# def index(cls, action):
# """
# Return action index in the action space.
#
# :param action: action representation
# :type action: OrderedDict
# :rtype: int
# """
# for index, fluent in enumerate(action):
# if action[fluent] == 1:
# return index
. Output only the next line. | self.__actions = ActionSpace(mdp.actions()) |
Using the snippet: <|code_start|> self.assertEqual(value, expected_assignments[str(term)])
def test_add_assignment(self):
engine = self.engines['sysadmin']
fluents = engine.declarations('state_fluent')
for i in range(2**len(fluents)):
state = Term('__s%d__' % i)
value = (-1)**(i % 2) * 10.0*i
node = engine.add_assignment(state, value)
fact = engine.get_fact(node)
self.assertEqual(fact.functor, 'utility')
self.assertEqual(fact.args, (state, Constant(value)))
def test_get_assignment(self):
engine = self.engines['sysadmin']
assignments = engine.assignments('utility')
instructions = engine.get_instructions_table()
facts = instructions['fact']
for node, fact in facts:
if fact.functor == 'utility':
assignment = engine.get_assignment(node)
self.assertEqual(assignment[0], fact.args[0])
self.assertEqual(assignment[1], fact.args[1])
else:
with self.assertRaises(IndexError):
not_an_assignment = engine.get_assignment(node)
def test_add_fact(self):
engine = self.engines['sysadmin']
terms = engine.declarations('state_fluent')
<|code_end|>
, determine the next line of code. You have imports:
import os
import sys
import unittest
import random
import mdpproblog.engine as eng
from problog.logic import Term, Constant, AnnotatedDisjunction
from problog.program import PrologString
from mdpproblog.fluent import Fluent
and context (class names, function names, or code) available:
# Path: mdpproblog/fluent.py
# class Fluent(object):
# """
# Factory class for building fluent terms. A fluent term is a
# problog.logic.Term with a problog.logic.Constant as last argument
# representing its timestep.
# """
#
# @classmethod
# def create_fluent(cls, term, timestep):
# """"
# Return a new fluent made from `term` with given `timestep`.
#
# :param term: any problog term
# :type term: problog.logic.Term
# :param timestep: timestep numeric value
# :type timestep: int
# :rtype: problog.logic.Term
# """
# args = term.args + (Constant(timestep),)
# return term.with_args(*args)
. Output only the next line. | terms = [Fluent.create_fluent(term, 0) for term in terms] |
Next line prediction: <|code_start|> def test_format(self):
"""Test number formatting function"""
tp = Template()
T = 12121414141232
G = 12121414141.235
M = 12121414
k = 12123.23
z = 12.25
m = 0.01212
u = 0.00001212
n = 0.00000001212
Ts = tp.format_field(T, '3.2h')
Gs = tp.format_field(G, '3.2h')
Ms = tp.format_field(M, '3.2h')
ks = tp.format_field(k, '3.2h')
zs = tp.format_field(z, '3.2h')
ms = tp.format_field(m, '3.2h')
us = tp.format_field(u, '3.2h')
ns = tp.format_field(n, '3.2h')
self.assertEqual(Ts, '12.12T')
self.assertEqual(Gs, '12.12G')
self.assertEqual(Ms, '12.12M')
self.assertEqual(ks, '12.12k')
self.assertEqual(zs, '12.25')
self.assertEqual(ms, '12.12m')
self.assertEqual(us, '12.12u')
self.assertEqual(ns, '12.12n')
def test_recast(self):
"""Test recasting string to float, int, date, or other"""
<|code_end|>
. Use current file imports:
(import unittest
import datetime
from snakemakelib.report.utils import Template, recast)
and context including class names, function names, or small code snippets from other files:
# Path: snakemakelib/report/utils.py
# class Template(string.Formatter):
# _suffix = {'-3':('n', 10**(-9)), '-2':('u', 10**(-6)), '-1':('m', 10**(-3)), '0':('', 1), '1':('k', 10**3), '2':('M', 10**6), '3':('G', 10**9), '4':('T', 10**12), '5':('P', 10**15)}
# def format_field(self, value, spec):
# sfx = ""
# if spec.endswith('h'):
# if not value == 0:
# spec = spec[:-1] + 'f'
# n = (math.floor(math.log(value,10) / 3))
# value = value / self._suffix[str(n)][1]
# sfx = self._suffix[str(n)][0]
# else:
# spec = 'd'
# return super(Template, self).format_field(value, spec) + sfx
#
# def recast(x, strpfmt="%b %d %H:%M:%S"):
# x = x.rstrip().lstrip()
# if re.match('^[0-9]+$', x):
# return int(x)
# elif re.match('^[0-9]+[,\.][0-9]+$', x):
# return float(x.replace(",", "."))
# elif re.search("%", x):
# return float(x.replace(",", ".").replace("%", ""))
# else:
# try:
# dateobj = datetime.strptime(x, strpfmt)
# return dateobj
# except:
# return x
. Output only the next line. | self.assertEqual(type(recast("1234")), int) |
Given the following code snippet before the placeholder: <|code_start|>''' Provides access to templates and css files '''
logger = logging.getLogger(__name__)
# Template path and templates
SmlTemplateEnv = Environment(loader = PackageLoader("snakemakelib", "_templates"))
SmlTemplateEnv.globals.update(zip=zip)
# Static css files
<|code_end|>
, predict the next line using imports from the current file:
import logging
from os.path import join
from jinja2 import Environment, PackageLoader
from snakemakelib.config import SNAKEMAKELIB_PATH
and context including class names, function names, and sometimes code from other files:
# Path: snakemakelib/config.py
# SNAKEMAKELIB_PATH = os.path.dirname(__file__)
. Output only the next line. | css_files = [join(SNAKEMAKELIB_PATH, 'static', 'basic.css')] |
Given the code snippet: <|code_start|># Copyright (C) 2014 by Per Unneberg
# pylint: disable=R0904, C0301, C0103
class TestUtils(unittest.TestCase):
"""Test snakemakelib.utils functions"""
def test_isoformat(self):
"""Test isoformatting function"""
s = "120924"
self.assertEqual(isoformat(s), "2012-09-24")
class TestStat(unittest.TestCase):
"""Test snakemakelib.stat functions"""
def test_is_installed(self):
"""Test function for checking that program is installed or that path exists"""
<|code_end|>
, generate the next line using the imports in this file:
import unittest
from snakemakelib.stat import is_installed
from snakemakelib.utils import isoformat
and context (functions, classes, or occasionally code) from other files:
# Path: snakemakelib/stat.py
# def is_installed(prog):
# if not shutil.which(prog) is None:
# return True
# if os.path.exists(prog):
# return True
# return False
#
# Path: snakemakelib/utils.py
# def isoformat(s=None):
# """Return isoformat date from string"""
# if s is None:
# return
# # Assume YYMMDD format
# if len(s) == 6:
# (YY, MM, DD) = (s[0:2], s[2:4], s[4:6])
# return date(int("20{YY}".format(YY=YY)), int(MM.lstrip("0")), int(DD)).isoformat()
. Output only the next line. | self.assertTrue (is_installed("ls")) |
Predict the next line for this snippet: <|code_start|># Copyright (C) 2014 by Per Unneberg
# pylint: disable=R0904, C0301, C0103
class TestUtils(unittest.TestCase):
"""Test snakemakelib.utils functions"""
def test_isoformat(self):
"""Test isoformatting function"""
s = "120924"
<|code_end|>
with the help of current file imports:
import unittest
from snakemakelib.stat import is_installed
from snakemakelib.utils import isoformat
and context from other files:
# Path: snakemakelib/stat.py
# def is_installed(prog):
# if not shutil.which(prog) is None:
# return True
# if os.path.exists(prog):
# return True
# return False
#
# Path: snakemakelib/utils.py
# def isoformat(s=None):
# """Return isoformat date from string"""
# if s is None:
# return
# # Assume YYMMDD format
# if len(s) == 6:
# (YY, MM, DD) = (s[0:2], s[2:4], s[4:6])
# return date(int("20{YY}".format(YY=YY)), int(MM.lstrip("0")), int(DD)).isoformat()
, which may contain function names, class names, or code. Output only the next line. | self.assertEqual(isoformat(s), "2012-09-24") |
Given snippet: <|code_start|># PM Platform model. Free-form text providing further details of the platform/technology used.
# PU Platform unit (e.g. flowcell-barcode.lane for Illumina or slide for SOLiD). Unique identifier.
# SM Sample. Use pool name where a pool is being sequenced.
class ReadGroup(RunRegexp):
"""Adds formatting function for generating read group option string"""
_group_keys = ['ID', 'CN', 'DS', 'DT', 'FO', 'KS',
'LB', 'PG', 'PI', 'PL', 'PU', 'SM']
_extra_keys = ['PATH']
_group_dict = {'ID': 'identifier', 'CN': 'center', 'DS': 'description',
'DT': 'date', 'FO': 'floworder', 'KS': 'keysequence',
'LB': 'library', 'PG': 'program', 'PI': 'insertsize',
'PL': 'platform', 'PU': 'platform-unit', 'SM': 'sample'}
def __init__(self, regexp=None, opt_prefix="--", *args, **kwargs):
super(ReadGroup, self).__init__(regexp, *args, **kwargs)
self._opt_prefix = opt_prefix
def _post_process_keys(self, m):
self['PATH'] = os.path.dirname(m.string)
if 'ID' not in self.keys() or not self.get('ID', ""):
# inv_map = {v: k for (k, v) in list(self.re.groupindex.items())}
self['ID'] = os.path.basename(self.fmt.format(**self))
def _fmt_string(self, k):
"""Take care of date string"""
if k == 'DT':
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
import os
from itertools import groupby
from snakemakelib.utils import isoformat
from snakemakelib.log import LoggerManager
and context:
# Path: snakemakelib/utils.py
# def isoformat(s=None):
# """Return isoformat date from string"""
# if s is None:
# return
# # Assume YYMMDD format
# if len(s) == 6:
# (YY, MM, DD) = (s[0:2], s[2:4], s[4:6])
# return date(int("20{YY}".format(YY=YY)), int(MM.lstrip("0")), int(DD)).isoformat()
#
# Path: snakemakelib/log.py
# class LoggerManager(object):
# __metaclass__ = Singleton
#
# _loggers = {}
# _fmt = "%(asctime)s (%(levelname)s) %(name)s : %(message)s"
# _ch = logging.StreamHandler()
# _formatter = logging.Formatter(_fmt)
# _ch.setFormatter(_formatter)
# _has_loaded_config = False
#
# def __init__(self, *args, **kwargs):
# if not LoggerManager._has_loaded_config:
# # Add snakemakelib root handler
# LoggerManager._loggers['snakemakelib'] = logging.getLogger('snakemakelib')
# LoggerManager._loggers['snakemakelib'].setLevel(logging.WARNING)
# LoggerManager._loggers['snakemakelib'].addHandler(LoggerManager._ch)
# self._load_config()
# LoggerManager._has_loaded_config = True
#
# def _load_config(self):
# conf = {}
# if os.path.exists("logconf.yaml"):
# with open ("logconf.yaml", "r") as fh:
# conf = yaml.load(fh)
# else:
# conf = self._load_sml_config()
# if conf:
# logging.config.dictConfig(conf)
#
# def _load_sml_config(self):
# cfg = {}
# for fn in [os.path.join(os.getenv("HOME"), ".smlconf.yaml"),
# os.path.join(os.curdir, "smlconf.yaml")]:
# if (fn is None):
# continue
# if not os.path.exists(fn):
# continue
# with open(fn, "r") as fh:
# cfg_tmp = yaml.load(fh)
# if cfg_tmp:
# cfg.update(cfg_tmp)
# return cfg.get("logging", {})
#
# @staticmethod
# def getLogger(name=None):
# if not name:
# smllogger = logging.getLogger()
# return logging.getLogger()
# elif name not in LoggerManager._loggers.keys():
# LoggerManager._loggers[name] = logging.getLogger(str(name))
# LoggerManager._loggers[name].addHandler(LoggerManager._ch)
# return LoggerManager._loggers[name]
# else:
# logging.warn("Trying to get already existing logger")
which might include code, classes, or functions. Output only the next line. | return isoformat(self[k]) |
Continue the code snippet: <|code_start|> """
if data is None:
return ""
else:
tab_tt = tt.Texttable()
tab_tt.set_precision(2)
if not header is None:
data[0] = header
w = [len(c) + 2 for c in data[0]]
for r in data:
for i in range(0, len(r)):
w[i] = max(w[i], len(r[i]) + 2)
tab_tt.add_rows(data)
tab_tt.set_cols_width(w)
tab_tt.set_cols_align("r" * len(data[0]))
if indent:
return _indent_texttable_for_rst(tab_tt)
else:
return tab_tt.draw()
class DataFrame(object):
"""Light weight data frame object
A data frame is represented as an OrderedDict.
Args:
*args: if provided, must be a list of lists that upon initialization is converted to an OrderedDict. The first list is treated as a header.
"""
_format = collections.OrderedDict()
<|code_end|>
. Use current file imports:
import os
import sys
import re
import csv
import texttable as tt
import collections
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from snakemakelib.report.utils import Template
from pylab import *
and context (classes, functions, or code) from other files:
# Path: snakemakelib/report/utils.py
# class Template(string.Formatter):
# _suffix = {'-3':('n', 10**(-9)), '-2':('u', 10**(-6)), '-1':('m', 10**(-3)), '0':('', 1), '1':('k', 10**3), '2':('M', 10**6), '3':('G', 10**9), '4':('T', 10**12), '5':('P', 10**15)}
# def format_field(self, value, spec):
# sfx = ""
# if spec.endswith('h'):
# if not value == 0:
# spec = spec[:-1] + 'f'
# n = (math.floor(math.log(value,10) / 3))
# value = value / self._suffix[str(n)][1]
# sfx = self._suffix[str(n)][0]
# else:
# spec = 'd'
# return super(Template, self).format_field(value, spec) + sfx
. Output only the next line. | _tp = Template() |
Given the code snippet: <|code_start|> Number of input reads | 4699845
Average input read length | 202
UNIQUE READS:
Uniquely mapped reads number | 4011114
Uniquely mapped reads % | 85.35%
Average mapped length | 198.26
Number of splices: Total | 1452777
Number of splices: Annotated (sjdb) | 1424534
Number of splices: GT/AG | 1429760
Number of splices: GC/AG | 12299
Number of splices: AT/AC | 1528
Number of splices: Non-canonical | 9190
Mismatch rate per base, % | 0.70%
Deletion rate per base | 0.02%
Deletion average length | 1.76
Insertion rate per base | 0.01%
Insertion average length | 1.46
MULTI-MAPPING READS:
Number of reads mapped to multiple loci | 267393
% of reads mapped to multiple loci | 5.69%
Number of reads mapped to too many loci | 7530
% of reads mapped to too many loci | 0.16%
UNMAPPED READS:
% of reads unmapped: too many mismatches | 0.00%
% of reads unmapped: too short | 8.73%
% of reads unmapped: other | 0.08%
"""
self.f = io.StringIO(self.data)
def test_collect_results(self):
<|code_end|>
, generate the next line using the imports in this file:
import unittest
import io
from snakemakelib.bio.ngs.align.star import Star
and context (functions, classes, or occasionally code) from other files:
# Path: snakemakelib/bio/ngs/align/star.py
# class Star(Results):
# """Star: container class for star results"""
# _keys = ['align']
#
# def __init__(self, *args, **kw):
# self['align'] = None
# super(Star, self).__init__(*args, **kw)
#
# def _collect_results(self):
# smllogger.info("collecting results")
# df = None
# for (f, s) in zip(self._inputfiles, self._samples):
# smllogger.debug("Reading input file {f} for sample {s}".format(f=f, s=s))
# df_tmp = pd.read_table(f, sep="|",
# names=["name", "value"],
# engine="python", skiprows=[7, 22, 27])
# d = {trim_header(x, underscore=True, percent=True): recast(y)
# for (x, y) in zip(df_tmp["name"], df_tmp["value"])}
# if df is None:
# df = pd.DataFrame(data=d, index=pd.Index([s], name="Sample"))
# else:
# df = df.append(pd.DataFrame(data=d, index=pd.Index([s], name="Sample")))
# df['mismatch_sum'] = df['Mismatch_rate_per_base__PCT'] +\
# df['Deletion_rate_per_base'] + df['Insertion_rate_per_base']
# df['PCT_of_reads_unmapped'] = df['PCT_of_reads_unmapped:_other'] +\
# df['PCT_of_reads_unmapped:_too_many_mismatches'] +\
# df['PCT_of_reads_unmapped:_too_short']
# self['align'] = df
. Output only the next line. | st = Star([(self.f, 'bar')]) |
Using the snippet: <|code_start|># Copyright (C) 2015 by Per Unneberg
# pylint: disable=R0904
logging.basicConfig(level=logging.DEBUG)
class TestRules(unittest.TestCase):
"""Test rules"""
def setUp(self):
self.workflow = Workflow("foo")
name = self.workflow.add_rule(name="bar")
self.workflow._rules["bar"].set_output(*(), **{"foo":"bar"})
self.workflow._rules["bar"].set_params(*(), **{"cmd":"foo", "options":["foo", "bar"]})
def test_create_rule(self):
self.assertListEqual(["bar"], [x.name for x in self.workflow.rules])
<|code_end|>
, determine the next line of code. You have imports:
import os
import unittest
import logging
from nose.tools import raises
from snakemake.workflow import Workflow
from snakemake.exceptions import UnknownRuleException, NoRulesException
from snakemakelib.rules import create_rule_from_existing
and context (class names, function names, or code) available:
# Path: snakemakelib/rules.py
# def create_rule_from_existing(name, template, workflow, **kw):
# """Create rule from a template.
#
# Create rule from existing rule and add it to the workflow. By
# passing keyword arguments it is also possible to update/modify the
# input, output and/or params.
#
# Args:
# name (str): name of new rule
# template (str): name of existing template rule
# workflow (:class:`Workflow <snakemake.workflow.Workflow>`): snakemake workflow
# kw (dict): keyword argument for updating input, output and/or params
#
# Returns:
# None
# """
# assert type(workflow) is Workflow, "workflow is not a Workflow: {}".format(workflow)
# try:
# rule = copy.copy(workflow.get_rule(template))
# rule.name = name
# except:
# smllogger.warn("no such template rule '{}'; make sure you have included the template rule file".format(template))
# raise
# workflow.add_rule(name=rule.name)
# workflow._rules[name] = rule
# if kw.get('output'):
# assert type(kw['output']) is tuple, "output argument must be a tuple of type (tuple, dict)"
# rule._output = OutputFiles()
# workflow._rules[name].set_output(*kw['output'][0], **kw['output'][1])
# if kw.get('input'):
# assert type(kw['input']) is tuple, "input argument must be a tuple of type (tuple, dict)"
# workflow._rules[name].set_input(*kw['input'][0], **kw['input'][1])
# if kw.get('params'):
# assert type(kw['params']) is tuple, "params argument must be a tuple of type (tuple, dict)"
# workflow._rules[name].set_params(*kw['params'][0], **kw['params'][1])
. Output only the next line. | create_rule_from_existing(name="foo", template="bar", workflow=self.workflow) |
Given the following code snippet before the placeholder: <|code_start|># Copyright (C) 2015 by Per Unneberg
# pylint: disable=R0904, C0301, C0103
class TestFindFiles(unittest.TestCase):
def setUp(self):
"""Setup text fixtures"""
self.walk = [
[os.curdir, ['foo', 'bar'], ['1_121023_FLOWCELL_FOO.fastq.gz', 'bar.txt']],
['./foo', [], ['foo.txt', '1_121023_FLOWCELL_BAR.fastq.gz']],
['./bar', [], ['bar.txt']],
]
@patch('snakemakelib.bio.ngs.utils.os.walk')
def test_find_fastq_files(self, mock_walk):
"""Find fastq files using match"""
mock_walk.return_value = self.walk
<|code_end|>
, predict the next line using imports from the current file:
import unittest
import os
from unittest.mock import patch
from snakemakelib.bio.ngs.utils import find_files
and context including class names, function names, and sometimes code from other files:
# Path: snakemakelib/bio/ngs/utils.py
# def find_files(regexp, path=os.curdir, search=False, limit=None):
# """Find files in path that comply with a regular expression.
#
# Args:
# regexp (RegexpDict | str): regular expression object of class
# <RegexpDict> or <str>
# path (str): path to search
# search (bool): use re.search instead of re.match for pattern matching
# limit (dict): dictionary where keys correspond to regular expression
# grouping labels and values are lists that limit the
# returned pattern
#
# Returns:
# flist: list of file names, prepended with root path
# """
# if isinstance(regexp, RegexpDict):
# r = regexp.re
# else:
# if not regexp:
# return []
# r = re.compile(regexp)
# if not limit is None and any(k not in r.groupindex.keys() for k in limit.keys()):
# smllogger.warning("""Some limit keys '{}' not in regexp
# groupindex '{}'; disregarding limit option for these
# keys""".format(list(limit.keys()), list(r.groupindex.keys())))
# re_fn = r.search if search else r.match
# flist = []
# for root, dirs, files in os.walk(path):
# for x in files:
# m = re_fn(x)
# if m is None:
# continue
# if limit:
# if any([m.group(k) in limit[k] for k in limit.keys() if k in m.groupdict().keys()]):
# flist += [os.path.join(root, x)]
# else:
# flist += [os.path.join(root, x)]
# return sorted(flist)
. Output only the next line. | f = find_files(regexp="\w+.fastq.gz") |
Based on the snippet: <|code_start|>def set_cmd(home, cmd, module):
"""Set the command, checking if the program is installed in the
process.
Args:
home (str): path to application
cmd (str): the actual command name
module (str): the calling module
Returns:
str: full path to the command
Example:
cmd = set_cmd("/path/to/cmd", "helloworld")
print(cmd)
# prints /path/to/cmd/helloworld
Raises:
NotInstalledError: if program not found in path
"""
if home:
cmd = os.path.join(home, cmd)
else:
try:
cmd = shutil.which(cmd)
except:
pass
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import shutil
from datetime import datetime, date
from snakemakelib.log import LoggerManager
from snakemakelib.stat import is_installed
and context (classes, functions, sometimes code) from other files:
# Path: snakemakelib/log.py
# class LoggerManager(object):
# __metaclass__ = Singleton
#
# _loggers = {}
# _fmt = "%(asctime)s (%(levelname)s) %(name)s : %(message)s"
# _ch = logging.StreamHandler()
# _formatter = logging.Formatter(_fmt)
# _ch.setFormatter(_formatter)
# _has_loaded_config = False
#
# def __init__(self, *args, **kwargs):
# if not LoggerManager._has_loaded_config:
# # Add snakemakelib root handler
# LoggerManager._loggers['snakemakelib'] = logging.getLogger('snakemakelib')
# LoggerManager._loggers['snakemakelib'].setLevel(logging.WARNING)
# LoggerManager._loggers['snakemakelib'].addHandler(LoggerManager._ch)
# self._load_config()
# LoggerManager._has_loaded_config = True
#
# def _load_config(self):
# conf = {}
# if os.path.exists("logconf.yaml"):
# with open ("logconf.yaml", "r") as fh:
# conf = yaml.load(fh)
# else:
# conf = self._load_sml_config()
# if conf:
# logging.config.dictConfig(conf)
#
# def _load_sml_config(self):
# cfg = {}
# for fn in [os.path.join(os.getenv("HOME"), ".smlconf.yaml"),
# os.path.join(os.curdir, "smlconf.yaml")]:
# if (fn is None):
# continue
# if not os.path.exists(fn):
# continue
# with open(fn, "r") as fh:
# cfg_tmp = yaml.load(fh)
# if cfg_tmp:
# cfg.update(cfg_tmp)
# return cfg.get("logging", {})
#
# @staticmethod
# def getLogger(name=None):
# if not name:
# smllogger = logging.getLogger()
# return logging.getLogger()
# elif name not in LoggerManager._loggers.keys():
# LoggerManager._loggers[name] = logging.getLogger(str(name))
# LoggerManager._loggers[name].addHandler(LoggerManager._ch)
# return LoggerManager._loggers[name]
# else:
# logging.warn("Trying to get already existing logger")
#
# Path: snakemakelib/stat.py
# def is_installed(prog):
# if not shutil.which(prog) is None:
# return True
# if os.path.exists(prog):
# return True
# return False
. Output only the next line. | if not is_installed(cmd): |
Here is a snippet: <|code_start|># Copyright (C) 2015 by Per Unneberg
# pylint: disable=R0904
logger = logging.getLogger(__name__)
class TestTuxedoReadGroup(unittest.TestCase):
"""Test TuxedoReadGroup class"""
def test_rg_init(self):
"""Test initializing TuxedoReadGroup"""
<|code_end|>
. Write the next line using the current file imports:
import unittest
import logging
from snakemakelib.bio.ngs.rnaseq.tuxedo import TuxedoReadGroup
and context from other files:
# Path: snakemakelib/bio/ngs/rnaseq/tuxedo.py
# class TuxedoReadGroup(ReadGroup):
# _group_dict = {'ID' : 'id', 'CN' : 'center',
# 'DS' : 'description', 'DT' : 'date',
# 'FO' : 'floworder', 'KS' : 'keysequence',
# 'LB' : 'library', 'PG' : 'program',
# 'PI' : 'insertsize', 'PL': 'platform',
# 'PU' : 'platform-unit', 'SM' : 'sample'}
#
# def __init__(self, opt_prefix="--rg-", *args, **kwargs):
# ReadGroup.__init__(self, *args, **kwargs)
# self._opt_prefix = opt_prefix
, which may include functions, classes, or code. Output only the next line. | rg = TuxedoReadGroup(regexp="test", ID='test', DT="120924") |
Using the snippet: <|code_start|># Copyright (C) 2015 by Per Unneberg
# pylint: disable=R0904
logger = logging.getLogger(__name__)
class TestSraTools(unittest.TestCase):
"""Test sratools functionality"""
def setUp(self):
self.metadata = io.StringIO("\n".join(["SampleName,Run",
"Sample1,Run1S1",
"Sample1,Run2S1",
"Sample2,Run1S2"]))
@raises(Exception)
def test_register_metadata(self):
"""Test registering metadata for non-existent file"""
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import logging
import io
import mock
from nose.tools import raises
from snakemakelib.bio.ngs.tools.sratools import register_metadata
and context (class names, function names, or code) available:
# Path: snakemakelib/bio/ngs/tools/sratools.py
# def register_metadata(metadata_file, config):
# """Read an SRA project file and register metadata in sml_config. Will
# issue a warning if file does not exists.
#
# Args:
# metadata - file name
# config - configuration to update
# """
# metadata_list = []
# import sys
# if metadata_file in sys.argv:
# return config
# try:
# with open(metadata_file, "r") as fh:
# reader = csv.DictReader(fh.readlines())
# metadata_list = [row for row in reader]
# run2sample = {row["Run"]:row["SampleName"] for row in metadata_list}
# config_default = {
# 'bio.ngs.settings' : {
# 'sampleinfo' : metadata_file
# },
# 'bio.ngs.tools.sratools': {
# '_datadir': os.path.dirname(metadata_file),
# '_run2sample' : run2sample,
# '_metadata' : metadata_list
# },
# }
# update_config(config_default, config)
# config = config_default
#
# except Exception:
# raise Exception("""
#
# no metadata file '{metadata}' found
#
# please initiate analysis by running 'snakemake {metadata}'
#
# """.format(metadata=metadata_file))
# return config
. Output only the next line. | register_metadata("foo.csv", config = {}) |
Predict the next line after this snippet: <|code_start|># Copyright (C) 2015 by Per Unneberg
# pylint: disable=R0904, C0301, C0103
class TestQualimap(unittest.TestCase):
"""Test Qualimap"""
def setUp(self):
self.data = ['>>>>>>> Globals\n',
'number of windows = 10\n',
'number of reads = 10,000,000\n',
'number of mapped reads = 9,900,000 (99.00%)\n',
'number of duplicated reads = 400,000\n\n',
'>>>>>>> Insert size\n',
'>>>>>>> Coverage per contig\n',
'\n',
'\t'.join(['foo', '11', '12', '1.1', '1.2\n']),
'\t'.join(['bar', '21', '22', '2.1', '2.2\n'])]
@patch('snakemakelib.results.Results.load_lines')
def test_qualimap_coverage(self, mock_load_lines):
mock_load_lines.return_value = self.data
<|code_end|>
using the current file's imports:
import unittest
import pandas as pd
from unittest.mock import patch
from snakemakelib.bio.ngs.qc.qualimap import Qualimap
and any relevant context from other files:
# Path: snakemakelib/bio/ngs/qc/qualimap.py
# class Qualimap(Results):
# _keys = ['globals', 'coverage_per_contig']
#
# def __init__(self, *args, **kw):
# super(Qualimap, self).__init__(*args, **kw)
#
# def _collect_globals(self, data, first, sample):
# df_tmp = self.parse_data(data,
# rs=("Globals", "Insert"),
# skip=1, split=True,
# columns=GLOBALS_COLUMNS,
# dtype=float, sep=" = ")
# df_tmp['value'] = [float(x.split(" ")[0].replace(",", ""))
# for x in df_tmp['value']]
# df_tmp['Sample'] = sample
# try:
# if first:
# self['globals'] = df_tmp
# else:
# self['globals'] = self['globals'].append(df_tmp, ignore_index=True)
# except:
# smllogger.warn("failed to append data to globals dataframe")
#
# def _collect_coverage_per_contig(self, data, first, sample):
# df_tmp = self.parse_data(data,
# rs=("Coverage per contig", None),
# skip=2, split=True,
# columns=COVERAGE_PER_CONTIG_COLUMNS,
# dtype=float)
# df_tmp["Sample"] = sample
# try:
# df_tmp['chrlen_percent'] = 100 * df_tmp['chrlen'] /\
# sum(df_tmp['chrlen'])
# df_tmp['mapped_bases_percent'] = 100 * df_tmp['mapped_bases'] /\
# sum(df_tmp['mapped_bases'])
# except:
# smllogger.warn("coverage_per_contig: failed to normalize data")
# try:
# if first:
# self['coverage_per_contig'] = df_tmp
# else:
# self['coverage_per_contig'] = self['coverage_per_contig'].append(
# df_tmp, ignore_index=True)
# except:
# smllogger.warn("failed to append data to coverage_per_contig dataframe")
#
# def _collect_results(self):
# smllogger.info("Collecting results")
# first = True
# for (f, s) in zip(self._inputfiles, self._samples):
# smllogger.debug("Reading input file {f} for sample {s}".format(f=f, s=s))
# data = self.load_lines(f)
# self._collect_globals(data, first, s)
# self._collect_coverage_per_contig(data, first, s)
# first = False
# if self['globals'] is not None:
# self['globals'] = self['globals'].pivot(
# index='Sample', columns='name', values='value')
# self['globals']['number of unique reads'] = self['globals']['number of mapped reads']\
# - self['globals']['number of duplicated reads']
. Output only the next line. | qm = Qualimap([('foo', 'bar')]) |
Given the code snippet: <|code_start|> assert results == "pika/plop"
def test_007(finder):
results = finder.get_relative_from_paths("/home/foo/pika/plop", [
"/etc",
"/home/foo/pika",
"/home/bar/pika",
"/home/bar",
])
assert results == "plop"
def test_008(finder):
results = finder.get_relative_from_paths("/home/foo/pika/bim/bam/plop", [
"/etc",
"/home/foo/pika/bim/bam",
"/home/foo/pika/bim/bom",
"/home/bar/pika",
"/home/bar",
])
assert results == "plop"
def test_009(finder):
"""
Unable to find relative path raise an exception
"""
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from boussole.exceptions import FinderException
and context (functions, classes, or occasionally code) from other files:
# Path: boussole/exceptions.py
# class FinderException(BoussoleBaseException):
# """
# Exception to be raised when error occurs with finder usage.
# """
# pass
. Output only the next line. | with pytest.raises(FinderException): |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
def test_001_success(settings):
"""
Validate required value on dummy value
"""
processor = SettingsPostProcessor()
result = processor._validate_required({}, "DUMMY_NAME", "foo")
assert result == "foo"
def test_002_fail(settings):
"""
Validate existing file on empty value
"""
processor = SettingsPostProcessor()
<|code_end|>
. Use current file imports:
import pytest
from boussole.exceptions import SettingsInvalidError
from boussole.conf.post_processor import SettingsPostProcessor
and context (classes, functions, or code) from other files:
# Path: boussole/exceptions.py
# class SettingsInvalidError(BoussoleBaseException):
# """
# Exception to be raised when a settings is detected as invalid.
# """
# pass
#
# Path: boussole/conf/post_processor.py
# class SettingsPostProcessor(object):
# """
# Mixin object for all available post processing methods to use in
# settings manifest (default manifest comes from ``SETTINGS_MANIFEST``).
# """
# settings_manifesto = SETTINGS_MANIFEST
# projectdir = ""
#
# def post_process(self, settings):
# """
# Perform post processing methods on settings according to their
# definition in manifest.
#
# Post process methods are implemented in their own method that have the
# same signature:
#
# * Get arguments: Current settings, item name and item value;
# * Return item value possibly patched;
#
# Args:
# settings (dict): Loaded settings.
#
# Returns:
# dict: Settings object possibly modified (depending from applied
# post processing).
#
# """
# for k in settings:
# # Search for post process rules for setting in manifest
# if (
# k in self.settings_manifesto and
# self.settings_manifesto[k].get("postprocess", None) is not None
# ):
# rules = self.settings_manifesto[k]["postprocess"]
#
# # Chain post process rules from each setting
# for method_name in rules:
# settings[k] = getattr(self, method_name)(
# settings,
# k,
# settings[k]
# )
#
# return settings
#
# def _patch_expand_path(self, settings, name, value):
# """
# Patch a path to expand home directory and make absolute path.
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (str): Path to patch.
#
# Returns:
# str: Patched path to an absolute path.
#
# """
# if os.path.isabs(value):
# return os.path.normpath(value)
#
# # Expand home directory if any
# value = os.path.expanduser(value)
#
# # If the path is not yet an absolute directory, make it so from base
# # directory if not empty
# if not os.path.isabs(value) and self.projectdir:
# value = os.path.join(self.projectdir, value)
#
# return os.path.normpath(value)
#
# def _patch_expand_paths(self, settings, name, value):
# """
# Apply ``SettingsPostProcessor._patch_expand_path`` to each element in
# list.
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (list): List of paths to patch.
#
# Returns:
# list: Patched path list to an absolute path.
#
# """
# return [self._patch_expand_path(settings, name, item)
# for item in value]
#
# def _validate_path(self, settings, name, value):
# """
# Validate path exists
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (str): Path to validate.
#
# Raises:
# boussole.exceptions.SettingsInvalidError: If path does not exists.
#
# Returns:
# str: Validated path.
#
# """
# if not os.path.exists(value):
# msg = "Path from setting '{name}' does not exists: {value}"
# raise SettingsInvalidError(msg.format(name=name, value=value))
#
# return value
#
# def _validate_paths(self, settings, name, value):
# """
# Apply ``SettingsPostProcessor._validate_path`` to each element in
# list.
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (list): List of paths to patch.
#
# Raises:
# boussole.exceptions.SettingsInvalidError: Once a path does not
# exists.
#
# Returns:
# list: Validated paths.
#
# """
# return [self._validate_path(settings, name, item)
# for item in value]
#
# def _validate_required(self, settings, name, value):
# """
# Validate a required setting (value can not be empty)
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (str): Required value to validate.
#
# Raises:
# boussole.exceptions.SettingsInvalidError: If value is empty.
#
# Returns:
# str: Validated value.
#
# """
# if not value:
# msg = "Required value from setting '{name}' must not be empty."
# raise SettingsInvalidError(msg.format(name=name))
#
# return value
. Output only the next line. | with pytest.raises(SettingsInvalidError): |
Predict the next line for this snippet: <|code_start|> os.path.join(settings.lib2_path, 'addons/_some_addon.scss'),
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'components/_webfont.scss'),
os.path.join(settings.lib1_path, 'library_1_fullstack.scss'),
]
def test_commented(settings, parser, resolver):
"""Resolve paths from sample with comments"""
sourcepath = os.path.join(settings.sample_path, 'main_commented.scss')
with open(sourcepath) as fp:
finded_paths = parser.parse(fp.read())
resolved_paths = resolver.resolve(sourcepath, finded_paths)
assert resolved_paths == [
os.path.join(settings.sample_path, '_vendor.scss'),
os.path.join(settings.sample_path, 'components/_filename_test_1.scss'),
os.path.join(settings.sample_path, '_empty.scss'),
os.path.join(settings.sample_path, 'components/_webfont.scss'),
os.path.join(settings.sample_path, 'components/_filename_test_2.scss'),
]
def test_error_unresolvable(settings, parser, resolver):
"""Exception on wrong import path"""
sourcepath = os.path.join(settings.sample_path, 'main_error.scss')
with open(sourcepath) as fp:
finded_paths = parser.parse(fp.read())
<|code_end|>
with the help of current file imports:
import os
import pytest
from boussole.exceptions import UnresolvablePath
from boussole.exceptions import UnclearResolution
and context from other files:
# Path: boussole/exceptions.py
# class UnresolvablePath(BoussoleBaseException):
# """
# Exception to be raised when the resolver can not resolve a given path.
# """
# pass
#
# Path: boussole/exceptions.py
# class UnclearResolution(BoussoleBaseException):
# """
# Exception to be raised when the resolver encounts multiple existing
# candidates for a path.
# """
# pass
, which may contain function names, class names, or code. Output only the next line. | with pytest.raises(UnresolvablePath): |
Based on the snippet: <|code_start|>
assert resolved_paths == [
os.path.join(settings.sample_path, '_vendor.scss'),
os.path.join(settings.sample_path, 'components/_filename_test_1.scss'),
os.path.join(settings.sample_path, '_empty.scss'),
os.path.join(settings.sample_path, 'components/_webfont.scss'),
os.path.join(settings.sample_path, 'components/_filename_test_2.scss'),
]
def test_error_unresolvable(settings, parser, resolver):
"""Exception on wrong import path"""
sourcepath = os.path.join(settings.sample_path, 'main_error.scss')
with open(sourcepath) as fp:
finded_paths = parser.parse(fp.read())
with pytest.raises(UnresolvablePath):
resolver.resolve(
sourcepath,
finded_paths,
library_paths=settings.libraries_fixture_paths
)
def test_error_unclear_001(settings, parser, resolver):
"""Check candidates on unclear resolution"""
sourcepath = os.path.join(settings.sample_path, 'main_twins_1.scss')
with open(sourcepath) as fp:
finded_paths = parser.parse(fp.read())
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import pytest
from boussole.exceptions import UnresolvablePath
from boussole.exceptions import UnclearResolution
and context (classes, functions, sometimes code) from other files:
# Path: boussole/exceptions.py
# class UnresolvablePath(BoussoleBaseException):
# """
# Exception to be raised when the resolver can not resolve a given path.
# """
# pass
#
# Path: boussole/exceptions.py
# class UnclearResolution(BoussoleBaseException):
# """
# Exception to be raised when the resolver encounts multiple existing
# candidates for a path.
# """
# pass
. Output only the next line. | with pytest.raises(UnclearResolution): |
Given the following code snippet before the placeholder: <|code_start|> "foo.scss",
"bar/*.scss",
"*.scss",
"*.css",
],
False
),
(
"pika/bar/foo.sass",
[
"foo.sass",
"bar/*.sass",
"*.sass",
"*.css",
],
False
),
])
def test_allowed_001(settings, finder, filepath, excludes, expected):
"""
Allowed simple filename
"""
allowed = finder.is_allowed(filepath, excludes=excludes)
assert expected == allowed
def test_allowed_exception_201(settings, finder):
"""
Absolute path raise an exception
"""
<|code_end|>
, predict the next line using imports from the current file:
import pytest
from boussole.exceptions import FinderException
and context including class names, function names, and sometimes code from other files:
# Path: boussole/exceptions.py
# class FinderException(BoussoleBaseException):
# """
# Exception to be raised when error occurs with finder usage.
# """
# pass
. Output only the next line. | with pytest.raises(FinderException): |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
# Backend default filename shortcuts
YAML_FILENAME = SettingsBackendYaml._default_filename
JSON_FILENAME = SettingsBackendJson._default_filename
@pytest.mark.parametrize("options,filename", [
([], JSON_FILENAME),
(["--backend=yaml"], YAML_FILENAME),
(["--backend=json"], JSON_FILENAME),
])
def test_error_verbosity_001(caplog, options, filename):
"""
Testing default verbosity (aka INFO level) on setting error with
different backends
"""
runner = CliRunner()
# Temporary isolated current dir
with runner.isolated_filesystem():
test_cwd = os.getcwd()
# Default verbosity
<|code_end|>
with the help of current file imports:
import json
import os
import pyaml
import pytest
from click.testing import CliRunner
from boussole.cli.console_script import cli_frontend
from boussole.conf.json_backend import SettingsBackendJson
from boussole.conf.yaml_backend import SettingsBackendYaml
and context from other files:
# Path: boussole/cli/console_script.py
# @click.group(context_settings=CONTEXT_SETTINGS)
# @click.option(
# "-v",
# "--verbose",
# type=click.IntRange(min=0, max=5),
# default=4,
# metavar="INTEGER",
# help=(
# "An integer between 0 and 5, where '0' make a totaly "
# "silent output and '5' set level to DEBUG (the most verbose "
# "level). Default to '4' (Info level)."
# )
# )
# @click.pass_context
# def cli_frontend(ctx, verbose):
# """
# Boussole is a commandline interface to build Sass projects using libsass.
#
# Every project will need a settings file containing all needed settings to
# build it.
# """
# printout = True
# if verbose == 0:
# verbose = 1
# printout = False
#
# # Verbosity is the inverse of logging levels
# levels = [item for item in BOUSSOLE_LOGGER_CONF]
# levels.reverse()
# # Init the logger config
# root_logger = init_logger(levels[verbose], printout=printout)
#
# # Init the default context that will be passed to commands
# ctx.obj = {
# "verbosity": verbose,
# "logger": root_logger,
# }
#
# Path: boussole/conf/json_backend.py
# class SettingsBackendJson(SettingsBackendBase):
# """
# JSON backend for settings
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.json``.
# _kind_name: Backend format name.
# Value is ``json``.
# _file_extension: Default filename extension.
# Value is ``json``.
# """
# _default_filename = "boussole.json"
# _kind_name = "json"
# _file_extension = "json"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# json.dump(content, fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using JSON parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid JSON object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = json.loads(content)
# except ValueError:
# msg = "No JSON object could be decoded from file: {}"
# raise SettingsBackendError(msg.format(filepath))
# return parsed
#
# Path: boussole/conf/yaml_backend.py
# class SettingsBackendYaml(SettingsBackendBase):
# """
# YAML backend for settings
#
# Use PyYaml for parsing and pyaml for dumping.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.yml``.
# _kind_name: Backend format name.
# Value is ``yaml``.
# _file_extension: Default filename extension.
# Value is ``yml``.
# """
# _default_filename = "boussole.yml"
# _kind_name = "yaml"
# _file_extension = "yml"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# pyaml.dump(content, dst=fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using YAML parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid YAML object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = yaml.load(content, Loader=yaml.FullLoader)
# except yaml.YAMLError as exc:
# msg = "No YAML object could be decoded from file: {}\n{}"
# raise SettingsBackendError(msg.format(filepath, exc))
# return parsed
, which may contain function names, class names, or code. Output only the next line. | result = runner.invoke(cli_frontend, ["compile"]+options) |
Predict the next line for this snippet: <|code_start|> # absolute filepath
(
{"basedir": "/home/no/pasaran"},
{"filepath": "/home/bart/www/boussole_custom.txt"},
("/home/bart/www", "boussole_custom.txt"),
),
# filename and filled basedir, need normalize
(
{"basedir": "/home/bart/www"},
{"filepath": "./boussole_custom.txt"},
("/home/bart/www", "boussole_custom.txt"),
),
# filename and filled basedir, need normalize
(
{"basedir": "/home/bart/www"},
{"filepath": "../boussole_custom.txt"},
("/home/bart", "boussole_custom.txt"),
),
# filename and empty basedir, normalize can't do anything
(
{},
{"filepath": "../boussole_custom.txt"},
("..", "boussole_custom.txt"),
),
])
def test_base_parsepath(settings, sample_project_settings, settings_kwargs,
parse_kwargs, expected):
"""
no path given and with empty basedir
"""
<|code_end|>
with the help of current file imports:
import pytest
from boussole.conf.base_backend import SettingsBackendBase
and context from other files:
# Path: boussole/conf/base_backend.py
# class SettingsBackendBase(SettingsPostProcessor):
# """
# Base project settings backend
#
# Args:
# basedir (str): Directory path where to search for settings filepath.
#
# Default is empty, meaning it will resolve path from current
# directory. Don't use an empty ``basedir`` attribute to load
# settings from non-absolute filepath.
#
# Given value will fill intial value for ``projectdir`` attribute.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.txt``.
# _kind_name: Backend format name.
# Value is ``txt``.
# _file_extension: Default filename extension.
# Value is ``txt``.
# """
# _default_filename = "boussole.txt"
# _kind_name = "txt"
# _file_extension = "txt"
#
# def __init__(self, basedir=None):
# self.basedir = basedir or ""
# self.projectdir = self.basedir
#
# def parse_filepath(self, filepath=None):
# """
# Parse given filepath to split possible path directory from filename.
#
# * If path directory is empty, will use ``basedir`` attribute as base
# filepath;
# * If path directory is absolute, ignore ``basedir`` attribute;
# * If path directory is relative, join it to ``basedir`` attribute;
#
# Keyword Arguments:
# filepath (str): Filepath to use to search for settings file. Will
# use value from ``_default_filename`` class attribute if empty.
#
# If filepath contain a directory path, it will be splitted from
# filename and used as base directory (and update object
# ``basedir`` attribute).
#
# Returns:
# tuple: Separated path directory and filename.
# """
# filepath = filepath or self._default_filename
#
# path, filename = os.path.split(filepath)
#
# if not path:
# path = self.basedir
# elif not os.path.isabs(path):
# path = os.path.join(self.basedir, path)
#
# return os.path.normpath(path), filename
#
# def check_filepath(self, path, filename):
# """
# Check and return the final filepath to settings
#
# Args:
# path (str): Directory path where to search for settings file.
# filename (str): Filename to use to search for settings file.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If determined filepath
# does not exists or is a directory.
#
# Returns:
# string: Settings file path, joining given path and filename.
#
# """
# settings_path = os.path.join(path, filename)
#
# if not os.path.exists(settings_path) or \
# not os.path.isfile(settings_path):
# msg = "Unable to find settings file: {}"
# raise SettingsBackendError(msg.format(settings_path))
#
# return settings_path
#
# def open(self, filepath):
# """
# Open settings backend to return its content
#
# Args:
# filepath (str): Settings object, depends from backend
#
# Returns:
# string: File content.
#
# """
# with io.open(filepath, "r", encoding="utf-8") as fp:
# content = fp.read()
# return content
#
# def parse(self, filepath, content):
# """
# Load and parse opened settings content.
#
# Base method do nothing because parsing is dependent from backend.
#
# Args:
# filepath (str): Settings file location.
# content (str): Settings content from opened file, depends from
# backend.
#
# Returns:
# dict: Dictionnary containing parsed setting options.
#
# """
# return {}
#
# def dump(self, content, filepath):
# """
# Dump settings content to filepath.
#
# Base method do nothing because dumping is dependent from backend.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
#
# Returns:
# dict: Dictionnary containing parsed setting options.
#
# """
# return {}
#
# def clean(self, settings):
# """
# Clean given settings for backend needs.
#
# Default backend only apply available post processor methods.
#
# Args:
# dict: Loaded settings.
#
# Returns:
# dict: Settings object cleaned.
#
# """
# return self.post_process(settings)
#
# def load(self, filepath=None):
# """
# Load settings file from given path and optionnal filepath.
#
# During path resolving, the ``projectdir`` is updated to the file path
# directory.
#
# Keyword Arguments:
# filepath (str): Filepath to the settings file.
#
# Returns:
# boussole.conf.model.Settings: Settings object with loaded options.
#
# """
# self.projectdir, filename = self.parse_filepath(filepath)
#
# settings_path = self.check_filepath(self.projectdir, filename)
#
# parsed = self.parse(settings_path, self.open(settings_path))
#
# settings = self.clean(parsed)
#
# return Settings(initial=settings)
, which may contain function names, class names, or code. Output only the next line. | backend = SettingsBackendBase(**settings_kwargs) |
Given the following code snippet before the placeholder: <|code_start|>
def test_001_success(settings, temp_builds_dir):
"""
Validate existing file path
"""
basedir = temp_builds_dir.join("postprocessor_validate_path_001")
os.makedirs(basedir.strpath)
processor = SettingsPostProcessor()
foo = basedir.join("foo.txt")
foo.write("Hello world!")
result = processor._validate_path({}, "DUMMY_NAME", foo.strpath)
assert result == foo.strpath
def test_002_exception(settings, temp_builds_dir):
"""
Validate not existing file path
"""
basedir = temp_builds_dir.join("postprocessor_validate_path_002")
os.makedirs(basedir.strpath)
processor = SettingsPostProcessor()
foo = basedir.join("foo.txt")
<|code_end|>
, predict the next line using imports from the current file:
import os
import pytest
from boussole.exceptions import SettingsInvalidError
from boussole.conf.post_processor import SettingsPostProcessor
and context including class names, function names, and sometimes code from other files:
# Path: boussole/exceptions.py
# class SettingsInvalidError(BoussoleBaseException):
# """
# Exception to be raised when a settings is detected as invalid.
# """
# pass
#
# Path: boussole/conf/post_processor.py
# class SettingsPostProcessor(object):
# """
# Mixin object for all available post processing methods to use in
# settings manifest (default manifest comes from ``SETTINGS_MANIFEST``).
# """
# settings_manifesto = SETTINGS_MANIFEST
# projectdir = ""
#
# def post_process(self, settings):
# """
# Perform post processing methods on settings according to their
# definition in manifest.
#
# Post process methods are implemented in their own method that have the
# same signature:
#
# * Get arguments: Current settings, item name and item value;
# * Return item value possibly patched;
#
# Args:
# settings (dict): Loaded settings.
#
# Returns:
# dict: Settings object possibly modified (depending from applied
# post processing).
#
# """
# for k in settings:
# # Search for post process rules for setting in manifest
# if (
# k in self.settings_manifesto and
# self.settings_manifesto[k].get("postprocess", None) is not None
# ):
# rules = self.settings_manifesto[k]["postprocess"]
#
# # Chain post process rules from each setting
# for method_name in rules:
# settings[k] = getattr(self, method_name)(
# settings,
# k,
# settings[k]
# )
#
# return settings
#
# def _patch_expand_path(self, settings, name, value):
# """
# Patch a path to expand home directory and make absolute path.
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (str): Path to patch.
#
# Returns:
# str: Patched path to an absolute path.
#
# """
# if os.path.isabs(value):
# return os.path.normpath(value)
#
# # Expand home directory if any
# value = os.path.expanduser(value)
#
# # If the path is not yet an absolute directory, make it so from base
# # directory if not empty
# if not os.path.isabs(value) and self.projectdir:
# value = os.path.join(self.projectdir, value)
#
# return os.path.normpath(value)
#
# def _patch_expand_paths(self, settings, name, value):
# """
# Apply ``SettingsPostProcessor._patch_expand_path`` to each element in
# list.
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (list): List of paths to patch.
#
# Returns:
# list: Patched path list to an absolute path.
#
# """
# return [self._patch_expand_path(settings, name, item)
# for item in value]
#
# def _validate_path(self, settings, name, value):
# """
# Validate path exists
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (str): Path to validate.
#
# Raises:
# boussole.exceptions.SettingsInvalidError: If path does not exists.
#
# Returns:
# str: Validated path.
#
# """
# if not os.path.exists(value):
# msg = "Path from setting '{name}' does not exists: {value}"
# raise SettingsInvalidError(msg.format(name=name, value=value))
#
# return value
#
# def _validate_paths(self, settings, name, value):
# """
# Apply ``SettingsPostProcessor._validate_path`` to each element in
# list.
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (list): List of paths to patch.
#
# Raises:
# boussole.exceptions.SettingsInvalidError: Once a path does not
# exists.
#
# Returns:
# list: Validated paths.
#
# """
# return [self._validate_path(settings, name, item)
# for item in value]
#
# def _validate_required(self, settings, name, value):
# """
# Validate a required setting (value can not be empty)
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (str): Required value to validate.
#
# Raises:
# boussole.exceptions.SettingsInvalidError: If value is empty.
#
# Returns:
# str: Validated value.
#
# """
# if not value:
# msg = "Required value from setting '{name}' must not be empty."
# raise SettingsInvalidError(msg.format(name=name))
#
# return value
. Output only the next line. | with pytest.raises(SettingsInvalidError): |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
def test_001_success(settings, temp_builds_dir):
"""
Validate existing file path
"""
basedir = temp_builds_dir.join("postprocessor_validate_path_001")
os.makedirs(basedir.strpath)
<|code_end|>
, generate the next line using the imports in this file:
import os
import pytest
from boussole.exceptions import SettingsInvalidError
from boussole.conf.post_processor import SettingsPostProcessor
and context (functions, classes, or occasionally code) from other files:
# Path: boussole/exceptions.py
# class SettingsInvalidError(BoussoleBaseException):
# """
# Exception to be raised when a settings is detected as invalid.
# """
# pass
#
# Path: boussole/conf/post_processor.py
# class SettingsPostProcessor(object):
# """
# Mixin object for all available post processing methods to use in
# settings manifest (default manifest comes from ``SETTINGS_MANIFEST``).
# """
# settings_manifesto = SETTINGS_MANIFEST
# projectdir = ""
#
# def post_process(self, settings):
# """
# Perform post processing methods on settings according to their
# definition in manifest.
#
# Post process methods are implemented in their own method that have the
# same signature:
#
# * Get arguments: Current settings, item name and item value;
# * Return item value possibly patched;
#
# Args:
# settings (dict): Loaded settings.
#
# Returns:
# dict: Settings object possibly modified (depending from applied
# post processing).
#
# """
# for k in settings:
# # Search for post process rules for setting in manifest
# if (
# k in self.settings_manifesto and
# self.settings_manifesto[k].get("postprocess", None) is not None
# ):
# rules = self.settings_manifesto[k]["postprocess"]
#
# # Chain post process rules from each setting
# for method_name in rules:
# settings[k] = getattr(self, method_name)(
# settings,
# k,
# settings[k]
# )
#
# return settings
#
# def _patch_expand_path(self, settings, name, value):
# """
# Patch a path to expand home directory and make absolute path.
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (str): Path to patch.
#
# Returns:
# str: Patched path to an absolute path.
#
# """
# if os.path.isabs(value):
# return os.path.normpath(value)
#
# # Expand home directory if any
# value = os.path.expanduser(value)
#
# # If the path is not yet an absolute directory, make it so from base
# # directory if not empty
# if not os.path.isabs(value) and self.projectdir:
# value = os.path.join(self.projectdir, value)
#
# return os.path.normpath(value)
#
# def _patch_expand_paths(self, settings, name, value):
# """
# Apply ``SettingsPostProcessor._patch_expand_path`` to each element in
# list.
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (list): List of paths to patch.
#
# Returns:
# list: Patched path list to an absolute path.
#
# """
# return [self._patch_expand_path(settings, name, item)
# for item in value]
#
# def _validate_path(self, settings, name, value):
# """
# Validate path exists
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (str): Path to validate.
#
# Raises:
# boussole.exceptions.SettingsInvalidError: If path does not exists.
#
# Returns:
# str: Validated path.
#
# """
# if not os.path.exists(value):
# msg = "Path from setting '{name}' does not exists: {value}"
# raise SettingsInvalidError(msg.format(name=name, value=value))
#
# return value
#
# def _validate_paths(self, settings, name, value):
# """
# Apply ``SettingsPostProcessor._validate_path`` to each element in
# list.
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (list): List of paths to patch.
#
# Raises:
# boussole.exceptions.SettingsInvalidError: Once a path does not
# exists.
#
# Returns:
# list: Validated paths.
#
# """
# return [self._validate_path(settings, name, item)
# for item in value]
#
# def _validate_required(self, settings, name, value):
# """
# Validate a required setting (value can not be empty)
#
# Args:
# settings (dict): Current settings.
# name (str): Setting name.
# value (str): Required value to validate.
#
# Raises:
# boussole.exceptions.SettingsInvalidError: If value is empty.
#
# Returns:
# str: Validated value.
#
# """
# if not value:
# msg = "Required value from setting '{name}' must not be empty."
# raise SettingsInvalidError(msg.format(name=name))
#
# return value
. Output only the next line. | processor = SettingsPostProcessor() |
Using the snippet: <|code_start|> Dump settings content to filepath.
Args:
content (str): Settings content.
filepath (str): Settings file location.
"""
with open(filepath, "w") as fp:
pyaml.dump(content, dst=fp, indent=indent)
def parse(self, filepath, content):
"""
Parse opened settings content using YAML parser.
Args:
filepath (str): Settings object, depends from backend
content (str): Settings content from opened file, depends from
backend.
Raises:
boussole.exceptions.SettingsBackendError: If parser can not decode
a valid YAML object.
Returns:
dict: Dictionnary containing parsed setting elements.
"""
try:
parsed = yaml.load(content, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
msg = "No YAML object could be decoded from file: {}\n{}"
<|code_end|>
, determine the next line of code. You have imports:
import yaml
import pyaml
from ..exceptions import SettingsBackendError
from .base_backend import SettingsBackendBase
and context (class names, function names, or code) available:
# Path: boussole/exceptions.py
# class SettingsBackendError(BoussoleBaseException):
# """
# Exception to be raised when config loading has failed from a backend.
# """
# pass
#
# Path: boussole/conf/base_backend.py
# class SettingsBackendBase(SettingsPostProcessor):
# """
# Base project settings backend
#
# Args:
# basedir (str): Directory path where to search for settings filepath.
#
# Default is empty, meaning it will resolve path from current
# directory. Don't use an empty ``basedir`` attribute to load
# settings from non-absolute filepath.
#
# Given value will fill intial value for ``projectdir`` attribute.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.txt``.
# _kind_name: Backend format name.
# Value is ``txt``.
# _file_extension: Default filename extension.
# Value is ``txt``.
# """
# _default_filename = "boussole.txt"
# _kind_name = "txt"
# _file_extension = "txt"
#
# def __init__(self, basedir=None):
# self.basedir = basedir or ""
# self.projectdir = self.basedir
#
# def parse_filepath(self, filepath=None):
# """
# Parse given filepath to split possible path directory from filename.
#
# * If path directory is empty, will use ``basedir`` attribute as base
# filepath;
# * If path directory is absolute, ignore ``basedir`` attribute;
# * If path directory is relative, join it to ``basedir`` attribute;
#
# Keyword Arguments:
# filepath (str): Filepath to use to search for settings file. Will
# use value from ``_default_filename`` class attribute if empty.
#
# If filepath contain a directory path, it will be splitted from
# filename and used as base directory (and update object
# ``basedir`` attribute).
#
# Returns:
# tuple: Separated path directory and filename.
# """
# filepath = filepath or self._default_filename
#
# path, filename = os.path.split(filepath)
#
# if not path:
# path = self.basedir
# elif not os.path.isabs(path):
# path = os.path.join(self.basedir, path)
#
# return os.path.normpath(path), filename
#
# def check_filepath(self, path, filename):
# """
# Check and return the final filepath to settings
#
# Args:
# path (str): Directory path where to search for settings file.
# filename (str): Filename to use to search for settings file.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If determined filepath
# does not exists or is a directory.
#
# Returns:
# string: Settings file path, joining given path and filename.
#
# """
# settings_path = os.path.join(path, filename)
#
# if not os.path.exists(settings_path) or \
# not os.path.isfile(settings_path):
# msg = "Unable to find settings file: {}"
# raise SettingsBackendError(msg.format(settings_path))
#
# return settings_path
#
# def open(self, filepath):
# """
# Open settings backend to return its content
#
# Args:
# filepath (str): Settings object, depends from backend
#
# Returns:
# string: File content.
#
# """
# with io.open(filepath, "r", encoding="utf-8") as fp:
# content = fp.read()
# return content
#
# def parse(self, filepath, content):
# """
# Load and parse opened settings content.
#
# Base method do nothing because parsing is dependent from backend.
#
# Args:
# filepath (str): Settings file location.
# content (str): Settings content from opened file, depends from
# backend.
#
# Returns:
# dict: Dictionnary containing parsed setting options.
#
# """
# return {}
#
# def dump(self, content, filepath):
# """
# Dump settings content to filepath.
#
# Base method do nothing because dumping is dependent from backend.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
#
# Returns:
# dict: Dictionnary containing parsed setting options.
#
# """
# return {}
#
# def clean(self, settings):
# """
# Clean given settings for backend needs.
#
# Default backend only apply available post processor methods.
#
# Args:
# dict: Loaded settings.
#
# Returns:
# dict: Settings object cleaned.
#
# """
# return self.post_process(settings)
#
# def load(self, filepath=None):
# """
# Load settings file from given path and optionnal filepath.
#
# During path resolving, the ``projectdir`` is updated to the file path
# directory.
#
# Keyword Arguments:
# filepath (str): Filepath to the settings file.
#
# Returns:
# boussole.conf.model.Settings: Settings object with loaded options.
#
# """
# self.projectdir, filename = self.parse_filepath(filepath)
#
# settings_path = self.check_filepath(self.projectdir, filename)
#
# parsed = self.parse(settings_path, self.open(settings_path))
#
# settings = self.clean(parsed)
#
# return Settings(initial=settings)
. Output only the next line. | raise SettingsBackendError(msg.format(filepath, exc)) |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
def test_self(settings, inspector):
"""
Ensure self import is detected
"""
sourcepath_0 = os.path.join(settings.sample_path, 'main_circular_0.scss')
sources = [
os.path.join(settings.sample_path, 'main_basic.scss'),
sourcepath_0,
]
inspector.inspect(*sources)
<|code_end|>
. Use current file imports:
(import os
import pytest
from boussole.exceptions import CircularImport)
and context including class names, function names, or small code snippets from other files:
# Path: boussole/exceptions.py
# class CircularImport(BoussoleBaseException):
# """
# Exception to be raised when inspector detect a circular import from
# sources.
# """
# pass
. Output only the next line. | with pytest.raises(CircularImport): |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
def test_css_compat_ok(compiler, temp_builds_dir):
"""
Ensure CSS import compatibility is ok
"""
basedir = temp_builds_dir.join("compiler_css_compat_ok").strpath
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import io
from boussole.conf.model import Settings
and context (classes, functions, sometimes code) from other files:
# Path: boussole/conf/model.py
# class Settings(object):
# """
# Settings model object
#
# Class init method fills object attributes from default settings
# (``DEFAULT_SETTINGS``) then update it with initial settings if given.
#
# Settings are available as object attributes, there is also a private
# ``_settings`` attribute containing a dict of all stored settings. You are
# strongly advised to never directly manipulate the ``_settings`` attribute.
# Instead, allways use the ``update()`` method.
#
# Note:
# Model is only about data model, there is no other validation that
# available 'fields' from ``DEFAULT_SETTINGS``.
#
# If you intend to manually open and fill a Settings instance, remember
# to allways use absolute paths in your settings. Relative path will
# cause issues in resolving that lead to wrong compilations.
#
# You may also apply post processor validation to ensure your datas.
#
# Keyword Arguments:
# initial (dict): A dictionnary of settings for initial values.
# """
# def __init__(self, initial={}):
# self._settings = copy.deepcopy(DEFAULT_SETTINGS)
# if initial:
# initial = self.clean(initial)
# self._settings.update(initial)
# self.set_settings(self._settings)
#
# def clean(self, settings):
# """
# Filter given settings to keep only key names available in
# ``DEFAULT_SETTINGS``.
#
# Args:
# settings (dict): Loaded settings.
#
# Returns:
# dict: Settings object filtered.
#
# """
# return {k: v for k, v in settings.items() if k in DEFAULT_SETTINGS}
#
# def set_settings(self, settings):
# """
# Set every given settings as object attributes.
#
# Args:
# settings (dict): Dictionnary of settings.
#
# """
# for k, v in settings.items():
# setattr(self, k, v)
#
# def update(self, settings):
# """
# Update object attributes from given settings
#
# Args:
# settings (dict): Dictionnary of elements to update settings.
#
# Returns:
# dict: Dictionnary of all current saved settings.
#
# """
# settings = self.clean(settings)
#
# # Update internal dict
# self._settings.update(settings)
#
# # Push every setting items as class object attributes
# self.set_settings(settings)
#
# return self._settings
. Output only the next line. | basic_settings = Settings(initial={ |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
@pytest.mark.parametrize("backend_engine,loader,loader_opts", [
(
SettingsBackendJson,
json.loads,
{},
),
(
<|code_end|>
, generate the next line using the imports in this file:
import os
import json
import yaml
import pytest
from boussole.conf.json_backend import SettingsBackendJson
from boussole.conf.yaml_backend import SettingsBackendYaml
and context (functions, classes, or occasionally code) from other files:
# Path: boussole/conf/json_backend.py
# class SettingsBackendJson(SettingsBackendBase):
# """
# JSON backend for settings
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.json``.
# _kind_name: Backend format name.
# Value is ``json``.
# _file_extension: Default filename extension.
# Value is ``json``.
# """
# _default_filename = "boussole.json"
# _kind_name = "json"
# _file_extension = "json"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# json.dump(content, fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using JSON parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid JSON object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = json.loads(content)
# except ValueError:
# msg = "No JSON object could be decoded from file: {}"
# raise SettingsBackendError(msg.format(filepath))
# return parsed
#
# Path: boussole/conf/yaml_backend.py
# class SettingsBackendYaml(SettingsBackendBase):
# """
# YAML backend for settings
#
# Use PyYaml for parsing and pyaml for dumping.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.yml``.
# _kind_name: Backend format name.
# Value is ``yaml``.
# _file_extension: Default filename extension.
# Value is ``yml``.
# """
# _default_filename = "boussole.yml"
# _kind_name = "yaml"
# _file_extension = "yml"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# pyaml.dump(content, dst=fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using YAML parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid YAML object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = yaml.load(content, Loader=yaml.FullLoader)
# except yaml.YAMLError as exc:
# msg = "No YAML object could be decoded from file: {}\n{}"
# raise SettingsBackendError(msg.format(filepath, exc))
# return parsed
. Output only the next line. | SettingsBackendYaml, |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
@pytest.mark.parametrize("backend_engine", [
SettingsBackendJson,
SettingsBackendYaml,
])
def test_basic(settings, custom_project_settings, backend_engine):
"""
Load basic settings file fail because of wrong paths
"""
backend = backend_engine(basedir=settings.fixtures_path)
<|code_end|>
. Write the next line using the current file imports:
import os
import pytest
from boussole.exceptions import SettingsInvalidError
from boussole.conf.json_backend import SettingsBackendJson
from boussole.conf.yaml_backend import SettingsBackendYaml
and context from other files:
# Path: boussole/exceptions.py
# class SettingsInvalidError(BoussoleBaseException):
# """
# Exception to be raised when a settings is detected as invalid.
# """
# pass
#
# Path: boussole/conf/json_backend.py
# class SettingsBackendJson(SettingsBackendBase):
# """
# JSON backend for settings
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.json``.
# _kind_name: Backend format name.
# Value is ``json``.
# _file_extension: Default filename extension.
# Value is ``json``.
# """
# _default_filename = "boussole.json"
# _kind_name = "json"
# _file_extension = "json"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# json.dump(content, fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using JSON parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid JSON object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = json.loads(content)
# except ValueError:
# msg = "No JSON object could be decoded from file: {}"
# raise SettingsBackendError(msg.format(filepath))
# return parsed
#
# Path: boussole/conf/yaml_backend.py
# class SettingsBackendYaml(SettingsBackendBase):
# """
# YAML backend for settings
#
# Use PyYaml for parsing and pyaml for dumping.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.yml``.
# _kind_name: Backend format name.
# Value is ``yaml``.
# _file_extension: Default filename extension.
# Value is ``yml``.
# """
# _default_filename = "boussole.yml"
# _kind_name = "yaml"
# _file_extension = "yml"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# pyaml.dump(content, dst=fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using YAML parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid YAML object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = yaml.load(content, Loader=yaml.FullLoader)
# except yaml.YAMLError as exc:
# msg = "No YAML object could be decoded from file: {}\n{}"
# raise SettingsBackendError(msg.format(filepath, exc))
# return parsed
, which may include functions, classes, or code. Output only the next line. | with pytest.raises(SettingsInvalidError): |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
def test_ok_001(settings, parser):
"""parser.ScssImportsParser: Single quote"""
assert parser.strip_quotes("'foo'") == "foo"
def test_ok_002(settings, parser):
"""parser.ScssImportsParser: Double quotes"""
assert parser.strip_quotes('"foo"') == "foo"
def test_ok_003(settings, parser):
"""parser.ScssImportsParser: Without quotes"""
assert parser.strip_quotes("foo") == "foo"
def test_error_001(settings, parser):
"""parser.ScssImportsParser: Error, quote starting but not ended"""
<|code_end|>
. Use current file imports:
(import pytest
from boussole.exceptions import InvalidImportRule)
and context including class names, function names, or small code snippets from other files:
# Path: boussole/exceptions.py
# class InvalidImportRule(BoussoleBaseException):
# """
# Exception to be raised when the parser encounts an invalid import rule.
# """
# pass
. Output only the next line. | with pytest.raises(InvalidImportRule): |
Based on the snippet: <|code_start|> (
"/home/plop/foobar.PY",
{"*.py"},
{"*.txt"},
False,
True,
),
(
"/home/plop/foobar.txt",
{"*.py"},
{"*.txt"},
False,
False,
),
(
"/home/plop/",
{"*.py"},
{"*.txt"},
False,
False,
),
(
"/home/plop/",
{"*.py"},
{"*.txt"},
True,
False,
),
])
def test_match_path_success(path, included, excluded, case_sensitive, expected):
<|code_end|>
, predict the immediate next line with the help of imports:
import pytest
from boussole.utils import match_path
and context (classes, functions, sometimes code) from other files:
# Path: boussole/utils.py
# def match_path(path, included_patterns, excluded_patterns, case_sensitive):
# """
# Matches a pathname against a set of acceptable and ignored patterns.
#
# Inspired from ``watchdog.utils.patterns_match_path`` which was inspired from
# deprecated ``pathtools.patterns.match_path``.
#
# Arguments:
# path (string): A pathname which will be matched against a pattern.
# included_patterns (set): Allow filenames matching wildcard patterns
# specified in this list. If no pattern is specified, the function
# treats the pathname as a match_path.
# excluded_patterns (set): Ignores filenames matching wildcard patterns
# specified in this list. If no pattern is specified, the function
# treats the pathname as a match_path.
# case_sensitive (boolean): ``True`` if matching should be
# case-sensitive; ``False`` otherwise.
#
# Raises:
# ValueError: if included patterns and excluded patterns contain the
# same pattern.
# Returns:
# boolean: ``True`` if the pathname matches; ``False`` otherwise.
# """
# if case_sensitive:
# path = PurePosixPath(path)
# else:
# included_patterns = {pattern.lower() for pattern in included_patterns}
# excluded_patterns = {pattern.lower() for pattern in excluded_patterns}
# path = PureWindowsPath(path)
#
# common_patterns = included_patterns & excluded_patterns
#
# if common_patterns:
# msg = "conflicting patterns '{}' included and excluded"
# raise ValueError(msg.format(common_patterns))
#
# return (
# any(path.match(p) for p in included_patterns) and
# not any(path.match(p) for p in excluded_patterns)
# )
. Output only the next line. | assert match_path(path, included, excluded, case_sensitive) is expected |
Given snippet: <|code_start|> syntax = "scss"
REGEX_IMPORT_RULE = re.compile(r'@import\s*(url)?\s*\(?([^;]+?)\)?;',
re.IGNORECASE)
# Second part (for singleline comment) contain a negative lookbehind
# assertion to avoid to match on url protocole (http://) which cause issues
# in parsing
REGEX_COMMENTS = re.compile(r'(/\*.*?\*/)|((?<!(:))//.*?(\n|$))',
re.IGNORECASE | re.DOTALL)
def strip_quotes(self, content):
"""
Unquote given rule.
Args:
content (str): An import rule.
Raises:
InvalidImportRule: Raise exception if the rule is badly quoted
(not started or not ended quotes).
Returns:
string: The given rule unquoted.
"""
error_msg = "Following rule is badly quoted: {}"
if (content.startswith('"') and content.endswith('"')) or \
(content.startswith("'") and content.endswith("'")):
return content[1:-1]
# Quote starting but not ended
elif (content.startswith('"') and not content.endswith('"')) or \
(content.startswith("'") and not content.endswith("'")):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from .exceptions import InvalidImportRule
and context:
# Path: boussole/exceptions.py
# class InvalidImportRule(BoussoleBaseException):
# """
# Exception to be raised when the parser encounts an invalid import rule.
# """
# pass
which might include code, classes, or functions. Output only the next line. | raise InvalidImportRule(error_msg.format(content)) |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: utf-8 -*-
def test_001(compiler, temp_builds_dir):
"""
Basic sample without source map
"""
basedir = temp_builds_dir.join("compiler_safecompile").strpath
<|code_end|>
, predict the next line using imports from the current file:
import os
import io
from boussole.conf.model import Settings
and context including class names, function names, and sometimes code from other files:
# Path: boussole/conf/model.py
# class Settings(object):
# """
# Settings model object
#
# Class init method fills object attributes from default settings
# (``DEFAULT_SETTINGS``) then update it with initial settings if given.
#
# Settings are available as object attributes, there is also a private
# ``_settings`` attribute containing a dict of all stored settings. You are
# strongly advised to never directly manipulate the ``_settings`` attribute.
# Instead, allways use the ``update()`` method.
#
# Note:
# Model is only about data model, there is no other validation that
# available 'fields' from ``DEFAULT_SETTINGS``.
#
# If you intend to manually open and fill a Settings instance, remember
# to allways use absolute paths in your settings. Relative path will
# cause issues in resolving that lead to wrong compilations.
#
# You may also apply post processor validation to ensure your datas.
#
# Keyword Arguments:
# initial (dict): A dictionnary of settings for initial values.
# """
# def __init__(self, initial={}):
# self._settings = copy.deepcopy(DEFAULT_SETTINGS)
# if initial:
# initial = self.clean(initial)
# self._settings.update(initial)
# self.set_settings(self._settings)
#
# def clean(self, settings):
# """
# Filter given settings to keep only key names available in
# ``DEFAULT_SETTINGS``.
#
# Args:
# settings (dict): Loaded settings.
#
# Returns:
# dict: Settings object filtered.
#
# """
# return {k: v for k, v in settings.items() if k in DEFAULT_SETTINGS}
#
# def set_settings(self, settings):
# """
# Set every given settings as object attributes.
#
# Args:
# settings (dict): Dictionnary of settings.
#
# """
# for k, v in settings.items():
# setattr(self, k, v)
#
# def update(self, settings):
# """
# Update object attributes from given settings
#
# Args:
# settings (dict): Dictionnary of elements to update settings.
#
# Returns:
# dict: Dictionnary of all current saved settings.
#
# """
# settings = self.clean(settings)
#
# # Update internal dict
# self._settings.update(settings)
#
# # Push every setting items as class object attributes
# self.set_settings(settings)
#
# return self._settings
. Output only the next line. | basic_settings = Settings(initial={ |
Continue the code snippet: <|code_start|> """
FINDER_STYLESHEET_EXTS = ["scss", "sass"]
def get_relative_from_paths(self, filepath, paths):
"""
Find the relative filepath from the most relevant multiple paths.
This is somewhat like a ``os.path.relpath(path[, start])`` but where
``start`` is a list. The most relevant item from ``paths`` will be used
to apply the relative transform.
Args:
filepath (str): Path to transform to relative.
paths (list): List of absolute paths to use to find and remove the
start path from ``filepath`` argument. If there is multiple
path starting with the same directories, the biggest will
match.
Raises:
boussole.exception.FinderException: If no ``filepath`` start could
be finded.
Returns:
str: Relative filepath where the start coming from ``paths`` is
removed.
"""
for systempath in paths_by_depth(paths):
if filepath.startswith(systempath):
return os.path.relpath(filepath, systempath)
<|code_end|>
. Use current file imports:
import fnmatch
import os
from .exceptions import FinderException
and context (classes, functions, or code) from other files:
# Path: boussole/exceptions.py
# class FinderException(BoussoleBaseException):
# """
# Exception to be raised when error occurs with finder usage.
# """
# pass
. Output only the next line. | raise FinderException("'Finder.get_relative_from_paths()' could not " |
Continue the code snippet: <|code_start|>])
def test_ok(settings, sample_project_settings, backend_engine):
"""
Backend content parsing success
"""
backend = backend_engine(basedir=settings.fixtures_path)
path, filename = backend.parse_filepath()
filepath = backend.check_filepath(path, filename)
content = backend.open(filepath)
assert backend.parse(filepath, content) == sample_project_settings
@pytest.mark.parametrize("filename,backend_engine", [
("boussole_error.json", SettingsBackendJson),
("boussole_error.yml", SettingsBackendYaml),
])
def test_error(settings, sample_project_settings, filename, backend_engine):
"""
Backend content parsing error
"""
backend = backend_engine(basedir=settings.fixtures_path)
path, filename = backend.parse_filepath(filepath=filename)
filepath = backend.check_filepath(path, filename)
content = backend.open(filepath)
<|code_end|>
. Use current file imports:
import pytest
from boussole.exceptions import SettingsBackendError
from boussole.conf.json_backend import SettingsBackendJson
from boussole.conf.yaml_backend import SettingsBackendYaml
and context (classes, functions, or code) from other files:
# Path: boussole/exceptions.py
# class SettingsBackendError(BoussoleBaseException):
# """
# Exception to be raised when config loading has failed from a backend.
# """
# pass
#
# Path: boussole/conf/json_backend.py
# class SettingsBackendJson(SettingsBackendBase):
# """
# JSON backend for settings
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.json``.
# _kind_name: Backend format name.
# Value is ``json``.
# _file_extension: Default filename extension.
# Value is ``json``.
# """
# _default_filename = "boussole.json"
# _kind_name = "json"
# _file_extension = "json"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# json.dump(content, fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using JSON parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid JSON object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = json.loads(content)
# except ValueError:
# msg = "No JSON object could be decoded from file: {}"
# raise SettingsBackendError(msg.format(filepath))
# return parsed
#
# Path: boussole/conf/yaml_backend.py
# class SettingsBackendYaml(SettingsBackendBase):
# """
# YAML backend for settings
#
# Use PyYaml for parsing and pyaml for dumping.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.yml``.
# _kind_name: Backend format name.
# Value is ``yaml``.
# _file_extension: Default filename extension.
# Value is ``yml``.
# """
# _default_filename = "boussole.yml"
# _kind_name = "yaml"
# _file_extension = "yml"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# pyaml.dump(content, dst=fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using YAML parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid YAML object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = yaml.load(content, Loader=yaml.FullLoader)
# except yaml.YAMLError as exc:
# msg = "No YAML object could be decoded from file: {}\n{}"
# raise SettingsBackendError(msg.format(filepath, exc))
# return parsed
. Output only the next line. | with pytest.raises(SettingsBackendError): |
Predict the next line for this snippet: <|code_start|># -*- coding: utf-8 -*-
def test_empty(settings, inspector):
"""inspector.ScssInspector: Some lib components used but no given library"""
sourcepath = os.path.join(settings.sample_path, 'main_using_libs.scss')
sources = [
sourcepath,
]
# Some imports use libraries that was not given, so resolver raise
# an error
<|code_end|>
with the help of current file imports:
import os
import pytest
from boussole.exceptions import UnresolvablePath
and context from other files:
# Path: boussole/exceptions.py
# class UnresolvablePath(BoussoleBaseException):
# """
# Exception to be raised when the resolver can not resolve a given path.
# """
# pass
, which may contain function names, class names, or code. Output only the next line. | with pytest.raises(UnresolvablePath): |
Here is a snippet: <|code_start|> """
backend = SettingsBackendBase()
result = backend.check_filepath(
settings.fixtures_path,
filename=SettingsBackendBase._default_filename
)
assert result == os.path.join(settings.fixtures_path,
SettingsBackendBase._default_filename)
def test_ok_002(settings):
"""
conf.base_backendSettingsBackendBase: Filepath check case 2
"""
backend = SettingsBackendBase()
result = backend.check_filepath(settings.sample_path, filename="dummy")
assert result == os.path.join(settings.sample_path, "dummy")
def test_error_001(settings):
"""
conf.base_backendSettingsBackendBase: Filepath check error case 1
(dont exist)
"""
backend = SettingsBackendBase()
<|code_end|>
. Write the next line using the current file imports:
import os
import pytest
from boussole.exceptions import SettingsBackendError
from boussole.conf.base_backend import SettingsBackendBase
and context from other files:
# Path: boussole/exceptions.py
# class SettingsBackendError(BoussoleBaseException):
# """
# Exception to be raised when config loading has failed from a backend.
# """
# pass
#
# Path: boussole/conf/base_backend.py
# class SettingsBackendBase(SettingsPostProcessor):
# """
# Base project settings backend
#
# Args:
# basedir (str): Directory path where to search for settings filepath.
#
# Default is empty, meaning it will resolve path from current
# directory. Don't use an empty ``basedir`` attribute to load
# settings from non-absolute filepath.
#
# Given value will fill intial value for ``projectdir`` attribute.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.txt``.
# _kind_name: Backend format name.
# Value is ``txt``.
# _file_extension: Default filename extension.
# Value is ``txt``.
# """
# _default_filename = "boussole.txt"
# _kind_name = "txt"
# _file_extension = "txt"
#
# def __init__(self, basedir=None):
# self.basedir = basedir or ""
# self.projectdir = self.basedir
#
# def parse_filepath(self, filepath=None):
# """
# Parse given filepath to split possible path directory from filename.
#
# * If path directory is empty, will use ``basedir`` attribute as base
# filepath;
# * If path directory is absolute, ignore ``basedir`` attribute;
# * If path directory is relative, join it to ``basedir`` attribute;
#
# Keyword Arguments:
# filepath (str): Filepath to use to search for settings file. Will
# use value from ``_default_filename`` class attribute if empty.
#
# If filepath contain a directory path, it will be splitted from
# filename and used as base directory (and update object
# ``basedir`` attribute).
#
# Returns:
# tuple: Separated path directory and filename.
# """
# filepath = filepath or self._default_filename
#
# path, filename = os.path.split(filepath)
#
# if not path:
# path = self.basedir
# elif not os.path.isabs(path):
# path = os.path.join(self.basedir, path)
#
# return os.path.normpath(path), filename
#
# def check_filepath(self, path, filename):
# """
# Check and return the final filepath to settings
#
# Args:
# path (str): Directory path where to search for settings file.
# filename (str): Filename to use to search for settings file.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If determined filepath
# does not exists or is a directory.
#
# Returns:
# string: Settings file path, joining given path and filename.
#
# """
# settings_path = os.path.join(path, filename)
#
# if not os.path.exists(settings_path) or \
# not os.path.isfile(settings_path):
# msg = "Unable to find settings file: {}"
# raise SettingsBackendError(msg.format(settings_path))
#
# return settings_path
#
# def open(self, filepath):
# """
# Open settings backend to return its content
#
# Args:
# filepath (str): Settings object, depends from backend
#
# Returns:
# string: File content.
#
# """
# with io.open(filepath, "r", encoding="utf-8") as fp:
# content = fp.read()
# return content
#
# def parse(self, filepath, content):
# """
# Load and parse opened settings content.
#
# Base method do nothing because parsing is dependent from backend.
#
# Args:
# filepath (str): Settings file location.
# content (str): Settings content from opened file, depends from
# backend.
#
# Returns:
# dict: Dictionnary containing parsed setting options.
#
# """
# return {}
#
# def dump(self, content, filepath):
# """
# Dump settings content to filepath.
#
# Base method do nothing because dumping is dependent from backend.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
#
# Returns:
# dict: Dictionnary containing parsed setting options.
#
# """
# return {}
#
# def clean(self, settings):
# """
# Clean given settings for backend needs.
#
# Default backend only apply available post processor methods.
#
# Args:
# dict: Loaded settings.
#
# Returns:
# dict: Settings object cleaned.
#
# """
# return self.post_process(settings)
#
# def load(self, filepath=None):
# """
# Load settings file from given path and optionnal filepath.
#
# During path resolving, the ``projectdir`` is updated to the file path
# directory.
#
# Keyword Arguments:
# filepath (str): Filepath to the settings file.
#
# Returns:
# boussole.conf.model.Settings: Settings object with loaded options.
#
# """
# self.projectdir, filename = self.parse_filepath(filepath)
#
# settings_path = self.check_filepath(self.projectdir, filename)
#
# parsed = self.parse(settings_path, self.open(settings_path))
#
# settings = self.clean(parsed)
#
# return Settings(initial=settings)
, which may include functions, classes, or code. Output only the next line. | with pytest.raises(SettingsBackendError): |
Here is a snippet: <|code_start|># -*- coding: utf-8 -*-
def test_001_default(settings, sample_project_settings):
"""
Create a empty Settings object width default values
"""
settings_object = Settings()
<|code_end|>
. Write the next line using the current file imports:
import copy
from boussole.conf.model import DEFAULT_SETTINGS, Settings
and context from other files:
# Path: boussole/conf/model.py
# DEFAULT_SETTINGS = {k: copy.deepcopy(v["default"])
# for k, v in SETTINGS_MANIFEST.items()}
#
# class Settings(object):
# """
# Settings model object
#
# Class init method fills object attributes from default settings
# (``DEFAULT_SETTINGS``) then update it with initial settings if given.
#
# Settings are available as object attributes, there is also a private
# ``_settings`` attribute containing a dict of all stored settings. You are
# strongly advised to never directly manipulate the ``_settings`` attribute.
# Instead, allways use the ``update()`` method.
#
# Note:
# Model is only about data model, there is no other validation that
# available 'fields' from ``DEFAULT_SETTINGS``.
#
# If you intend to manually open and fill a Settings instance, remember
# to allways use absolute paths in your settings. Relative path will
# cause issues in resolving that lead to wrong compilations.
#
# You may also apply post processor validation to ensure your datas.
#
# Keyword Arguments:
# initial (dict): A dictionnary of settings for initial values.
# """
# def __init__(self, initial={}):
# self._settings = copy.deepcopy(DEFAULT_SETTINGS)
# if initial:
# initial = self.clean(initial)
# self._settings.update(initial)
# self.set_settings(self._settings)
#
# def clean(self, settings):
# """
# Filter given settings to keep only key names available in
# ``DEFAULT_SETTINGS``.
#
# Args:
# settings (dict): Loaded settings.
#
# Returns:
# dict: Settings object filtered.
#
# """
# return {k: v for k, v in settings.items() if k in DEFAULT_SETTINGS}
#
# def set_settings(self, settings):
# """
# Set every given settings as object attributes.
#
# Args:
# settings (dict): Dictionnary of settings.
#
# """
# for k, v in settings.items():
# setattr(self, k, v)
#
# def update(self, settings):
# """
# Update object attributes from given settings
#
# Args:
# settings (dict): Dictionnary of elements to update settings.
#
# Returns:
# dict: Dictionnary of all current saved settings.
#
# """
# settings = self.clean(settings)
#
# # Update internal dict
# self._settings.update(settings)
#
# # Push every setting items as class object attributes
# self.set_settings(settings)
#
# return self._settings
, which may include functions, classes, or code. Output only the next line. | assert settings_object._settings == DEFAULT_SETTINGS |
Given snippet: <|code_start|> Dump settings content to filepath.
Args:
content (str): Settings content.
filepath (str): Settings file location.
"""
with open(filepath, "w") as fp:
json.dump(content, fp, indent=indent)
def parse(self, filepath, content):
"""
Parse opened settings content using JSON parser.
Args:
filepath (str): Settings object, depends from backend
content (str): Settings content from opened file, depends from
backend.
Raises:
boussole.exceptions.SettingsBackendError: If parser can not decode
a valid JSON object.
Returns:
dict: Dictionnary containing parsed setting elements.
"""
try:
parsed = json.loads(content)
except ValueError:
msg = "No JSON object could be decoded from file: {}"
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json
from ..exceptions import SettingsBackendError
from .base_backend import SettingsBackendBase
and context:
# Path: boussole/exceptions.py
# class SettingsBackendError(BoussoleBaseException):
# """
# Exception to be raised when config loading has failed from a backend.
# """
# pass
#
# Path: boussole/conf/base_backend.py
# class SettingsBackendBase(SettingsPostProcessor):
# """
# Base project settings backend
#
# Args:
# basedir (str): Directory path where to search for settings filepath.
#
# Default is empty, meaning it will resolve path from current
# directory. Don't use an empty ``basedir`` attribute to load
# settings from non-absolute filepath.
#
# Given value will fill intial value for ``projectdir`` attribute.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.txt``.
# _kind_name: Backend format name.
# Value is ``txt``.
# _file_extension: Default filename extension.
# Value is ``txt``.
# """
# _default_filename = "boussole.txt"
# _kind_name = "txt"
# _file_extension = "txt"
#
# def __init__(self, basedir=None):
# self.basedir = basedir or ""
# self.projectdir = self.basedir
#
# def parse_filepath(self, filepath=None):
# """
# Parse given filepath to split possible path directory from filename.
#
# * If path directory is empty, will use ``basedir`` attribute as base
# filepath;
# * If path directory is absolute, ignore ``basedir`` attribute;
# * If path directory is relative, join it to ``basedir`` attribute;
#
# Keyword Arguments:
# filepath (str): Filepath to use to search for settings file. Will
# use value from ``_default_filename`` class attribute if empty.
#
# If filepath contain a directory path, it will be splitted from
# filename and used as base directory (and update object
# ``basedir`` attribute).
#
# Returns:
# tuple: Separated path directory and filename.
# """
# filepath = filepath or self._default_filename
#
# path, filename = os.path.split(filepath)
#
# if not path:
# path = self.basedir
# elif not os.path.isabs(path):
# path = os.path.join(self.basedir, path)
#
# return os.path.normpath(path), filename
#
# def check_filepath(self, path, filename):
# """
# Check and return the final filepath to settings
#
# Args:
# path (str): Directory path where to search for settings file.
# filename (str): Filename to use to search for settings file.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If determined filepath
# does not exists or is a directory.
#
# Returns:
# string: Settings file path, joining given path and filename.
#
# """
# settings_path = os.path.join(path, filename)
#
# if not os.path.exists(settings_path) or \
# not os.path.isfile(settings_path):
# msg = "Unable to find settings file: {}"
# raise SettingsBackendError(msg.format(settings_path))
#
# return settings_path
#
# def open(self, filepath):
# """
# Open settings backend to return its content
#
# Args:
# filepath (str): Settings object, depends from backend
#
# Returns:
# string: File content.
#
# """
# with io.open(filepath, "r", encoding="utf-8") as fp:
# content = fp.read()
# return content
#
# def parse(self, filepath, content):
# """
# Load and parse opened settings content.
#
# Base method do nothing because parsing is dependent from backend.
#
# Args:
# filepath (str): Settings file location.
# content (str): Settings content from opened file, depends from
# backend.
#
# Returns:
# dict: Dictionnary containing parsed setting options.
#
# """
# return {}
#
# def dump(self, content, filepath):
# """
# Dump settings content to filepath.
#
# Base method do nothing because dumping is dependent from backend.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
#
# Returns:
# dict: Dictionnary containing parsed setting options.
#
# """
# return {}
#
# def clean(self, settings):
# """
# Clean given settings for backend needs.
#
# Default backend only apply available post processor methods.
#
# Args:
# dict: Loaded settings.
#
# Returns:
# dict: Settings object cleaned.
#
# """
# return self.post_process(settings)
#
# def load(self, filepath=None):
# """
# Load settings file from given path and optionnal filepath.
#
# During path resolving, the ``projectdir`` is updated to the file path
# directory.
#
# Keyword Arguments:
# filepath (str): Filepath to the settings file.
#
# Returns:
# boussole.conf.model.Settings: Settings object with loaded options.
#
# """
# self.projectdir, filename = self.parse_filepath(filepath)
#
# settings_path = self.check_filepath(self.projectdir, filename)
#
# parsed = self.parse(settings_path, self.open(settings_path))
#
# settings = self.clean(parsed)
#
# return Settings(initial=settings)
which might include code, classes, or functions. Output only the next line. | raise SettingsBackendError(msg.format(filepath)) |
Given the following code snippet before the placeholder: <|code_start|>
# Backend default filename shortcuts
YAML_FILENAME = SettingsBackendYaml._default_filename
JSON_FILENAME = SettingsBackendJson._default_filename
def test_001(settings, caplog):
"""
Basic
"""
runner = CliRunner()
# Temporary isolated current dir
with runner.isolated_filesystem():
test_cwd = os.getcwd()
sourcedir = os.path.join(test_cwd, "scss")
targetdir = os.path.join(test_cwd, "css")
config_filepath = os.path.join(test_cwd, JSON_FILENAME)
opts = [
"startproject",
"--basedir={}".format(test_cwd),
"--config={}".format(JSON_FILENAME),
"--sourcedir={}".format("scss"),
"--targetdir={}".format("css"),
]
# Execute command with opts
<|code_end|>
, predict the next line using imports from the current file:
import json
import os
from click.testing import CliRunner
from boussole.cli.console_script import cli_frontend
from boussole.conf.json_backend import SettingsBackendJson
from boussole.conf.yaml_backend import SettingsBackendYaml
and context including class names, function names, and sometimes code from other files:
# Path: boussole/cli/console_script.py
# @click.group(context_settings=CONTEXT_SETTINGS)
# @click.option(
# "-v",
# "--verbose",
# type=click.IntRange(min=0, max=5),
# default=4,
# metavar="INTEGER",
# help=(
# "An integer between 0 and 5, where '0' make a totaly "
# "silent output and '5' set level to DEBUG (the most verbose "
# "level). Default to '4' (Info level)."
# )
# )
# @click.pass_context
# def cli_frontend(ctx, verbose):
# """
# Boussole is a commandline interface to build Sass projects using libsass.
#
# Every project will need a settings file containing all needed settings to
# build it.
# """
# printout = True
# if verbose == 0:
# verbose = 1
# printout = False
#
# # Verbosity is the inverse of logging levels
# levels = [item for item in BOUSSOLE_LOGGER_CONF]
# levels.reverse()
# # Init the logger config
# root_logger = init_logger(levels[verbose], printout=printout)
#
# # Init the default context that will be passed to commands
# ctx.obj = {
# "verbosity": verbose,
# "logger": root_logger,
# }
#
# Path: boussole/conf/json_backend.py
# class SettingsBackendJson(SettingsBackendBase):
# """
# JSON backend for settings
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.json``.
# _kind_name: Backend format name.
# Value is ``json``.
# _file_extension: Default filename extension.
# Value is ``json``.
# """
# _default_filename = "boussole.json"
# _kind_name = "json"
# _file_extension = "json"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# json.dump(content, fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using JSON parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid JSON object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = json.loads(content)
# except ValueError:
# msg = "No JSON object could be decoded from file: {}"
# raise SettingsBackendError(msg.format(filepath))
# return parsed
#
# Path: boussole/conf/yaml_backend.py
# class SettingsBackendYaml(SettingsBackendBase):
# """
# YAML backend for settings
#
# Use PyYaml for parsing and pyaml for dumping.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.yml``.
# _kind_name: Backend format name.
# Value is ``yaml``.
# _file_extension: Default filename extension.
# Value is ``yml``.
# """
# _default_filename = "boussole.yml"
# _kind_name = "yaml"
# _file_extension = "yml"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# pyaml.dump(content, dst=fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using YAML parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid YAML object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = yaml.load(content, Loader=yaml.FullLoader)
# except yaml.YAMLError as exc:
# msg = "No YAML object could be decoded from file: {}\n{}"
# raise SettingsBackendError(msg.format(filepath, exc))
# return parsed
. Output only the next line. | result = runner.invoke(cli_frontend, opts) |
Predict the next line after this snippet: <|code_start|> (
'/home/foo',
'/home/bar',
'/home/foo',
),
(
'/home/foo',
'/home/bar',
'/home/foo',
'/home/foo',
),
(
'/home/foo',
'/home/bar',
'/home/foo',
'/home/bar',
),
(
'/home/foo',
'/home/bar',
'/home/bar',
'/home/foo',
'/var/lib',
'/var/lib',
),
])
def test_wrong(projectstarter, paths):
"""
fail on duplicated paths
"""
<|code_end|>
using the current file's imports:
import pytest
from boussole.exceptions import SettingsInvalidError
and any relevant context from other files:
# Path: boussole/exceptions.py
# class SettingsInvalidError(BoussoleBaseException):
# """
# Exception to be raised when a settings is detected as invalid.
# """
# pass
. Output only the next line. | with pytest.raises(SettingsInvalidError): |
Predict the next line for this snippet: <|code_start|>
return engines, filenames, extensions
def get_engine(self, filepath, kind=None):
"""
From given filepath try to discover which backend format to use.
Discovering is pretty naive as it find format from file extension.
Args:
filepath (str): Settings filepath or filename.
Keyword Arguments:
kind (str): A format name to enforce a specific backend. Can be any
value from attribute ``_kind_name`` of available backend
engines.
Raises:
boussole.exceptions.SettingsDiscoveryError: If extension is
unknowed or if given format name is unknowed.
Returns:
object: Backend engine class.
"""
if not kind:
extension = os.path.splitext(filepath)[1]
if not extension:
msg = ("Unable to discover settings format from an empty file "
"extension: {}")
<|code_end|>
with the help of current file imports:
import os
from collections import OrderedDict
from ..exceptions import SettingsDiscoveryError
and context from other files:
# Path: boussole/exceptions.py
# class SettingsDiscoveryError(BoussoleBaseException):
# """
# Exception to be raised when config discovery has failed to find settings
# file.
# """
# pass
, which may contain function names, class names, or code. Output only the next line. | raise SettingsDiscoveryError(msg.format(filepath)) |
Predict the next line for this snippet: <|code_start|> for k in list(library_paths):
if k not in basepaths:
basepaths.append(k)
for import_rule in paths:
candidates = self.candidate_paths(import_rule)
# Search all existing candidates:
# * If more than one candidate raise an error;
# * If only one, accept it;
# * If no existing candidate raise an error;
stack = []
for i, basepath in enumerate(basepaths):
checked = self.check_candidate_exists(basepath, candidates)
if checked:
stack.extend(checked)
# More than one existing candidate
if len(stack) > 1:
raise UnclearResolution(
"rule '{}' This is not clear for these paths: {}".format(
import_rule, ', '.join(stack)
)
)
# Accept the single one
elif len(stack) == 1:
resolved_paths.append(os.path.normpath(stack[0]))
# No validated candidate
else:
if self.STRICT_PATH_VALIDATION:
<|code_end|>
with the help of current file imports:
import os
from .exceptions import UnresolvablePath, UnclearResolution
and context from other files:
# Path: boussole/exceptions.py
# class UnresolvablePath(BoussoleBaseException):
# """
# Exception to be raised when the resolver can not resolve a given path.
# """
# pass
#
# class UnclearResolution(BoussoleBaseException):
# """
# Exception to be raised when the resolver encounts multiple existing
# candidates for a path.
# """
# pass
, which may contain function names, class names, or code. Output only the next line. | raise UnresolvablePath( |
Based on the snippet: <|code_start|> if paths:
# Add given library paths to the basepaths for resolving
# Accept a string if not allready in basepaths
if (
library_paths and
isinstance(library_paths, str) and
library_paths not in basepaths
):
basepaths.append(library_paths)
# Add path item from list if not allready in basepaths
elif library_paths:
for k in list(library_paths):
if k not in basepaths:
basepaths.append(k)
for import_rule in paths:
candidates = self.candidate_paths(import_rule)
# Search all existing candidates:
# * If more than one candidate raise an error;
# * If only one, accept it;
# * If no existing candidate raise an error;
stack = []
for i, basepath in enumerate(basepaths):
checked = self.check_candidate_exists(basepath, candidates)
if checked:
stack.extend(checked)
# More than one existing candidate
if len(stack) > 1:
<|code_end|>
, predict the immediate next line with the help of imports:
import os
from .exceptions import UnresolvablePath, UnclearResolution
and context (classes, functions, sometimes code) from other files:
# Path: boussole/exceptions.py
# class UnresolvablePath(BoussoleBaseException):
# """
# Exception to be raised when the resolver can not resolve a given path.
# """
# pass
#
# class UnclearResolution(BoussoleBaseException):
# """
# Exception to be raised when the resolver encounts multiple existing
# candidates for a path.
# """
# pass
. Output only the next line. | raise UnclearResolution( |
Given the code snippet: <|code_start|>])
def test_init(projectstarter, name, klass):
"""
Default backend
"""
p = projectstarter(name)
assert p.backend_name == name
assert isinstance(p.backend_engine, klass)
@pytest.mark.parametrize("name,klass", [
('json', SettingsBackendJson),
('yaml', SettingsBackendYaml),
])
def test_get_backend(projectstarter, name, klass):
"""
Get backend
"""
p = projectstarter(name)
engine = p.get_backend_engine(name)
assert isinstance(engine, klass)
def test_error(projectstarter):
"""
Error on default backend
"""
<|code_end|>
, generate the next line using the imports in this file:
import pytest
from boussole.exceptions import SettingsBackendError
from boussole.conf.json_backend import SettingsBackendJson
from boussole.conf.yaml_backend import SettingsBackendYaml
and context (functions, classes, or occasionally code) from other files:
# Path: boussole/exceptions.py
# class SettingsBackendError(BoussoleBaseException):
# """
# Exception to be raised when config loading has failed from a backend.
# """
# pass
#
# Path: boussole/conf/json_backend.py
# class SettingsBackendJson(SettingsBackendBase):
# """
# JSON backend for settings
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.json``.
# _kind_name: Backend format name.
# Value is ``json``.
# _file_extension: Default filename extension.
# Value is ``json``.
# """
# _default_filename = "boussole.json"
# _kind_name = "json"
# _file_extension = "json"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# json.dump(content, fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using JSON parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid JSON object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = json.loads(content)
# except ValueError:
# msg = "No JSON object could be decoded from file: {}"
# raise SettingsBackendError(msg.format(filepath))
# return parsed
#
# Path: boussole/conf/yaml_backend.py
# class SettingsBackendYaml(SettingsBackendBase):
# """
# YAML backend for settings
#
# Use PyYaml for parsing and pyaml for dumping.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.yml``.
# _kind_name: Backend format name.
# Value is ``yaml``.
# _file_extension: Default filename extension.
# Value is ``yml``.
# """
# _default_filename = "boussole.yml"
# _kind_name = "yaml"
# _file_extension = "yml"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# pyaml.dump(content, dst=fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using YAML parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid YAML object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = yaml.load(content, Loader=yaml.FullLoader)
# except yaml.YAMLError as exc:
# msg = "No YAML object could be decoded from file: {}\n{}"
# raise SettingsBackendError(msg.format(filepath, exc))
# return parsed
. Output only the next line. | with pytest.raises(SettingsBackendError): |
Given snippet: <|code_start|> """
if name not in self._engines:
msg = "Given settings backend is unknowed: {}"
raise SettingsBackendError(msg.format(name))
return self._engines[name](**kwargs)
class ProjectStarter(ProjectBase):
"""
Provide methods to create a new Sass Project
"""
def valid_paths(self, *args):
"""
Validate that given paths are not the same.
Args:
(string): Path to validate.
Raises:
boussole.exceptions.SettingsInvalidError: If there is more than one
occurence of the same path.
Returns:
bool: ``True`` if paths are validated.
"""
for i, path in enumerate(args, start=0):
cp = list(args)
current = cp.pop(i)
if current in cp:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
from .exceptions import SettingsInvalidError, SettingsBackendError
from .conf.json_backend import SettingsBackendJson
from .conf.yaml_backend import SettingsBackendYaml
and context:
# Path: boussole/exceptions.py
# class SettingsInvalidError(BoussoleBaseException):
# """
# Exception to be raised when a settings is detected as invalid.
# """
# pass
#
# class SettingsBackendError(BoussoleBaseException):
# """
# Exception to be raised when config loading has failed from a backend.
# """
# pass
#
# Path: boussole/conf/json_backend.py
# class SettingsBackendJson(SettingsBackendBase):
# """
# JSON backend for settings
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.json``.
# _kind_name: Backend format name.
# Value is ``json``.
# _file_extension: Default filename extension.
# Value is ``json``.
# """
# _default_filename = "boussole.json"
# _kind_name = "json"
# _file_extension = "json"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# json.dump(content, fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using JSON parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid JSON object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = json.loads(content)
# except ValueError:
# msg = "No JSON object could be decoded from file: {}"
# raise SettingsBackendError(msg.format(filepath))
# return parsed
#
# Path: boussole/conf/yaml_backend.py
# class SettingsBackendYaml(SettingsBackendBase):
# """
# YAML backend for settings
#
# Use PyYaml for parsing and pyaml for dumping.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.yml``.
# _kind_name: Backend format name.
# Value is ``yaml``.
# _file_extension: Default filename extension.
# Value is ``yml``.
# """
# _default_filename = "boussole.yml"
# _kind_name = "yaml"
# _file_extension = "yml"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# pyaml.dump(content, dst=fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using YAML parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid YAML object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = yaml.load(content, Loader=yaml.FullLoader)
# except yaml.YAMLError as exc:
# msg = "No YAML object could be decoded from file: {}\n{}"
# raise SettingsBackendError(msg.format(filepath, exc))
# return parsed
which might include code, classes, or functions. Output only the next line. | raise SettingsInvalidError( |
Given the following code snippet before the placeholder: <|code_start|> _engines: Available Configuration backends. Read only.
backend_name: Backend name to use from available ones.
backend_engine: Backend engine selected from given name.
"""
_engines = {
"json": SettingsBackendJson,
"yaml": SettingsBackendYaml,
}
def __init__(self, backend_name, **kwargs):
self.backend_name = backend_name
self.backend_engine = self.get_backend_engine(self.backend_name,
**kwargs)
def get_backend_engine(self, name, **kwargs):
"""
Get backend engine from given name.
Args:
(string): Path to validate.
Raises:
boussole.exceptions.SettingsBackendError: If given backend name
does not match any available engine.
Returns:
object: Instance of selected backend engine.
"""
if name not in self._engines:
msg = "Given settings backend is unknowed: {}"
<|code_end|>
, predict the next line using imports from the current file:
import os
from .exceptions import SettingsInvalidError, SettingsBackendError
from .conf.json_backend import SettingsBackendJson
from .conf.yaml_backend import SettingsBackendYaml
and context including class names, function names, and sometimes code from other files:
# Path: boussole/exceptions.py
# class SettingsInvalidError(BoussoleBaseException):
# """
# Exception to be raised when a settings is detected as invalid.
# """
# pass
#
# class SettingsBackendError(BoussoleBaseException):
# """
# Exception to be raised when config loading has failed from a backend.
# """
# pass
#
# Path: boussole/conf/json_backend.py
# class SettingsBackendJson(SettingsBackendBase):
# """
# JSON backend for settings
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.json``.
# _kind_name: Backend format name.
# Value is ``json``.
# _file_extension: Default filename extension.
# Value is ``json``.
# """
# _default_filename = "boussole.json"
# _kind_name = "json"
# _file_extension = "json"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# json.dump(content, fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using JSON parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid JSON object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = json.loads(content)
# except ValueError:
# msg = "No JSON object could be decoded from file: {}"
# raise SettingsBackendError(msg.format(filepath))
# return parsed
#
# Path: boussole/conf/yaml_backend.py
# class SettingsBackendYaml(SettingsBackendBase):
# """
# YAML backend for settings
#
# Use PyYaml for parsing and pyaml for dumping.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.yml``.
# _kind_name: Backend format name.
# Value is ``yaml``.
# _file_extension: Default filename extension.
# Value is ``yml``.
# """
# _default_filename = "boussole.yml"
# _kind_name = "yaml"
# _file_extension = "yml"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# pyaml.dump(content, dst=fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using YAML parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid YAML object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = yaml.load(content, Loader=yaml.FullLoader)
# except yaml.YAMLError as exc:
# msg = "No YAML object could be decoded from file: {}\n{}"
# raise SettingsBackendError(msg.format(filepath, exc))
# return parsed
. Output only the next line. | raise SettingsBackendError(msg.format(name)) |
Continue the code snippet: <|code_start|>"""
Project management
==================
"""
class ProjectBase(object):
"""
Project base
Arguments:
backend_name (string): Backend name, can be either ``json`` or
``yaml``.
Attributes:
_engines: Available Configuration backends. Read only.
backend_name: Backend name to use from available ones.
backend_engine: Backend engine selected from given name.
"""
_engines = {
<|code_end|>
. Use current file imports:
import os
from .exceptions import SettingsInvalidError, SettingsBackendError
from .conf.json_backend import SettingsBackendJson
from .conf.yaml_backend import SettingsBackendYaml
and context (classes, functions, or code) from other files:
# Path: boussole/exceptions.py
# class SettingsInvalidError(BoussoleBaseException):
# """
# Exception to be raised when a settings is detected as invalid.
# """
# pass
#
# class SettingsBackendError(BoussoleBaseException):
# """
# Exception to be raised when config loading has failed from a backend.
# """
# pass
#
# Path: boussole/conf/json_backend.py
# class SettingsBackendJson(SettingsBackendBase):
# """
# JSON backend for settings
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.json``.
# _kind_name: Backend format name.
# Value is ``json``.
# _file_extension: Default filename extension.
# Value is ``json``.
# """
# _default_filename = "boussole.json"
# _kind_name = "json"
# _file_extension = "json"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# json.dump(content, fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using JSON parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid JSON object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = json.loads(content)
# except ValueError:
# msg = "No JSON object could be decoded from file: {}"
# raise SettingsBackendError(msg.format(filepath))
# return parsed
#
# Path: boussole/conf/yaml_backend.py
# class SettingsBackendYaml(SettingsBackendBase):
# """
# YAML backend for settings
#
# Use PyYaml for parsing and pyaml for dumping.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.yml``.
# _kind_name: Backend format name.
# Value is ``yaml``.
# _file_extension: Default filename extension.
# Value is ``yml``.
# """
# _default_filename = "boussole.yml"
# _kind_name = "yaml"
# _file_extension = "yml"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# pyaml.dump(content, dst=fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using YAML parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid YAML object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = yaml.load(content, Loader=yaml.FullLoader)
# except yaml.YAMLError as exc:
# msg = "No YAML object could be decoded from file: {}\n{}"
# raise SettingsBackendError(msg.format(filepath, exc))
# return parsed
. Output only the next line. | "json": SettingsBackendJson, |
Based on the snippet: <|code_start|>"""
Project management
==================
"""
class ProjectBase(object):
"""
Project base
Arguments:
backend_name (string): Backend name, can be either ``json`` or
``yaml``.
Attributes:
_engines: Available Configuration backends. Read only.
backend_name: Backend name to use from available ones.
backend_engine: Backend engine selected from given name.
"""
_engines = {
"json": SettingsBackendJson,
<|code_end|>
, predict the immediate next line with the help of imports:
import os
from .exceptions import SettingsInvalidError, SettingsBackendError
from .conf.json_backend import SettingsBackendJson
from .conf.yaml_backend import SettingsBackendYaml
and context (classes, functions, sometimes code) from other files:
# Path: boussole/exceptions.py
# class SettingsInvalidError(BoussoleBaseException):
# """
# Exception to be raised when a settings is detected as invalid.
# """
# pass
#
# class SettingsBackendError(BoussoleBaseException):
# """
# Exception to be raised when config loading has failed from a backend.
# """
# pass
#
# Path: boussole/conf/json_backend.py
# class SettingsBackendJson(SettingsBackendBase):
# """
# JSON backend for settings
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.json``.
# _kind_name: Backend format name.
# Value is ``json``.
# _file_extension: Default filename extension.
# Value is ``json``.
# """
# _default_filename = "boussole.json"
# _kind_name = "json"
# _file_extension = "json"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# json.dump(content, fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using JSON parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid JSON object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = json.loads(content)
# except ValueError:
# msg = "No JSON object could be decoded from file: {}"
# raise SettingsBackendError(msg.format(filepath))
# return parsed
#
# Path: boussole/conf/yaml_backend.py
# class SettingsBackendYaml(SettingsBackendBase):
# """
# YAML backend for settings
#
# Use PyYaml for parsing and pyaml for dumping.
#
# Attributes:
# _default_filename: Filename for settings file to load.
# Value is ``settings.yml``.
# _kind_name: Backend format name.
# Value is ``yaml``.
# _file_extension: Default filename extension.
# Value is ``yml``.
# """
# _default_filename = "boussole.yml"
# _kind_name = "yaml"
# _file_extension = "yml"
#
# def dump(self, content, filepath, indent=4):
# """
# Dump settings content to filepath.
#
# Args:
# content (str): Settings content.
# filepath (str): Settings file location.
# """
# with open(filepath, "w") as fp:
# pyaml.dump(content, dst=fp, indent=indent)
#
# def parse(self, filepath, content):
# """
# Parse opened settings content using YAML parser.
#
# Args:
# filepath (str): Settings object, depends from backend
# content (str): Settings content from opened file, depends from
# backend.
#
# Raises:
# boussole.exceptions.SettingsBackendError: If parser can not decode
# a valid YAML object.
#
# Returns:
# dict: Dictionnary containing parsed setting elements.
#
# """
# try:
# parsed = yaml.load(content, Loader=yaml.FullLoader)
# except yaml.YAMLError as exc:
# msg = "No YAML object could be decoded from file: {}\n{}"
# raise SettingsBackendError(msg.format(filepath, exc))
# return parsed
. Output only the next line. | "yaml": SettingsBackendYaml, |
Continue the code snippet: <|code_start|>
assert os.path.exists(os.path.join(basedir, settings_filename))
assert os.path.exists(os.path.join(basedir, "scss"))
assert os.path.exists(os.path.join(basedir, "css"))
with open(os.path.join(basedir, settings_filename), "r") as fp:
assert module.load(fp, **module_opts) == {
'SOURCES_PATH': 'scss',
'TARGET_PATH': 'css',
"LIBRARY_PATHS": [],
"OUTPUT_STYLES": "nested",
"SOURCE_COMMENTS": False,
"EXCLUDES": []
}
@pytest.mark.parametrize("name,ext,module", [
('json', 'json', json),
('yaml', 'yml', yaml),
])
def test_error(projectstarter, temp_builds_dir, name, ext, module):
"""
Raised exception caused by duplicate paths
"""
tmp_dirname = 'projectstarter_init_error_{}'.format(name)
settings_filename = "settings.{}".format(ext)
basedir = temp_builds_dir.join(tmp_dirname).strpath
os.makedirs(basedir)
<|code_end|>
. Use current file imports:
import os
import json
import yaml
import pytest
from boussole.exceptions import SettingsInvalidError
and context (classes, functions, or code) from other files:
# Path: boussole/exceptions.py
# class SettingsInvalidError(BoussoleBaseException):
# """
# Exception to be raised when a settings is detected as invalid.
# """
# pass
. Output only the next line. | with pytest.raises(SettingsInvalidError): |
Next line prediction: <|code_start|>
d = {'col1' : pd.Series([1.], index=[0]),
'col2' : pd.Series([1., 2.], index=[0, 1])}
df = pd.DataFrame(d)
#--------------------------------(CSV to dataframe)-----------------------------
#test converting a CSV file to a pandas dataframe
def test_CSVtoDataframe1():
<|code_end|>
. Use current file imports:
(import numpy as np
import pandas as pd
import pytest
import datetime
from dcs import load)
and context including class names, function names, or small code snippets from other files:
# Path: dcs/load.py
# def guessEncoding(filename):
# def convertEncoding(filename, source="utf-8", destination="utf-8", buffer=1024):
# def CSVtoDataFrame(filename, header=0, initialSkip=0, sampleSize=100, seed=None, headerIncluded=True):
# def JSONtoDataFrame(filename, sampleSize=100, seed=None):
# def XLSXtoDataFrame(filename, initialSkip=0, sampleSize=100, seed=None, headerIncluded=True):
# def dataFrameToJSON(df, rowIndexFrom=None, rowIndexTo=None, columnIndexFrom=None, columnIndexTo=None):
# def renameColumn(df, column, newName):
# def emptyStringToNan(df, columnIndex):
# def newCellValue(df, columnIndex, rowIndex, newValue):
# def removeRows(df, rowIndices):
# def removeColumns(df, columnIndices):
# def rowsWithInvalidValuesInColumns(df, columnIndices):
# def duplicateRowsInColumns(df, columnIndices):
# def outliersTrimmedMeanSd(df, columnIndices, r=2, k=0):
# def changeColumnDataType(df, column, newDataType, dateFormat=None):
. Output only the next line. | testing_df = load.CSVtoDataFrame('inputCSV.csv') |
Here is a snippet: <|code_start|>
d = {'col1' : pd.Series([1, 2, 2, 2, 3, 3, 3, 4]),
'col2' : pd.Series(['hello hello', 'hello world 2', 'world world', '1 world', 'random', 'random'])}
df = pd.DataFrame(d)
#-------------------------(test analysis for string datatype)------------------------
#test min word length
def test_string_min_length():
test_series = df['col2']
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
import pandas as pd
import pytest
import datetime
from dcs import analyze
and context from other files:
# Path: dcs/analyze.py
# def textAnalysis(series):
# def numericalAnalysis(series):
# def dateAnalysis(series):
# def genericAnalysis(series):
# def analysisForColumn(df, column):
, which may include functions, classes, or code. Output only the next line. | test_analysis = analyze.textAnalysis(test_series) |
Predict the next line for this snippet: <|code_start|>
* **useWords** : a ``bool`` flag which may be set to ``True`` to plot word frequencies instad of row value frequencies for a string column
* **cutoff** : an ``int`` specifying the top *n* values by frequency to plot, default is 50, maximum is 50
The function returns a dictionary with the following key-value pairs:
* **image** : *StringIO.StringIO* – :class:`StringIO.StringIO` object containing Base64 encoded PNG image of generated plot
Args:
df (pandas.DataFrame): data frame
columnIndices (list<int>): indices of columns to plot
options (dict, optional): options dictionary
Returns:
dict: dictionary containing image
"""
cutoff = 50
useWords = False
column = df[df.columns[columnIndex]]
if type(options) is dict:
if options.get("useWords", False) is True and not issubclass(column.dtype.type, np.datetime64) and not issubclass(column.dtype.type, np.number):
useWords = True
if options.get("cutoff", -1) > 0 and options.get("cutoff", -1) <= 50:
cutoff = int(options["cutoff"])
values = []
counts = []
if useWords:
<|code_end|>
with the help of current file imports:
import pandas as pd
import numpy as np
import matplotlib
import scipy.stats
import traceback
import base64
import dateutil.parser
from matplotlib.dates import num2date, date2num
from matplotlib import pyplot
from dcs.analyze import textAnalysis
from StringIO import StringIO # USE for production
and context from other files:
# Path: dcs/analyze.py
# def textAnalysis(series):
# """Analyzes a :class:`pandas.Series` of type ``str``, returning a dictionary containing computed statistics
#
# The returned dictionary has the following structure: {*metric*: *value*}. The calculated metrics are:
#
# * **word_count_min**: minimum number of words in each row
# * **word_count_max**: maximum number of words in each row
# * **word_count_average**: average number of words in each row
# * **word_length_min**: length of shortest word
# * **word_length_max**: length of longets word
# * **word_total**: total number of words
# * **word_mode**: most frequently occurring word
# * **word_mode_frequency**: frequency of **word_mode**
# * **word_frequencies**: a ``list<tuple<str, int>>`` object containing top 50 words (by frequency) and their counts
# * **invalid**: number of invalid values
#
# The returned dictionary will also contain the general statistical metrics returned by :func:`dcs.analyze.genericAnalysis`
#
# Args:
# series (pandas.Series): series to analyze
#
# Returns:
# dict: dictionary containing statistical metric–value pairs
# """
#
# analysis = {}
# minWordCount = float('inf')
# maxWordCount = 0
# totalWords = 0
# wordCounts = {}
# sumOfWordLengths = 0
# wordFrequencies = []
# frequencyCount = 0
#
# averageWordsPerCell = 0
# minWordLength = float('inf')
# maxWordLength = 0
#
# for row in series:
# if pd.notnull(row):
# words = str(row).split()
# numberOfWords = len(words)
# if numberOfWords < minWordCount:
# minWordCount = numberOfWords
# if numberOfWords > maxWordCount:
# maxWordCount = numberOfWords
# totalWords += numberOfWords
#
# for word in words:
# wordLength = len(word)
# wordCounts[word] = wordCounts.get(word, 0) + 1
# sumOfWordLengths += wordLength
# if wordLength < minWordLength:
# minWordLength = wordLength
# elif wordLength > maxWordLength:
# maxWordLength = wordLength
#
# averageWordLength = sumOfWordLengths / totalWords if totalWords > 0 else 0
# averageWordCount = totalWords / series.count() if series.count() > 0 else 0
#
# uniqueWords = 0
# maxCount = 0
# mostProminentWords = []
# for word, count in wordCounts.iteritems():
# uniqueWords += 1
# if count > maxCount:
# maxCount = count
# mostProminentWords = [word]
# maxCount = count
# elif count == maxCount:
# mostProminentWords.append(word)
# for w in sorted(wordCounts, key=wordCounts.get, reverse=True):
# if frequencyCount < 50:
# wordFrequencies.append((w, wordCounts[w]))
# frequencyCount += 1
# else:
# break
# #wordFrequencies = {k: wordCounts[k] for k in wordCounts.keys()[:50]}
#
#
# analysis = {}
# analysis["word_count_min"] = minWordCount
# analysis["word_count_max"] = maxWordCount
# analysis["word_count_average"] = averageWordCount
# analysis["word_length_min"] = minWordLength
# analysis["word_length_max"] = maxWordLength
# analysis["word_length_average"] = averageWordLength
# analysis["word_total"] = totalWords
# analysis["word_unique_count"] = uniqueWords
# analysis["word_mode"] = mostProminentWords
# analysis["word_mode_frequency"] = maxCount
# analysis["word_frequencies"] = wordFrequencies
# analysis["invalid"] = series.isnull().sum()
# analysis.update(genericAnalysis(series))
#
# return analysis
, which may contain function names, class names, or code. Output only the next line. | tuples = textAnalysis(column)["word_frequencies"] |
Next line prediction: <|code_start|>
d = {'col1' : pd.Series([1., 2., 3., 4.], index=[0, 1, 3, 4]),
'col2' : pd.Series([1., 2., 2., 4.], index=[0, 1, 2, 4]),
'col3' : pd.Series([1., 2., 3., 4., 5.])}
df = pd.DataFrame(d)
#-----------------------------(fillDown)-----------------------------
#fill all columns of the dataframe
def test_fill_pad1():
testing_df = df.copy()
<|code_end|>
. Use current file imports:
(import numpy as np
import pandas as pd
import pytest
from dcs import clean)
and context including class names, function names, or small code snippets from other files:
# Path: dcs/clean.py
# def fillDown(df, columnFrom, columnTo, method):
# def fillByInterpolation(df, columnIndex, method, order):
# def fillWithCustomValue(df, columnIndex, newValue):
# def fillWithAverage(df, columnIndex, metric):
# def normalize(df, columnIndex, rangeFrom=0, rangeTo=1):
# def standardize(df, columnIndex):
# def deleteRowsWithNA(df, columnIndex):
# def findReplace(df, columnIndex, toReplace, replaceWith, matchRegex):
# def generateDummies(df, columnIndex, inplace):
# def insertDuplicateColumn(df, columnIndex):
# def splitColumn(df, columnIndex, delimiter, regex=False):
# def combineColumns(df, columnHeadings, seperator="", newName="merged_column", insertIndex=0):
# def discretize(df, columnIndex, cutMode, numberOfBins):
# def executeCommand(df, command):
. Output only the next line. | clean.fillDown(testing_df, 0, 1, 'pad') |
Using the snippet: <|code_start|> 'state': c['Status'],
'created': c['Created'],
}
if 'com.myaas.expiresAt' in c['Labels']:
db.update({'expires_at': c['Labels']['com.myaas.expiresAt']})
databases.append(db)
return jsonify(databases=databases)
@app.route('/templates', methods=['get'])
def show_templates():
return jsonify(templates=list_database_templates())
@app.route('/db/<template>/<name>', methods=['get'])
def inspect_database(template, name):
logger.debug(f'requested inspect DB for: "{template}" => "{name}"')
database = get_enabled_backend().Database
try:
db = database(client, template, name)
except NonExistentDatabase:
logger.debug(f'database not found "{template}" => "{name}"')
abort(404)
if request.args.get('all'):
return jsonify(container=db.inspect())
result = dict(
database=db.database,
<|code_end|>
, determine the next line of code. You have imports:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and context (class names, function names, or code) available:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
. Output only the next line. | host=HOSTNAME, |
Given the code snippet: <|code_start|>
app = Flask(__name__)
logger = logging
logger.basicConfig(
format='%(asctime)s {:4} %(levelname)s: %(message)s'.format(os.getpid()),
<|code_end|>
, generate the next line using the imports in this file:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and context (functions, classes, or occasionally code) from other files:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
. Output only the next line. | level=logging.DEBUG if DEBUG else logging.WARNING) |
Predict the next line after this snippet: <|code_start|> abort(404)
if request.args.get('all'):
return jsonify(container=db.inspect())
result = dict(
database=db.database,
host=HOSTNAME,
name=db.name,
port=db.host_port,
user=db.user,
password=db.password,
running=db.running(),
status=db.container['Status'],
created=db.container['Created'],
)
if 'com.myaas.expiresAt' in db.container['Labels']:
result.update({'expires_at': db.container['Labels']['com.myaas.expiresAt']})
return jsonify(result)
@app.route('/db/<template>/<name>', methods=['post'])
def create_database(template, name):
logger.debug(f'requested create DB from "{template}" as "{name}"')
form_ttl = request.form.get("ttl")
json_ttl = request.get_json(silent=True)
json_ttl = json_ttl.get("ttl") if json_ttl else None
ttl = form_ttl or json_ttl
<|code_end|>
using the current file's imports:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and any relevant context from other files:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
. Output only the next line. | ttl = int(ttl) if ttl else CONTAINER_TTL |
Here is a snippet: <|code_start|> databases=list_databases())
@app.route('/db', methods=['get'])
def show_databases():
databases = []
for c in get_myaas_containers():
db = {
'template': c['Labels']['com.myaas.template'],
'name': c['Labels']['com.myaas.instance'],
'state': c['Status'],
'created': c['Created'],
}
if 'com.myaas.expiresAt' in c['Labels']:
db.update({'expires_at': c['Labels']['com.myaas.expiresAt']})
databases.append(db)
return jsonify(databases=databases)
@app.route('/templates', methods=['get'])
def show_templates():
return jsonify(templates=list_database_templates())
@app.route('/db/<template>/<name>', methods=['get'])
def inspect_database(template, name):
logger.debug(f'requested inspect DB for: "{template}" => "{name}"')
database = get_enabled_backend().Database
try:
<|code_end|>
. Write the next line using the current file imports:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and context from other files:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
, which may include functions, classes, or code. Output only the next line. | db = database(client, template, name) |
Continue the code snippet: <|code_start|>
app = Flask(__name__)
logger = logging
logger.basicConfig(
format='%(asctime)s {:4} %(levelname)s: %(message)s'.format(os.getpid()),
level=logging.DEBUG if DEBUG else logging.WARNING)
@app.route('/', methods=['get'])
def hello_world():
return jsonify(
status="Service is running",
templates=list_database_templates(),
databases=list_databases())
@app.route('/db', methods=['get'])
def show_databases():
databases = []
<|code_end|>
. Use current file imports:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and context (classes, functions, or code) from other files:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
. Output only the next line. | for c in get_myaas_containers(): |
Here is a snippet: <|code_start|> status="Service is running",
templates=list_database_templates(),
databases=list_databases())
@app.route('/db', methods=['get'])
def show_databases():
databases = []
for c in get_myaas_containers():
db = {
'template': c['Labels']['com.myaas.template'],
'name': c['Labels']['com.myaas.instance'],
'state': c['Status'],
'created': c['Created'],
}
if 'com.myaas.expiresAt' in c['Labels']:
db.update({'expires_at': c['Labels']['com.myaas.expiresAt']})
databases.append(db)
return jsonify(databases=databases)
@app.route('/templates', methods=['get'])
def show_templates():
return jsonify(templates=list_database_templates())
@app.route('/db/<template>/<name>', methods=['get'])
def inspect_database(template, name):
logger.debug(f'requested inspect DB for: "{template}" => "{name}"')
<|code_end|>
. Write the next line using the current file imports:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and context from other files:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
, which may include functions, classes, or code. Output only the next line. | database = get_enabled_backend().Database |
Given the following code snippet before the placeholder: <|code_start|>
app = Flask(__name__)
logger = logging
logger.basicConfig(
format='%(asctime)s {:4} %(levelname)s: %(message)s'.format(os.getpid()),
level=logging.DEBUG if DEBUG else logging.WARNING)
@app.route('/', methods=['get'])
def hello_world():
return jsonify(
status="Service is running",
templates=list_database_templates(),
<|code_end|>
, predict the next line using imports from the current file:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and context including class names, function names, and sometimes code from other files:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
. Output only the next line. | databases=list_databases()) |
Here is a snippet: <|code_start|>
app = Flask(__name__)
logger = logging
logger.basicConfig(
format='%(asctime)s {:4} %(levelname)s: %(message)s'.format(os.getpid()),
level=logging.DEBUG if DEBUG else logging.WARNING)
@app.route('/', methods=['get'])
def hello_world():
return jsonify(
status="Service is running",
<|code_end|>
. Write the next line using the current file imports:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and context from other files:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
, which may include functions, classes, or code. Output only the next line. | templates=list_database_templates(), |
Based on the snippet: <|code_start|>
@app.route('/db', methods=['get'])
def show_databases():
databases = []
for c in get_myaas_containers():
db = {
'template': c['Labels']['com.myaas.template'],
'name': c['Labels']['com.myaas.instance'],
'state': c['Status'],
'created': c['Created'],
}
if 'com.myaas.expiresAt' in c['Labels']:
db.update({'expires_at': c['Labels']['com.myaas.expiresAt']})
databases.append(db)
return jsonify(databases=databases)
@app.route('/templates', methods=['get'])
def show_templates():
return jsonify(templates=list_database_templates())
@app.route('/db/<template>/<name>', methods=['get'])
def inspect_database(template, name):
logger.debug(f'requested inspect DB for: "{template}" => "{name}"')
database = get_enabled_backend().Database
try:
db = database(client, template, name)
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and context (classes, functions, sometimes code) from other files:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
. Output only the next line. | except NonExistentDatabase: |
Predict the next line after this snippet: <|code_start|>@app.route('/db/<template>/<name>', methods=['post'])
def create_database(template, name):
logger.debug(f'requested create DB from "{template}" as "{name}"')
form_ttl = request.form.get("ttl")
json_ttl = request.get_json(silent=True)
json_ttl = json_ttl.get("ttl") if json_ttl else None
ttl = form_ttl or json_ttl
ttl = int(ttl) if ttl else CONTAINER_TTL
database_class = get_enabled_backend().Database
try:
db = database_class(client, template, name)
logger.warning(f'already exists "{template}" as "{name}"')
response = Response(status=304) # not modified
del response.headers['content-type']
return response
except NonExistentDatabase:
pass
template_class = get_enabled_backend().Template
try:
template_db = template_class(client, template)
logger.debug(f'found template "{template}"')
db = template_db.clone(name, ttl=ttl)
logger.debug(f'starting database "{template}" => "{name}"')
db.start()
except ImportInProgress:
logger.error(f'requested template "{template}" not available, import in progress')
response = jsonify(status="Database not available, content is being imported.")
response.status_code = 423
return response
<|code_end|>
using the current file's imports:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and any relevant context from other files:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
. Output only the next line. | except NonExistentTemplate: |
Based on the snippet: <|code_start|> result.update({'expires_at': db.container['Labels']['com.myaas.expiresAt']})
return jsonify(result)
@app.route('/db/<template>/<name>', methods=['post'])
def create_database(template, name):
logger.debug(f'requested create DB from "{template}" as "{name}"')
form_ttl = request.form.get("ttl")
json_ttl = request.get_json(silent=True)
json_ttl = json_ttl.get("ttl") if json_ttl else None
ttl = form_ttl or json_ttl
ttl = int(ttl) if ttl else CONTAINER_TTL
database_class = get_enabled_backend().Database
try:
db = database_class(client, template, name)
logger.warning(f'already exists "{template}" as "{name}"')
response = Response(status=304) # not modified
del response.headers['content-type']
return response
except NonExistentDatabase:
pass
template_class = get_enabled_backend().Template
try:
template_db = template_class(client, template)
logger.debug(f'found template "{template}"')
db = template_db.clone(name, ttl=ttl)
logger.debug(f'starting database "{template}" => "{name}"')
db.start()
<|code_end|>
, predict the immediate next line with the help of imports:
import logging
import os
from flask import Flask, Response, request, jsonify, abort
from .settings import HOSTNAME, DEBUG, CONTAINER_TTL
from .utils.container import client
from .utils.database import (get_myaas_containers, get_enabled_backend,
list_databases, list_database_templates)
from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate,
ImportInProgress)
and context (classes, functions, sometimes code) from other files:
# Path: src/myaas/settings.py
# HOSTNAME = config('MYAAS_HOSTNAME', default='localhost')
#
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# def list_databases():
# containers = filter(_is_database_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# def list_database_templates():
# containers = filter(_is_template_container, list_containers())
# return [_get_database_name(c) for c in containers]
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentDatabase(Exception):
# pass
#
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportInProgress(Exception):
# pass
. Output only the next line. | except ImportInProgress: |
Predict the next line after this snippet: <|code_start|>
def list_dump_files():
files_in_dir = os.listdir(settings.DUMP_DIR)
return filter(lambda x: x.endswith('.sql'), files_in_dir)
def indent(string, level=1):
spacing = " " * level
return spacing + string
def remove_recreate_database(template):
"""
find existing database, remove it, then recreate
"""
backend = get_enabled_backend().Template
try:
<|code_end|>
using the current file's imports:
import os
import sys
import traceback
import functools
import sys
from docker.errors import NotFound as ImageNotFound
from sentry_sdk import init, capture_message, configure_scope
from . import settings
from .utils.container import client
from .utils.database import get_enabled_backend
from .utils.filesystem import is_empty
from .utils.retry import RetryPolicy
from .backends.exceptions import NonExistentTemplate, ImportDataError
and any relevant context from other files:
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# Path: src/myaas/utils/filesystem.py
# def is_empty(path):
# return not getsize(path) > 100
#
# Path: src/myaas/utils/retry.py
# class RetryPolicy(object):
# def __init__(self, maxtries, delay=None, exceptions=(Exception,)):
# if delay is None:
# # 100ms +/- 50ms of randomized jitter
# self.delay = lambda i: 0.1 + ((random.random() - 0.5) / 10)
# else:
# self.delay = lambda i: delay
#
# self.maxtries = maxtries
# self.exceptions = exceptions
#
# def __call__(self, function):
# for i in range(0, self.maxtries):
# try:
# return function()
# except self.exceptions as error:
# last_exception = error
# time.sleep(self.delay(i))
# raise last_exception
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportDataError(Exception):
# pass
. Output only the next line. | db = backend(client, template, False) |
Predict the next line for this snippet: <|code_start|>
def list_dump_files():
files_in_dir = os.listdir(settings.DUMP_DIR)
return filter(lambda x: x.endswith('.sql'), files_in_dir)
def indent(string, level=1):
spacing = " " * level
return spacing + string
def remove_recreate_database(template):
"""
find existing database, remove it, then recreate
"""
<|code_end|>
with the help of current file imports:
import os
import sys
import traceback
import functools
import sys
from docker.errors import NotFound as ImageNotFound
from sentry_sdk import init, capture_message, configure_scope
from . import settings
from .utils.container import client
from .utils.database import get_enabled_backend
from .utils.filesystem import is_empty
from .utils.retry import RetryPolicy
from .backends.exceptions import NonExistentTemplate, ImportDataError
and context from other files:
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# Path: src/myaas/utils/filesystem.py
# def is_empty(path):
# return not getsize(path) > 100
#
# Path: src/myaas/utils/retry.py
# class RetryPolicy(object):
# def __init__(self, maxtries, delay=None, exceptions=(Exception,)):
# if delay is None:
# # 100ms +/- 50ms of randomized jitter
# self.delay = lambda i: 0.1 + ((random.random() - 0.5) / 10)
# else:
# self.delay = lambda i: delay
#
# self.maxtries = maxtries
# self.exceptions = exceptions
#
# def __call__(self, function):
# for i in range(0, self.maxtries):
# try:
# return function()
# except self.exceptions as error:
# last_exception = error
# time.sleep(self.delay(i))
# raise last_exception
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportDataError(Exception):
# pass
, which may contain function names, class names, or code. Output only the next line. | backend = get_enabled_backend().Template |
Here is a snippet: <|code_start|>
def start_template_database(db_name):
print(f"- Creating database {db_name}")
db = remove_recreate_database(db_name)
print(indent("* Starting database..."))
db.start()
print(indent("* Started"))
print(indent("* Waiting for database to accept connections"))
try:
db.wait_for_service_listening()
return db
except Exception as e:
print(indent(
f"* Max time waiting for database exceeded"
", retrying..."
))
db.stop()
db.restore_backup()
print_exception()
raise e
def main():
dumps = list_dump_files()
for dump in dumps:
db_name,_ = os.path.splitext(dump)
sql_file = os.path.join(settings.DUMP_DIR, dump)
<|code_end|>
. Write the next line using the current file imports:
import os
import sys
import traceback
import functools
import sys
from docker.errors import NotFound as ImageNotFound
from sentry_sdk import init, capture_message, configure_scope
from . import settings
from .utils.container import client
from .utils.database import get_enabled_backend
from .utils.filesystem import is_empty
from .utils.retry import RetryPolicy
from .backends.exceptions import NonExistentTemplate, ImportDataError
and context from other files:
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# Path: src/myaas/utils/filesystem.py
# def is_empty(path):
# return not getsize(path) > 100
#
# Path: src/myaas/utils/retry.py
# class RetryPolicy(object):
# def __init__(self, maxtries, delay=None, exceptions=(Exception,)):
# if delay is None:
# # 100ms +/- 50ms of randomized jitter
# self.delay = lambda i: 0.1 + ((random.random() - 0.5) / 10)
# else:
# self.delay = lambda i: delay
#
# self.maxtries = maxtries
# self.exceptions = exceptions
#
# def __call__(self, function):
# for i in range(0, self.maxtries):
# try:
# return function()
# except self.exceptions as error:
# last_exception = error
# time.sleep(self.delay(i))
# raise last_exception
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportDataError(Exception):
# pass
, which may include functions, classes, or code. Output only the next line. | if is_empty(sql_file): |
Next line prediction: <|code_start|>
print(indent("* Starting database..."))
db.start()
print(indent("* Started"))
print(indent("* Waiting for database to accept connections"))
try:
db.wait_for_service_listening()
return db
except Exception as e:
print(indent(
f"* Max time waiting for database exceeded"
", retrying..."
))
db.stop()
db.restore_backup()
print_exception()
raise e
def main():
dumps = list_dump_files()
for dump in dumps:
db_name,_ = os.path.splitext(dump)
sql_file = os.path.join(settings.DUMP_DIR, dump)
if is_empty(sql_file):
print(f"- Skipping: {sql_file} is empty")
continue
start_db_func = functools.partial(start_template_database, db_name)
<|code_end|>
. Use current file imports:
(import os
import sys
import traceback
import functools
import sys
from docker.errors import NotFound as ImageNotFound
from sentry_sdk import init, capture_message, configure_scope
from . import settings
from .utils.container import client
from .utils.database import get_enabled_backend
from .utils.filesystem import is_empty
from .utils.retry import RetryPolicy
from .backends.exceptions import NonExistentTemplate, ImportDataError)
and context including class names, function names, or small code snippets from other files:
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# Path: src/myaas/utils/filesystem.py
# def is_empty(path):
# return not getsize(path) > 100
#
# Path: src/myaas/utils/retry.py
# class RetryPolicy(object):
# def __init__(self, maxtries, delay=None, exceptions=(Exception,)):
# if delay is None:
# # 100ms +/- 50ms of randomized jitter
# self.delay = lambda i: 0.1 + ((random.random() - 0.5) / 10)
# else:
# self.delay = lambda i: delay
#
# self.maxtries = maxtries
# self.exceptions = exceptions
#
# def __call__(self, function):
# for i in range(0, self.maxtries):
# try:
# return function()
# except self.exceptions as error:
# last_exception = error
# time.sleep(self.delay(i))
# raise last_exception
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportDataError(Exception):
# pass
. Output only the next line. | db = RetryPolicy(5, delay=2)(start_db_func) |
Based on the snippet: <|code_start|>
def list_dump_files():
files_in_dir = os.listdir(settings.DUMP_DIR)
return filter(lambda x: x.endswith('.sql'), files_in_dir)
def indent(string, level=1):
spacing = " " * level
return spacing + string
def remove_recreate_database(template):
"""
find existing database, remove it, then recreate
"""
backend = get_enabled_backend().Template
try:
db = backend(client, template, False)
if db.running():
db.stop()
db.do_backup()
db.remove()
<|code_end|>
, predict the immediate next line with the help of imports:
import os
import sys
import traceback
import functools
import sys
from docker.errors import NotFound as ImageNotFound
from sentry_sdk import init, capture_message, configure_scope
from . import settings
from .utils.container import client
from .utils.database import get_enabled_backend
from .utils.filesystem import is_empty
from .utils.retry import RetryPolicy
from .backends.exceptions import NonExistentTemplate, ImportDataError
and context (classes, functions, sometimes code) from other files:
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# Path: src/myaas/utils/filesystem.py
# def is_empty(path):
# return not getsize(path) > 100
#
# Path: src/myaas/utils/retry.py
# class RetryPolicy(object):
# def __init__(self, maxtries, delay=None, exceptions=(Exception,)):
# if delay is None:
# # 100ms +/- 50ms of randomized jitter
# self.delay = lambda i: 0.1 + ((random.random() - 0.5) / 10)
# else:
# self.delay = lambda i: delay
#
# self.maxtries = maxtries
# self.exceptions = exceptions
#
# def __call__(self, function):
# for i in range(0, self.maxtries):
# try:
# return function()
# except self.exceptions as error:
# last_exception = error
# time.sleep(self.delay(i))
# raise last_exception
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportDataError(Exception):
# pass
. Output only the next line. | except NonExistentTemplate: |
Continue the code snippet: <|code_start|> return db
except Exception as e:
print(indent(
f"* Max time waiting for database exceeded"
", retrying..."
))
db.stop()
db.restore_backup()
print_exception()
raise e
def main():
dumps = list_dump_files()
for dump in dumps:
db_name,_ = os.path.splitext(dump)
sql_file = os.path.join(settings.DUMP_DIR, dump)
if is_empty(sql_file):
print(f"- Skipping: {sql_file} is empty")
continue
start_db_func = functools.partial(start_template_database, db_name)
db = RetryPolicy(5, delay=2)(start_db_func)
if not db:
continue # skip to next database to import
print(indent("* Importing data..."))
try:
db.import_data(sql_file)
<|code_end|>
. Use current file imports:
import os
import sys
import traceback
import functools
import sys
from docker.errors import NotFound as ImageNotFound
from sentry_sdk import init, capture_message, configure_scope
from . import settings
from .utils.container import client
from .utils.database import get_enabled_backend
from .utils.filesystem import is_empty
from .utils.retry import RetryPolicy
from .backends.exceptions import NonExistentTemplate, ImportDataError
and context (classes, functions, or code) from other files:
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
#
# Path: src/myaas/utils/database.py
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# Path: src/myaas/utils/filesystem.py
# def is_empty(path):
# return not getsize(path) > 100
#
# Path: src/myaas/utils/retry.py
# class RetryPolicy(object):
# def __init__(self, maxtries, delay=None, exceptions=(Exception,)):
# if delay is None:
# # 100ms +/- 50ms of randomized jitter
# self.delay = lambda i: 0.1 + ((random.random() - 0.5) / 10)
# else:
# self.delay = lambda i: delay
#
# self.maxtries = maxtries
# self.exceptions = exceptions
#
# def __call__(self, function):
# for i in range(0, self.maxtries):
# try:
# return function()
# except self.exceptions as error:
# last_exception = error
# time.sleep(self.delay(i))
# raise last_exception
#
# Path: src/myaas/backends/exceptions.py
# class NonExistentTemplate(NonExistentDatabase):
# pass
#
# class ImportDataError(Exception):
# pass
. Output only the next line. | except (ImportDataError, Exception) as e: |
Given the code snippet: <|code_start|>
def _is_dead(self, container):
return container['State'] == 'exited'
def _is_unhealthy(self, container):
return 'unhealthy' in container['Status']
def remove_database(container):
template = container['Labels']['com.myaas.template']
name = container['Labels']['com.myaas.instance']
try:
logger.info(f'removing {name}')
backend = get_enabled_backend().Database
backend(client, template, name).remove()
except Exception as e:
logger.exception(
f"Failed to remove database {template} {name}")
@click.command()
@click.option('-e', '--expired', is_flag=True, default=False, help='Remove expired containers.')
@click.option('-d', '--dead', is_flag=True, default=False, help='Remove exited containers.')
@click.option('-u', '--unhealthy', is_flag=True, default=False, help='Remove unhealthy containers.')
@click.option('--dry-run', is_flag=True, default=False, help='Only print name of containers that would be removed and exit.')
def cleanup(expired, dead, unhealthy, dry_run):
if not (expired or dead or unhealthy):
raise click.UsageError("at least one filter must be enabled, use --help for more information")
cf = ContainerFilter(expired, dead, unhealthy)
<|code_end|>
, generate the next line using the imports in this file:
import signal
import logging
import click
from time import sleep
from datetime import datetime
from .settings import DEBUG
from .utils.database import get_myaas_containers, get_enabled_backend
from .utils.container import client
and context (functions, classes, or occasionally code) from other files:
# Path: src/myaas/settings.py
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
. Output only the next line. | databases = cf.filter(get_myaas_containers()) |
Predict the next line after this snippet: <|code_start|> logger.info("%s is dead, queued for deletion", name)
return True
if self._unhealthy and self._is_unhealthy(container):
logger.info("%s is unhealthy, queued for deletion", name)
return True
return False
def _is_expired(self, container):
if 'com.myaas.expiresAt' in container['Labels']:
expiry_ts = round(float(container['Labels']['com.myaas.expiresAt'])) # noqa
else:
            # assume a 24-hour TTL
expiry_ts = int(container['Created']) + 86400
return datetime.utcnow() >= datetime.utcfromtimestamp(expiry_ts)
def _is_dead(self, container):
return container['State'] == 'exited'
def _is_unhealthy(self, container):
return 'unhealthy' in container['Status']
def remove_database(container):
template = container['Labels']['com.myaas.template']
name = container['Labels']['com.myaas.instance']
try:
logger.info(f'removing {name}')
<|code_end|>
using the current file's imports:
import signal
import logging
import click
from time import sleep
from datetime import datetime
from .settings import DEBUG
from .utils.database import get_myaas_containers, get_enabled_backend
from .utils.container import client
and any relevant context from other files:
# Path: src/myaas/settings.py
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
. Output only the next line. | backend = get_enabled_backend().Database |
Predict the next line for this snippet: <|code_start|> return True
if self._unhealthy and self._is_unhealthy(container):
logger.info("%s is unhealthy, queued for deletion", name)
return True
return False
def _is_expired(self, container):
if 'com.myaas.expiresAt' in container['Labels']:
expiry_ts = round(float(container['Labels']['com.myaas.expiresAt'])) # noqa
else:
            # assume a 24-hour TTL
expiry_ts = int(container['Created']) + 86400
return datetime.utcnow() >= datetime.utcfromtimestamp(expiry_ts)
def _is_dead(self, container):
return container['State'] == 'exited'
def _is_unhealthy(self, container):
return 'unhealthy' in container['Status']
def remove_database(container):
template = container['Labels']['com.myaas.template']
name = container['Labels']['com.myaas.instance']
try:
logger.info(f'removing {name}')
backend = get_enabled_backend().Database
<|code_end|>
with the help of current file imports:
import signal
import logging
import click
from time import sleep
from datetime import datetime
from .settings import DEBUG
from .utils.database import get_myaas_containers, get_enabled_backend
from .utils.container import client
and context from other files:
# Path: src/myaas/settings.py
# DEBUG = config('MYAAS_DEBUG', default=False, cast=bool)
#
# Path: src/myaas/utils/database.py
# def get_myaas_containers():
# return filter(_is_database_container, list_containers())
#
# def get_enabled_backend():
# return importlib.import_module(settings.BACKEND)
#
# Path: src/myaas/utils/container.py
# def find_container(name):
# def list_containers(all=True):
# def translate_host_basedir(path):
# def get_random_cpuset(cores_to_assign):
# def get_mapped_cpuset():
, which may contain function names, class names, or code. Output only the next line. | backend(client, template, name).remove() |
Next line prediction: <|code_start|>
logger = logging.getLogger(__name__)
def get_enabled_backend():
return importlib.import_module(settings.BACKEND)
def get_myaas_containers():
<|code_end|>
. Use current file imports:
(import logging
import importlib
from .. import settings
from .container import list_containers)
and context including class names, function names, or small code snippets from other files:
# Path: src/myaas/utils/container.py
# def list_containers(all=True):
# client = docker.Client()
# return client.containers(all=all)
. Output only the next line. | return filter(_is_database_container, list_containers()) |
Next line prediction: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import
current_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(current_path)
root = os.path.join(current_path, '..')
sys.path.append(root)
LOG = logging
def run_worker(name=None, logging_level='error',
concurrency='100', pool='gevent',
queue='default', sort_time_limit='10'):
if not name:
name = '%s_%s_%d' % (socket.getfqdn(), queue, randint(10, 99))
argv = ['worker',
'-n', name, # node name
'-l', logging_level,
'-P', pool, # concurrency pool: pre_fork, thread, eventlet, gevent
'-c', concurrency,
'-Q', queue,
#'--soft-time-limit', sort_time_limit,
'-D',
'-f', os.path.join(root, 'logs/%s.log' % name),
'--pidfile=%s' % os.path.join(root, 'pids/%s.pid' % name)]
<|code_end|>
. Use current file imports:
(import sys
import os
import socket
import logging
from celeryapp.crawl_tasks import app
from random import randint)
and context including class names, function names, or small code snippets from other files:
# Path: celeryapp/crawl_tasks.py
# def request(self, dict_item, **kwargs):
# def request_priority(self, dict_item, **kwargs):
# def parse(self, dict_item, **kwargs):
# def parse_priority(self, dict_item, **kwargs):
# def pipeline(self, dict_item, **kwargs):
# def schedule(self, tasks=None, check_task=False, save_filter=False):
# def crawl_synchronous(self, dict_item, **kwargs):
. Output only the next line. | app.worker_main(argv) |
Given the code snippet: <|code_start|>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, '..'))
sys.path.append(root_path)
# change work dir
os.chdir(root_path)
<|code_end|>
, generate the next line using the imports in this file:
import os
import sys
import config
import celeryconfig
import logging
import gevent
import json
import time
from celeryapp.crawl_tasks import app
from celeryapp.celery import get_message_queue_size
from celery import group
from gevent import monkey
from gevent.pool import Pool
from gevent.lock import Semaphore
and context (functions, classes, or occasionally code) from other files:
# Path: celeryapp/crawl_tasks.py
# def request(self, dict_item, **kwargs):
# def request_priority(self, dict_item, **kwargs):
# def parse(self, dict_item, **kwargs):
# def parse_priority(self, dict_item, **kwargs):
# def pipeline(self, dict_item, **kwargs):
# def schedule(self, tasks=None, check_task=False, save_filter=False):
# def crawl_synchronous(self, dict_item, **kwargs):
#
# Path: celeryapp/celery.py
# def get_message_queue_size(queue_name):
# client = redis.Redis(IP.findall(config.broker)[0])
# length = client.llen(queue_name)
# return length
. Output only the next line. | tasks = app.tasks |
Given snippet: <|code_start|>
for crawler in celeryconfig.crawlers:
filepath = os.path.join(root_path, 'data/%s' % crawler.get('scheduler').filter_file_name)
filepath = os.path.abspath(filepath)
if not os.path.exists(filepath):
#tasks[crawler['name'] + '.schedule'].delay()
for task in tasks[crawler['name']+'.schedule'].scheduler.init_generator():
if task.get('priority', None):
#app.tasks[self.crawler_name+'.request_priority'].delay(task)
tasks[crawler['name']+'.request_priority'].apply_async((task, ), compression='zlib')
else:
#tasks[crawler['name']+'.request'].delay(task)
tasks[crawler['name']+'.request'].apply_async((task, ), compression='zlib')
#for dict_item in crawler['scheduler'].generator():
# group(tasks[crawler['name'] + '.request'].s(dict_item)
# | tasks[crawler['name'] + '.parse'].s()
# | tasks[crawler['name'] + '.pipeline'].s()).delay()
monkey.patch_all()
scheduel_lock = Semaphore()
filtered_newtasks = []
def schedule():
global filtered_newtasks
while 1:
try:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import os
import sys
import config
import celeryconfig
import logging
import gevent
import json
import time
from celeryapp.crawl_tasks import app
from celeryapp.celery import get_message_queue_size
from celery import group
from gevent import monkey
from gevent.pool import Pool
from gevent.lock import Semaphore
and context:
# Path: celeryapp/crawl_tasks.py
# def request(self, dict_item, **kwargs):
# def request_priority(self, dict_item, **kwargs):
# def parse(self, dict_item, **kwargs):
# def parse_priority(self, dict_item, **kwargs):
# def pipeline(self, dict_item, **kwargs):
# def schedule(self, tasks=None, check_task=False, save_filter=False):
# def crawl_synchronous(self, dict_item, **kwargs):
#
# Path: celeryapp/celery.py
# def get_message_queue_size(queue_name):
# client = redis.Redis(IP.findall(config.broker)[0])
# length = client.llen(queue_name)
# return length
which might include code, classes, or functions. Output only the next line. | if (get_message_queue_size('parse') < config.max_task_queue_size * 2 and |
Continue the code snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import
# register all crawlers' tasks
for crawler in celeryconfig.crawlers:
# set the crawler_name so we know the name when the parser adds a new task
# and then know how to call the corresponding registered task
crawler.get('requestHandler').crawler_name = crawler.get('name')
crawler.get('parseHandler').crawler_name = crawler.get('name')
crawler.get('scheduler').crawler_name = crawler.get('name')
crawler.get('pipeline').crawler_name = crawler.get('name')
# -.-.-.-.-.-.-.-.-.-.-.-.-.-.request task-.-.-.-.-.-.-.-.-.-.-.-.-.
<|code_end|>
. Use current file imports:
from celeryapp.celery import app
from celeryapp.celery import get_message_queue_size
from celery import group
from celery.exceptions import Retry
import datetime
import config
import celeryconfig
and context (classes, functions, or code) from other files:
# Path: celeryapp/celery.py
# def save_state(sender=None, conf=None, **kwargs):
# def get_message_queue_size(queue_name):
# IP = re.compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d{1,6})?')
#
# Path: celeryapp/celery.py
# def get_message_queue_size(queue_name):
# client = redis.Redis(IP.findall(config.broker)[0])
# length = client.llen(queue_name)
# return length
. Output only the next line. | @app.task(name=crawler.get('name')+'.request', |
Here is a snippet: <|code_start|> self.scheduler.save_filter()
return
    # initial tasks:
if not tasks and not check_task:
for task in self.scheduler.init_generator():
if task.get('priority', None):
#app.tasks[self.crawler_name+'.request_priority'].delay(task)
app.tasks[self.crawler_name+'.request_priority'].apply_async((task, ), compression='zlib')
else:
#app.tasks[self.crawler_name+'.request'].delay(task)
app.tasks[self.crawler_name+'.request'].apply_async((task, ), compression='zlib')
#group(app.tasks[self.crawler_name + '.request'].s(task)
# | app.tasks[self.crawler_name + '.parse'].s()
# | app.tasks[self.crawler_name + '.pipeline'].s()
# ).delay()
#app.tasks[self.crawler_name+'.schedule'].apply_async(
# args=[],
# kwargs = {
# 'check_task': True,
# },
# eta=datetime.datetime.now())
# add new tasks, call by task.apply
elif tasks and not check_task:
self.scheduler.add_new_task(tasks)
#app.tasks[self.crawler_name+'.new_task'].delay(task)
# schedule task
elif check_task:
#i = app.control.inspect()
timedelta = config.new_task_check_interval
<|code_end|>
. Write the next line using the current file imports:
from celeryapp.celery import app
from celeryapp.celery import get_message_queue_size
from celery import group
from celery.exceptions import Retry
import datetime
import config
import celeryconfig
and context from other files:
# Path: celeryapp/celery.py
# def save_state(sender=None, conf=None, **kwargs):
# def get_message_queue_size(queue_name):
# IP = re.compile(r'\d{1,3}(?:\.\d{1,3}){3}(?::\d{1,6})?')
#
# Path: celeryapp/celery.py
# def get_message_queue_size(queue_name):
# client = redis.Redis(IP.findall(config.broker)[0])
# length = client.llen(queue_name)
# return length
, which may include functions, classes, or code. Output only the next line. | if (get_message_queue_size('request') < config.max_task_queue_size): |
Using the snippet: <|code_start|>__author__ = 'Stuart Gordon Reid'
__email__ = 'stuartgordonreid@gmail.com'
__website__ = 'http://www.stuartreid.co.za'
"""
File description
"""
__author__ = 'stuartreid'
<|code_end|>
, determine the next line of code. You have imports:
from Optimizers.GeneticAlgorithm import GeneticAlgorithm
from Optimizers.Solution import Solution
and context (class names, function names, or code) available:
# Path: Optimizers/GeneticAlgorithm.py
# class GeneticAlgorithm(Optimizer):
# """
# This class contains a generic implementation of the Genetic Algorithm. It uses Selection, Mutation, and Crossover
# objects to evolve a population of candidate solutions to a particular optimization problem.
# """
#
# def __init__(self, problem, parameters=None):
# """
# Initialization method for the Genetic Algorithm
# :param problem:
# :param parameters[0]: Crossover rate (% of the population which is used in crossover)
# :param parameters[1]: Crossover strategy e.g. sexual, asexual
# :param parameters[2]: Mutation rate (% of the population which is mutated)
# :param parameters[3]: Mutator strategy e.g. Brownian Individuals
# :param parameters[4]: Selection rate (% of the population to be culled)
# :param parameters[5]: Selection strategy e.g. Rank Selection vs. Random Selection
# :param parameters[6]: Population size # individuals.
# """
# if parameters is None:
# parameters = [0.5, "sexual", 0.05, "brownian", 0.5, "rank", 50]
# super(GeneticAlgorithm, self).__init__(problem, parameters)
# self.population = []
# for i in range(parameters[6]):
# random_solution = random.sample(xrange(problem.lower_bound, problem.upper_bound), problem.dimension)
# self.population.append(Individual(random_solution, problem))
#
# def optimize(self, iterations=1000, stopping=True):
# pass
#
# def select(self):
# pass
#
# def crossover(self):
# pass
#
# def mutate(self):
# pass
#
# Path: Optimizers/Solution.py
# class Solution(object):
# solution = []
#
# def __init__(self, solution, problem):
# """
# Abstract initialization method for a solution to some optimization function
# :param solution: a numpy array (much faster than lists)
# """
# self.solution = solution
# self.problem = problem
# return
#
# def __len__(self):
# """
# Overload of the len operator for the Solution class
# :rtype : Sized?
# """
# return len(self.solution)
#
# def update(self, solution):
# """
# This method is used for updating a solution
# """
# self.solution = solution
#
# def get(self):
# """
# This method is used to retrieve the numpy array for direct manipulation
# """
# return self.solution
#
# def evaluate(self):
# return self.problem.evaluate(self.solution)
#
# def __gt__(self, other):
# assert isinstance(other, Solution)
# if self.problem.optimization is "min":
# return self.evaluate() < other.evaluate()
# elif self.problem.optimization is "max":
# return self.evaluate() > other.evaluate()
#
# def deep_copy(self):
# copy = Solution(None, self.problem)
# copy.solution = []
# for i in range(len(self.solution)):
# copy.solution.append(self.solution[i])
# return copy
. Output only the next line. | class GeneticProgramming(GeneticAlgorithm): |
Given the following code snippet before the placeholder: <|code_start|>__author__ = 'Stuart Gordon Reid'
__email__ = 'stuartgordonreid@gmail.com'
__website__ = 'http://www.stuartreid.co.za'
"""
File description
"""
<|code_end|>
, predict the next line using imports from the current file:
import random
from Optimizers.Optimizer import Optimizer
from Optimizers.Solution import Solution
and context including class names, function names, and sometimes code from other files:
# Path: Optimizers/Optimizer.py
# class Optimizer(object):
# __metaclass__ = abc.ABCMeta
#
# @abc.abstractmethod
# def __init__(self, problem, parameters):
# """
# This method initialized the Optimizer with an objective function and a set of parameters
# :param problem: this is the problem being optimized
# :param parameters: set of algorithm control parameters
# """
# self.problem = problem
# self.parameters = parameters
# return
#
# @abc.abstractmethod
# def optimize(self, iterations=1000, stopping=True):
# """
# This is the generic optimization method to be overloaded by each optimizer
# :param stopping: whether or not the algorithm should use early stopping
# :param iterations: the number of iterations to optimize for, default is 1000
# """
# return
#
# def fitness(self, candidate):
# """
# This is the generic optimization method to be overloaded by each optimizer
# :param candidate:
# """
# assert isinstance(self.problem, Problems.Function)
# assert isinstance(candidate, Solution)
# return self.problem.evaluate(candidate)
#
# Path: Optimizers/Solution.py
# class Solution(object):
# solution = []
#
# def __init__(self, solution, problem):
# """
# Abstract initialization method for a solution to some optimization function
# :param solution: a numpy array (much faster than lists)
# """
# self.solution = solution
# self.problem = problem
# return
#
# def __len__(self):
# """
# Overload of the len operator for the Solution class
# :rtype : Sized?
# """
# return len(self.solution)
#
# def update(self, solution):
# """
# This method is used for updating a solution
# """
# self.solution = solution
#
# def get(self):
# """
# This method is used to retrieve the numpy array for direct manipulation
# """
# return self.solution
#
# def evaluate(self):
# return self.problem.evaluate(self.solution)
#
# def __gt__(self, other):
# assert isinstance(other, Solution)
# if self.problem.optimization is "min":
# return self.evaluate() < other.evaluate()
# elif self.problem.optimization is "max":
# return self.evaluate() > other.evaluate()
#
# def deep_copy(self):
# copy = Solution(None, self.problem)
# copy.solution = []
# for i in range(len(self.solution)):
# copy.solution.append(self.solution[i])
# return copy
. Output only the next line. | class GeneticAlgorithm(Optimizer): |
Continue the code snippet: <|code_start|> :param problem:
:param parameters[0]: Crossover rate (% of the population which is used in crossover)
:param parameters[1]: Crossover strategy e.g. sexual, asexual
:param parameters[2]: Mutation rate (% of the population which is mutated)
:param parameters[3]: Mutator strategy e.g. Brownian Individuals
:param parameters[4]: Selection rate (% of the population to be culled)
:param parameters[5]: Selection strategy e.g. Rank Selection vs. Random Selection
:param parameters[6]: Population size # individuals.
"""
if parameters is None:
parameters = [0.5, "sexual", 0.05, "brownian", 0.5, "rank", 50]
super(GeneticAlgorithm, self).__init__(problem, parameters)
self.population = []
for i in range(parameters[6]):
random_solution = random.sample(xrange(problem.lower_bound, problem.upper_bound), problem.dimension)
self.population.append(Individual(random_solution, problem))
def optimize(self, iterations=1000, stopping=True):
pass
def select(self):
pass
def crossover(self):
pass
def mutate(self):
pass
<|code_end|>
. Use current file imports:
import random
from Optimizers.Optimizer import Optimizer
from Optimizers.Solution import Solution
and context (classes, functions, or code) from other files:
# Path: Optimizers/Optimizer.py
# class Optimizer(object):
# __metaclass__ = abc.ABCMeta
#
# @abc.abstractmethod
# def __init__(self, problem, parameters):
# """
# This method initialized the Optimizer with an objective function and a set of parameters
# :param problem: this is the problem being optimized
# :param parameters: set of algorithm control parameters
# """
# self.problem = problem
# self.parameters = parameters
# return
#
# @abc.abstractmethod
# def optimize(self, iterations=1000, stopping=True):
# """
# This is the generic optimization method to be overloaded by each optimizer
# :param stopping: whether or not the algorithm should use early stopping
# :param iterations: the number of iterations to optimize for, default is 1000
# """
# return
#
# def fitness(self, candidate):
# """
# This is the generic optimization method to be overloaded by each optimizer
# :param candidate:
# """
# assert isinstance(self.problem, Problems.Function)
# assert isinstance(candidate, Solution)
# return self.problem.evaluate(candidate)
#
# Path: Optimizers/Solution.py
# class Solution(object):
# solution = []
#
# def __init__(self, solution, problem):
# """
# Abstract initialization method for a solution to some optimization function
# :param solution: a numpy array (much faster than lists)
# """
# self.solution = solution
# self.problem = problem
# return
#
# def __len__(self):
# """
# Overload of the len operator for the Solution class
# :rtype : Sized?
# """
# return len(self.solution)
#
# def update(self, solution):
# """
# This method is used for updating a solution
# """
# self.solution = solution
#
# def get(self):
# """
# This method is used to retrieve the numpy array for direct manipulation
# """
# return self.solution
#
# def evaluate(self):
# return self.problem.evaluate(self.solution)
#
# def __gt__(self, other):
# assert isinstance(other, Solution)
# if self.problem.optimization is "min":
# return self.evaluate() < other.evaluate()
# elif self.problem.optimization is "max":
# return self.evaluate() > other.evaluate()
#
# def deep_copy(self):
# copy = Solution(None, self.problem)
# copy.solution = []
# for i in range(len(self.solution)):
# copy.solution.append(self.solution[i])
# return copy
. Output only the next line. | class Individual(Solution): |
Given snippet: <|code_start|>__author__ = 'Stuart Gordon Reid'
__email__ = 'stuartgordonreid@gmail.com'
__website__ = 'http://www.stuartreid.co.za'
"""
http://www-optima.amp.i.kyoto-u.ac.jp/member/student/hedar/Hedar_files/TestGO_files/Page295.htm
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from Problems.Problem import Problem
and context:
# Path: Problems/Problem.py
# class Problem(object):
# __metaclass__ = abc.ABCMeta
# dimension = 0
# upper_bound = float("+inf")
# lower_bound = float("-inf")
# optimization = "min"
#
# @abc.abstractmethod
# def __init__(self, dimension, upper_bound=float("+inf"), lower_bound=float("-inf"), optimization="min"):
# """
# This method should be overloaded and used to initialize the problem
# :param dimension: the dimension on the problem
# """
# self.dimension = dimension
# self.upper_bound = upper_bound
# self.lower_bound = lower_bound
# self.optimization = optimization
# return
#
# @abc.abstractmethod
# def evaluate(self, candidate):
# """
# This method should be overloaded and used to evaluate a candidate solution to the problem
# :param candidate: the candidate solution to the problem
# """
# assert isinstance(candidate, Optimizers.Solution)
# return
which might include code, classes, or functions. Output only the next line. | class Ackley(Problem): |
Based on the snippet: <|code_start|>
def __init__(self, stock_tickers=None, start_date=None, end_date=None):
self.stock_tickers = stock_tickers
self.start_date = self.get_date(start_date)
self.end_date = self.get_date(end_date)
def download_csv(self):
for ticker in self.stock_tickers:
link = "http://finance.google.com/finance/historical?q=JSE:" + ticker
if self.start_date is not None:
link += "&startdate=" + self.start_date
if self.end_date is not None:
link += "&enddate=" + self.end_date
link += "&output=csv"
print "Download link: " + link
# TODO: Check this method when I get home
try:
urlretrieve(link, "Download.csv")
except:
link.replace("q=JSE:", "q=")
urlretrieve(link, "Download.csv")
@staticmethod
def get_date(date):
if date is None:
return None
assert isinstance(date, str)
string_values = date.split('-')
<|code_end|>
, predict the immediate next line with the help of imports:
from urllib import urlretrieve
from Helpers.Switch import switch
and context (classes, functions, sometimes code) from other files:
# Path: Helpers/Switch.py
# class switch(object):
# """
# This class provides the functionality we want. You only need to look at
# this if you want to know how this works. It only needs to be defined
# once, no need to muck around with its internals.
# """
# def __init__(self, value):
# self.value = value
# self.fall = False
#
# def __iter__(self):
# """Return the match method once, then stop"""
# yield self.match
# raise StopIteration
#
# def match(self, *args):
# """Indicate whether or not to enter a case suite"""
# if self.fall or not args:
# return True
# elif self.value in args: # changed for v1.5, see below
# self.fall = True
# return True
# else:
# return False
. Output only the next line. | for case in switch(string_values[1]): |
Based on the snippet: <|code_start|>__author__ = 'Stuart Gordon Reid'
__email__ = 'stuartgordonreid@gmail.com'
__website__ = 'http://www.stuartreid.co.za'
"""
File description
"""
<|code_end|>
, predict the immediate next line with the help of imports:
from Problems.Problem import Problem
and context (classes, functions, sometimes code) from other files:
# Path: Problems/Problem.py
# class Problem(object):
# __metaclass__ = abc.ABCMeta
# dimension = 0
# upper_bound = float("+inf")
# lower_bound = float("-inf")
# optimization = "min"
#
# @abc.abstractmethod
# def __init__(self, dimension, upper_bound=float("+inf"), lower_bound=float("-inf"), optimization="min"):
# """
# This method should be overloaded and used to initialize the problem
# :param dimension: the dimension on the problem
# """
# self.dimension = dimension
# self.upper_bound = upper_bound
# self.lower_bound = lower_bound
# self.optimization = optimization
# return
#
# @abc.abstractmethod
# def evaluate(self, candidate):
# """
# This method should be overloaded and used to evaluate a candidate solution to the problem
# :param candidate: the candidate solution to the problem
# """
# assert isinstance(candidate, Optimizers.Solution)
# return
. Output only the next line. | class Cigar(Problem): |
Predict the next line for this snippet: <|code_start|>
__author__ = 'Stuart Gordon Reid'
__email__ = 'stuartgordonreid@gmail.com'
__website__ = 'http://www.stuartreid.co.za'
"""
File description
"""
<|code_end|>
with the help of current file imports:
from Sorters.Sorter import Sorter
import random
and context from other files:
# Path: Sorters/Sorter.py
# class Sorter():
# """
# This abstract base class contains code for a generic sorter. A sorter is an algorithm which receives some form of
# data and sorts it in a particular order
# """
# __metaclass__ = abc.ABCMeta
#
# @abc.abstractmethod
# def __init__(self, data, ordering="ascending"):
# """
# Initialization method for a genetic sorter
# :param data: the list of data to be sorted by the algorithm
# :param ordering: the order in which to sort that data
# """
# self.data = data
# self.ordering = ordering
#
# @abc.abstractmethod
# def sort(self):
# """
# Abstract sort method to be overloaded by sorting algorithms
# :return: returns the sorted data (list)
# """
# return self.data
, which may contain function names, class names, or code. Output only the next line. | class BitonicSorter(Sorter): |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.