Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Predict the next line for this snippet: <|code_start|>
class CatalogueIntegrationTest (unittest.TestCase):
def setUp(self):
self._data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_load(self):
catalogue = tacl.Catalogue()
catalogue.load(os.path.join(self._data_dir, 'catalogue2.txt'))
expected_dict = {'T1': 'A', 'T2': 'B', 'T3': 'C', 'T5': 'A'}
self.assertEqual(catalogue, expected_dict)
def test_load_relabelled_text(self):
catalogue = tacl.Catalogue()
path = os.path.join(self._data_dir, 'catalogue_relabel.txt')
<|code_end|>
with the help of current file imports:
import os.path
import tempfile
import unittest
import tacl
from tacl.exceptions import MalformedCatalogueError
and context from other files:
# Path: tacl/exceptions.py
# class MalformedCatalogueError(TACLError):
#
# pass
, which may contain function names, class names, or code. Output only the next line. | self.assertRaises(MalformedCatalogueError, catalogue.load, path) |
Given the following code snippet before the placeholder: <|code_start|> """
return self._ordered_labels or self.labels
@property
def labels(self):
"""Returns the distinct labels defined in the catalogue.
:rtype: `list`
"""
return sorted(set(self.values()))
def load(self, path):
"""Loads the data from `path` into the catalogue.
:param path: path to catalogue file
:type path: `str`
"""
fieldnames = ['work', 'label']
with open(path, 'r', encoding='utf-8', newline='') as fh:
reader = csv.DictReader(fh, delimiter=' ', fieldnames=fieldnames,
skipinitialspace=True)
for row in reader:
work, label = row['work'], row['label']
if label:
if label not in self._ordered_labels:
self._ordered_labels.append(label)
if work in self:
raise MalformedCatalogueError(
<|code_end|>
, predict the next line using imports from the current file:
import copy
import csv
import os
from .constants import CATALOGUE_WORK_RELABELLED_ERROR
from .exceptions import MalformedCatalogueError
and context including class names, function names, and sometimes code from other files:
# Path: tacl/constants.py
# CATALOGUE_WORK_RELABELLED_ERROR = 'Catalogue file labels "{}" more than once.'
#
# Path: tacl/exceptions.py
# class MalformedCatalogueError(TACLError):
#
# pass
. Output only the next line. | CATALOGUE_WORK_RELABELLED_ERROR.format(work)) |
Next line prediction: <|code_start|>
"""
return self._ordered_labels or self.labels
@property
def labels(self):
"""Returns the distinct labels defined in the catalogue.
:rtype: `list`
"""
return sorted(set(self.values()))
def load(self, path):
"""Loads the data from `path` into the catalogue.
:param path: path to catalogue file
:type path: `str`
"""
fieldnames = ['work', 'label']
with open(path, 'r', encoding='utf-8', newline='') as fh:
reader = csv.DictReader(fh, delimiter=' ', fieldnames=fieldnames,
skipinitialspace=True)
for row in reader:
work, label = row['work'], row['label']
if label:
if label not in self._ordered_labels:
self._ordered_labels.append(label)
if work in self:
<|code_end|>
. Use current file imports:
(import copy
import csv
import os
from .constants import CATALOGUE_WORK_RELABELLED_ERROR
from .exceptions import MalformedCatalogueError)
and context including class names, function names, or small code snippets from other files:
# Path: tacl/constants.py
# CATALOGUE_WORK_RELABELLED_ERROR = 'Catalogue file labels "{}" more than once.'
#
# Path: tacl/exceptions.py
# class MalformedCatalogueError(TACLError):
#
# pass
. Output only the next line. | raise MalformedCatalogueError( |
Continue the code snippet: <|code_start|>
def _get_rows_from_results(self, results):
return self._get_rows_from_csv(results.csv(
io.StringIO(newline='')))
def _test_required_columns(self, cols, cmd, *args, **kwargs):
"""Tests that when `cmd` is run with `args` and `kwargs`, it raises a
`MalformedResultsError when each of `cols` is not present in
the results. Further tests that that exception is not raised
when other columns are not present.
This test is designed to test Results methods only.
"""
input_results = (
['AB', '2', 'T1', 'base', '4', 'A'],
['AB', '2', 'T1', 'a', '3', 'A'],
['AB', '2', 'T2', 'base', '2', 'A'],
['ABC', '3', 'T1', 'base', '2', 'A'],
['ABC', '3', 'T1', 'a', '0', 'A'],
['AB', '2', 'T3', 'base', '2', 'B'],
['BC', '2', 'T1', 'base', '3', 'A'],
)
for col in tacl.constants.QUERY_FIELDNAMES:
fs = list(tacl.constants.QUERY_FIELDNAMES[:])
index = fs.index(col)
fs[index] = 'dummy'
fh = self._create_csv(input_results, fieldnames=fs)
results = tacl.Results(fh, self._tokenizer)
if col in cols:
<|code_end|>
. Use current file imports:
import csv
import filecmp
import io
import os.path
import shlex
import subprocess
import unittest
import unittest.mock
import tacl
from tacl.exceptions import MalformedResultsError
and context (classes, functions, or code) from other files:
# Path: tacl/exceptions.py
# class MalformedResultsError(TACLError):
#
# pass
. Output only the next line. | self.assertRaises(MalformedResultsError, getattr(results, cmd), |
Next line prediction: <|code_start|> Z = np.logical_xor(X, Y).astype(int)
est_goettingen = _estimate(Z)
assert np.isclose(-0.5849625007211562, est_goettingen['avg'][((1,), (2,),)][2]), (
'Average Shared is not -0.5849...')
def test_average_pid_source_copy():
"""Test Goettingen estimator on copied source."""
Z = X
est_goettingen = _estimate(Z)
assert np.isclose(0.5849625007211562, est_goettingen['avg'][((1,),)][2], atol=1.e-7), (
'Unique information 1 is not 1 for SxPID estimator ({0}).'.format(
est_goettingen['avg'][((1,),)][2]))
assert np.isclose(-0.4150374992788438, est_goettingen['avg'][((2,),)][2], atol=1.e-7), (
'Unique information 2 is not 0 for SxPID estimator ({0}).'.format(
est_goettingen['avg'][((2,),)][2]))
assert np.isclose(0.4150374992788438, est_goettingen['avg'][((1,), (2,),)][2], atol=1.e-7), (
'Shared information is not 0 for SxPID estimator ({0}).'.format(
est_goettingen['avg'][((1,), (2,),)][2]))
assert np.isclose(0.4150374992788438, est_goettingen['avg'][((1, 2,),)][2], atol=1.e-7), (
'Synergy is not 0 for SxPID estimator ({0}).'.format(
est_goettingen['avg'][((1, 2,),)][2]))
def _estimate(T):
"""Estimate PID for a given target."""
# Goettingen estimator
<|code_end|>
. Use current file imports:
(import time as tm
import numpy as np
import pytest
from idtxl.estimators_multivariate_pid import SxPID)
and context including class names, function names, or small code snippets from other files:
# Path: idtxl/estimators_multivariate_pid.py
# class SxPID(Estimator):
# """Estimate partial information decomposition for multiple inputs.
#
# Implementation of the multivariate partial information decomposition (PID)
# estimator for discrete data with (up to 4 inputs) and one output. The
# estimator finds shared information, unique information and synergistic
# information between the multiple inputs s1, s2, ..., sn with respect to the
# output t for each realization (t, s1, ..., sn) and then average them
# according to their distribution weights p(t, s1, ..., sn). Both the
# pointwise (on the realization level) PID and the averaged PID are returned
# (see the 'return' of 'estimate()').
#
# The algorithm uses recursion to compute the partial information
# decomposition.
#
# References:
#
# - Makkeh, A. & Wibral, M. (2020). A differentiable pointwise partial
# Information Decomposition estimator. https://github.com/Abzinger/SxPID.
#
# Args:
# settings : dict
# estimation parameters (with default parameters)
#
# - verbose : bool [optional] - print output to console
# (default=False)
# """
#
# def __init__(self, settings):
# # get estimation parameters
# self.settings = settings.copy()
# self.settings.setdefault('verbose', False)
#
# def is_parallel():
# return False
#
# def is_analytic_null_estimator(self):
# return False
#
# def estimate(self, s, t):
# """
# Args:
# s : list of numpy arrays
# 1D arrays containing realizations of a discrete random variable
# t : numpy array
# 1D array containing realizations of a discrete random variable
#
# Returns:
# dict of dict
# {
# 'ptw' -> { realization -> {alpha -> [float, float, float]} }
#
# 'avg' -> {alpha -> [float, float, float]}
# }
# where the list of floats is ordered
# [informative, misinformative, informative - misinformative]
# ptw stands for pointwise decomposition
# avg stands for average decomposition
# """
# s, t, self.settings = _check_input(s, t, self.settings)
# pdf = _get_pdf_dict(s, t)
#
# # Read lattices from a file
# # Stored as {
# # n -> [{alpha -> children}, (alpha_1,...) ]
# # }
# # children is a list of tuples
# lattices = lt.lattices
# num_source_vars = len(s)
# retval_ptw, retval_avg = pid_goettingen.pid(
# num_source_vars,
# pdf_orig=pdf,
# chld=lattices[num_source_vars][0],
# achain=lattices[num_source_vars][1],
# printing=self.settings['verbose'])
#
# # TODO AskM: Trivariate: does it make sense to name the alphas
# # for example shared_syn_s1_s2__syn_s1_s3 ?
# results = {
# 'ptw': retval_ptw,
# 'avg': retval_avg,
# }
# return results
. Output only the next line. | pid_goettingen = SxPID(SETTINGS) |
Here is a snippet: <|code_start|>"""Estimate partial information decomposition (PID).
Estimate PID for multiple sources (up to 4 sources) and one target process
using SxPID estimator.
Note:
Written for Python 3.4+
"""
<|code_end|>
. Write the next line using the current file imports:
import numpy as np
from .single_process_analysis import SingleProcessAnalysis
from .estimator import find_estimator
from .results import ResultsMultivariatePID
and context from other files:
# Path: idtxl/single_process_analysis.py
# class SingleProcessAnalysis(NetworkAnalysis):
# def __init__(self):
# super().__init__()
#
# Path: idtxl/estimator.py
# def find_estimator(est):
# """Return estimator class.
#
# Return an estimator class. If input is a class, check if it implements
# methods 'estimate' and 'is_parallel' necessary for network analysis
# (see abstract class 'Estimator' for documentation). If input is a string,
# search for class with that name in IDTxl and return it.
#
# Args:
# est : str | Class
# name of an estimator class implemented in IDTxl or custom estimator
# class
#
# Returns
# Class
# Estimator class
# """
# if inspect.isclass(est):
# # Test if provided class implements the Estimator class. This
# # constraint may be relaxed in the future.
# if not np.issubclass_(est, Estimator):
# raise RuntimeError('Provided class should implement abstract class'
# ' Estimator.')
# return est
# elif type(est) is str:
# module_list = _package_contents()
# estimator = None
# for m in module_list:
# try:
# module = importlib.import_module('.' + m, __package__)
# return getattr(module, est)
# except AttributeError:
# pass
# if not estimator:
# raise RuntimeError('Estimator {0} not found.'.format(est))
# else:
# raise TypeError('Please provide an estimator class or the name of an '
# 'estimator as string.')
#
# Path: idtxl/results.py
# class ResultsMultivariatePID(ResultsNetworkAnalysis):
# """Store results of Multivariate Partial Information Decomposition (PID)
# analysis.
#
# Provide a container for results of Multivariate Partial Information
# Decomposition (PID) algorithms.
#
# Note that for convenience all dictionaries in this class can additionally
# be accessed using dot-notation:
#
# >>> res_pid._single_target[2].source_1
#
# or
#
# >>> res_pid._single_target[2].['source_1'].
#
# Attributes:
# settings : dict
# settings used for estimation of information theoretic measures and
# statistical testing
# data_properties : dict
# data properties, contains
#
# - n_nodes : int - total number of nodes in the network
# - n_realisations : int - number of samples available for
# analysis given the settings (e.g., a high maximum lag used in
# network inference, results in fewer data points available for
# estimation)
# - normalised : bool - indicates if data were z-standardised
# before the estimation
#
# targets_analysed : list
# list of analysed targets
# """
#
# def __init__(self, n_nodes, n_realisations, normalised):
# super().__init__(n_nodes, n_realisations, normalised)
#
# def get_single_target(self, target):
# """Return results for a single target in the network.
#
# Results for single targets include for each target
#
# - source_i : tuple - source variable i
# - selected_vars_sources : list of tuples - source variables used in PID
# estimation
# - avg : dict - avg pid {alpha -> float} where alpha is a redundancy
# lattice node
# - ptw : dict of dicts - ptw pid {rlz -> {alpha -> float} } where rlz is
# a single realisation of the random variables and alpha is a redundancy
# lattice node
# - current_value : tuple - current value used for analysis, described by
# target and sample index in the data
# - [estimator-specific settings]
#
# Args:
# target : int
# target id
#
# Returns:
# dict
# Results for single target. Note that for convenience
# dictionary entries can either be accessed via keywords
# (result['selected_vars_sources']) or via dot-notation
# (result.selected_vars_sources).
# """
# return super(ResultsMultivariatePID,
# self).get_single_target(target, fdr=False)
, which may include functions, classes, or code. Output only the next line. | class MultivariatePID(SingleProcessAnalysis): |
Predict the next line after this snippet: <|code_start|> index of target processes
sources : list of ints
indices of the multiple source processes for the target
Returns: ResultsMultivariatePID instance results of
network inference, see documentation of
ResultsPID()
"""
# Check input and initialise values for analysis.
self._initialise(settings, data, target, sources)
# Estimate PID and significance.
self._calculate_pid(data)
# Add analyis info.
results = ResultsMultivariatePID(
n_nodes=data.n_processes,
n_realisations=data.n_realisations(self.current_value),
normalised=data.normalise)
results._add_single_result(
settings=self.settings,
target=self.target,
results=self.results)
self._reset()
return results
def _initialise(self, settings, data, target, sources):
"""Check input, set initial or default values for analysis settings."""
# Check requested PID estimator.
try:
<|code_end|>
using the current file's imports:
import numpy as np
from .single_process_analysis import SingleProcessAnalysis
from .estimator import find_estimator
from .results import ResultsMultivariatePID
and any relevant context from other files:
# Path: idtxl/single_process_analysis.py
# class SingleProcessAnalysis(NetworkAnalysis):
# def __init__(self):
# super().__init__()
#
# Path: idtxl/estimator.py
# def find_estimator(est):
# """Return estimator class.
#
# Return an estimator class. If input is a class, check if it implements
# methods 'estimate' and 'is_parallel' necessary for network analysis
# (see abstract class 'Estimator' for documentation). If input is a string,
# search for class with that name in IDTxl and return it.
#
# Args:
# est : str | Class
# name of an estimator class implemented in IDTxl or custom estimator
# class
#
# Returns
# Class
# Estimator class
# """
# if inspect.isclass(est):
# # Test if provided class implements the Estimator class. This
# # constraint may be relaxed in the future.
# if not np.issubclass_(est, Estimator):
# raise RuntimeError('Provided class should implement abstract class'
# ' Estimator.')
# return est
# elif type(est) is str:
# module_list = _package_contents()
# estimator = None
# for m in module_list:
# try:
# module = importlib.import_module('.' + m, __package__)
# return getattr(module, est)
# except AttributeError:
# pass
# if not estimator:
# raise RuntimeError('Estimator {0} not found.'.format(est))
# else:
# raise TypeError('Please provide an estimator class or the name of an '
# 'estimator as string.')
#
# Path: idtxl/results.py
# class ResultsMultivariatePID(ResultsNetworkAnalysis):
# """Store results of Multivariate Partial Information Decomposition (PID)
# analysis.
#
# Provide a container for results of Multivariate Partial Information
# Decomposition (PID) algorithms.
#
# Note that for convenience all dictionaries in this class can additionally
# be accessed using dot-notation:
#
# >>> res_pid._single_target[2].source_1
#
# or
#
# >>> res_pid._single_target[2].['source_1'].
#
# Attributes:
# settings : dict
# settings used for estimation of information theoretic measures and
# statistical testing
# data_properties : dict
# data properties, contains
#
# - n_nodes : int - total number of nodes in the network
# - n_realisations : int - number of samples available for
# analysis given the settings (e.g., a high maximum lag used in
# network inference, results in fewer data points available for
# estimation)
# - normalised : bool - indicates if data were z-standardised
# before the estimation
#
# targets_analysed : list
# list of analysed targets
# """
#
# def __init__(self, n_nodes, n_realisations, normalised):
# super().__init__(n_nodes, n_realisations, normalised)
#
# def get_single_target(self, target):
# """Return results for a single target in the network.
#
# Results for single targets include for each target
#
# - source_i : tuple - source variable i
# - selected_vars_sources : list of tuples - source variables used in PID
# estimation
# - avg : dict - avg pid {alpha -> float} where alpha is a redundancy
# lattice node
# - ptw : dict of dicts - ptw pid {rlz -> {alpha -> float} } where rlz is
# a single realisation of the random variables and alpha is a redundancy
# lattice node
# - current_value : tuple - current value used for analysis, described by
# target and sample index in the data
# - [estimator-specific settings]
#
# Args:
# target : int
# target id
#
# Returns:
# dict
# Results for single target. Note that for convenience
# dictionary entries can either be accessed via keywords
# (result['selected_vars_sources']) or via dot-notation
# (result.selected_vars_sources).
# """
# return super(ResultsMultivariatePID,
# self).get_single_target(target, fdr=False)
. Output only the next line. | EstimatorClass = find_estimator(settings['pid_estimator']) |
Next line prediction: <|code_start|> sources : list of lists
indices of the multiple source processes for each target, e.g.,
[[0, 1, 2], [1, 0, 3]], all must lists be of the same lenght and
list of lists must have the same length as targets
Returns:
ResultsMultivariatePID instance
results of network inference, see documentation of
ResultsMultivariatePID()
"""
# Set defaults for PID estimation.
settings.setdefault('verbose', True)
settings.setdefault('lags_pid', np.array([[1 for i in range(len(sources[0]))]] * len(targets)))
# Check inputs.
if not len(targets) == len(sources) == len(settings['lags_pid']):
raise RuntimeError('Lists of targets, sources, and lags must have'
'the same lengths.')
for lis_1 in sources:
for lis_2 in sources:
if not len(lis_1) == len(lis_2):
raise RuntimeError('Lists in the list sources must have'
'the same lengths.')
#^ if
#^ for
#^ for
list_of_lags = settings['lags_pid']
# Perform PID estimation for each target individually
<|code_end|>
. Use current file imports:
(import numpy as np
from .single_process_analysis import SingleProcessAnalysis
from .estimator import find_estimator
from .results import ResultsMultivariatePID)
and context including class names, function names, or small code snippets from other files:
# Path: idtxl/single_process_analysis.py
# class SingleProcessAnalysis(NetworkAnalysis):
# def __init__(self):
# super().__init__()
#
# Path: idtxl/estimator.py
# def find_estimator(est):
# """Return estimator class.
#
# Return an estimator class. If input is a class, check if it implements
# methods 'estimate' and 'is_parallel' necessary for network analysis
# (see abstract class 'Estimator' for documentation). If input is a string,
# search for class with that name in IDTxl and return it.
#
# Args:
# est : str | Class
# name of an estimator class implemented in IDTxl or custom estimator
# class
#
# Returns
# Class
# Estimator class
# """
# if inspect.isclass(est):
# # Test if provided class implements the Estimator class. This
# # constraint may be relaxed in the future.
# if not np.issubclass_(est, Estimator):
# raise RuntimeError('Provided class should implement abstract class'
# ' Estimator.')
# return est
# elif type(est) is str:
# module_list = _package_contents()
# estimator = None
# for m in module_list:
# try:
# module = importlib.import_module('.' + m, __package__)
# return getattr(module, est)
# except AttributeError:
# pass
# if not estimator:
# raise RuntimeError('Estimator {0} not found.'.format(est))
# else:
# raise TypeError('Please provide an estimator class or the name of an '
# 'estimator as string.')
#
# Path: idtxl/results.py
# class ResultsMultivariatePID(ResultsNetworkAnalysis):
# """Store results of Multivariate Partial Information Decomposition (PID)
# analysis.
#
# Provide a container for results of Multivariate Partial Information
# Decomposition (PID) algorithms.
#
# Note that for convenience all dictionaries in this class can additionally
# be accessed using dot-notation:
#
# >>> res_pid._single_target[2].source_1
#
# or
#
# >>> res_pid._single_target[2].['source_1'].
#
# Attributes:
# settings : dict
# settings used for estimation of information theoretic measures and
# statistical testing
# data_properties : dict
# data properties, contains
#
# - n_nodes : int - total number of nodes in the network
# - n_realisations : int - number of samples available for
# analysis given the settings (e.g., a high maximum lag used in
# network inference, results in fewer data points available for
# estimation)
# - normalised : bool - indicates if data were z-standardised
# before the estimation
#
# targets_analysed : list
# list of analysed targets
# """
#
# def __init__(self, n_nodes, n_realisations, normalised):
# super().__init__(n_nodes, n_realisations, normalised)
#
# def get_single_target(self, target):
# """Return results for a single target in the network.
#
# Results for single targets include for each target
#
# - source_i : tuple - source variable i
# - selected_vars_sources : list of tuples - source variables used in PID
# estimation
# - avg : dict - avg pid {alpha -> float} where alpha is a redundancy
# lattice node
# - ptw : dict of dicts - ptw pid {rlz -> {alpha -> float} } where rlz is
# a single realisation of the random variables and alpha is a redundancy
# lattice node
# - current_value : tuple - current value used for analysis, described by
# target and sample index in the data
# - [estimator-specific settings]
#
# Args:
# target : int
# target id
#
# Returns:
# dict
# Results for single target. Note that for convenience
# dictionary entries can either be accessed via keywords
# (result['selected_vars_sources']) or via dot-notation
# (result.selected_vars_sources).
# """
# return super(ResultsMultivariatePID,
# self).get_single_target(target, fdr=False)
. Output only the next line. | results = ResultsMultivariatePID( |
Next line prediction: <|code_start|>"""Estimate partial information decomposition (PID).
Estimate PID for two source and one target process using different estimators.
Note:
Written for Python 3.4+
"""
<|code_end|>
. Use current file imports:
(import numpy as np
from .single_process_analysis import SingleProcessAnalysis
from .estimator import find_estimator
from .results import ResultsPID)
and context including class names, function names, or small code snippets from other files:
# Path: idtxl/single_process_analysis.py
# class SingleProcessAnalysis(NetworkAnalysis):
# def __init__(self):
# super().__init__()
#
# Path: idtxl/estimator.py
# def find_estimator(est):
# """Return estimator class.
#
# Return an estimator class. If input is a class, check if it implements
# methods 'estimate' and 'is_parallel' necessary for network analysis
# (see abstract class 'Estimator' for documentation). If input is a string,
# search for class with that name in IDTxl and return it.
#
# Args:
# est : str | Class
# name of an estimator class implemented in IDTxl or custom estimator
# class
#
# Returns
# Class
# Estimator class
# """
# if inspect.isclass(est):
# # Test if provided class implements the Estimator class. This
# # constraint may be relaxed in the future.
# if not np.issubclass_(est, Estimator):
# raise RuntimeError('Provided class should implement abstract class'
# ' Estimator.')
# return est
# elif type(est) is str:
# module_list = _package_contents()
# estimator = None
# for m in module_list:
# try:
# module = importlib.import_module('.' + m, __package__)
# return getattr(module, est)
# except AttributeError:
# pass
# if not estimator:
# raise RuntimeError('Estimator {0} not found.'.format(est))
# else:
# raise TypeError('Please provide an estimator class or the name of an '
# 'estimator as string.')
#
# Path: idtxl/results.py
# class ResultsPID(ResultsNetworkAnalysis):
# """Store results of Partial Information Decomposition (PID) analysis.
#
# Provide a container for results of Partial Information Decomposition (PID)
# algorithms.
#
# Note that for convenience all dictionaries in this class can additionally
# be accessed using dot-notation:
#
# >>> res_pid._single_target[2].source_1
#
# or
#
# >>> res_pid._single_target[2].['source_1'].
#
# Attributes:
# settings : dict
# settings used for estimation of information theoretic measures and
# statistical testing
# data_properties : dict
# data properties, contains
#
# - n_nodes : int - total number of nodes in the network
# - n_realisations : int - number of samples available for
# analysis given the settings (e.g., a high maximum lag used in
# network inference, results in fewer data points available for
# estimation)
# - normalised : bool - indicates if data were z-standardised
# before the estimation
#
# targets_analysed : list
# list of analysed targets
# """
#
# def __init__(self, n_nodes, n_realisations, normalised):
# super().__init__(n_nodes, n_realisations, normalised)
#
# def get_single_target(self, target):
# """Return results for a single target in the network.
#
# Results for single targets include for each target
#
# - source_1 : tuple - source variable 1
# - source_2 : tuple - source variable 2
# - selected_vars_sources : list of tuples - source variables used in PID
# estimation
# - s1_unq : float - unique information in source 1
# - s2_unq : float - unique information in source 2
# - syn_s1_s2 : float - synergistic information in sources 1 and 2
# - shd_s1_s2 : float - shared information in sources 1 and 2
# - current_value : tuple - current value used for analysis, described by
# target and sample index in the data
# - [estimator-specific settings]
#
# Args:
# target : int
# target id
#
# Returns:
# dict
# Results for single target. Note that for convenience
# dictionary entries can either be accessed via keywords
# (result['selected_vars_sources']) or via dot-notation
# (result.selected_vars_sources).
# """
# return super(ResultsPID,
# self).get_single_target(target, fdr=False)
. Output only the next line. | class BivariatePID(SingleProcessAnalysis): |
Given the following code snippet before the placeholder: <|code_start|> index of target processes
sources : list of ints
indices of the two source processes for the target
Returns: ResultsPID instance results of
network inference, see documentation of
ResultsPID()
"""
# Check input and initialise values for analysis.
self._initialise(settings, data, target, sources)
# Estimate PID and significance.
self._calculate_pid(data)
# Add analyis info.
results = ResultsPID(
n_nodes=data.n_processes,
n_realisations=data.n_realisations(self.current_value),
normalised=data.normalise)
results._add_single_result(
settings=self.settings,
target=self.target,
results=self.results)
self._reset()
return results
def _initialise(self, settings, data, target, sources):
"""Check input, set initial or default values for analysis settings."""
# Check requested PID estimator.
try:
<|code_end|>
, predict the next line using imports from the current file:
import numpy as np
from .single_process_analysis import SingleProcessAnalysis
from .estimator import find_estimator
from .results import ResultsPID
and context including class names, function names, and sometimes code from other files:
# Path: idtxl/single_process_analysis.py
# class SingleProcessAnalysis(NetworkAnalysis):
# def __init__(self):
# super().__init__()
#
# Path: idtxl/estimator.py
# def find_estimator(est):
# """Return estimator class.
#
# Return an estimator class. If input is a class, check if it implements
# methods 'estimate' and 'is_parallel' necessary for network analysis
# (see abstract class 'Estimator' for documentation). If input is a string,
# search for class with that name in IDTxl and return it.
#
# Args:
# est : str | Class
# name of an estimator class implemented in IDTxl or custom estimator
# class
#
# Returns
# Class
# Estimator class
# """
# if inspect.isclass(est):
# # Test if provided class implements the Estimator class. This
# # constraint may be relaxed in the future.
# if not np.issubclass_(est, Estimator):
# raise RuntimeError('Provided class should implement abstract class'
# ' Estimator.')
# return est
# elif type(est) is str:
# module_list = _package_contents()
# estimator = None
# for m in module_list:
# try:
# module = importlib.import_module('.' + m, __package__)
# return getattr(module, est)
# except AttributeError:
# pass
# if not estimator:
# raise RuntimeError('Estimator {0} not found.'.format(est))
# else:
# raise TypeError('Please provide an estimator class or the name of an '
# 'estimator as string.')
#
# Path: idtxl/results.py
# class ResultsPID(ResultsNetworkAnalysis):
# """Store results of Partial Information Decomposition (PID) analysis.
#
# Provide a container for results of Partial Information Decomposition (PID)
# algorithms.
#
# Note that for convenience all dictionaries in this class can additionally
# be accessed using dot-notation:
#
# >>> res_pid._single_target[2].source_1
#
# or
#
# >>> res_pid._single_target[2].['source_1'].
#
# Attributes:
# settings : dict
# settings used for estimation of information theoretic measures and
# statistical testing
# data_properties : dict
# data properties, contains
#
# - n_nodes : int - total number of nodes in the network
# - n_realisations : int - number of samples available for
# analysis given the settings (e.g., a high maximum lag used in
# network inference, results in fewer data points available for
# estimation)
# - normalised : bool - indicates if data were z-standardised
# before the estimation
#
# targets_analysed : list
# list of analysed targets
# """
#
# def __init__(self, n_nodes, n_realisations, normalised):
# super().__init__(n_nodes, n_realisations, normalised)
#
# def get_single_target(self, target):
# """Return results for a single target in the network.
#
# Results for single targets include for each target
#
# - source_1 : tuple - source variable 1
# - source_2 : tuple - source variable 2
# - selected_vars_sources : list of tuples - source variables used in PID
# estimation
# - s1_unq : float - unique information in source 1
# - s2_unq : float - unique information in source 2
# - syn_s1_s2 : float - synergistic information in sources 1 and 2
# - shd_s1_s2 : float - shared information in sources 1 and 2
# - current_value : tuple - current value used for analysis, described by
# target and sample index in the data
# - [estimator-specific settings]
#
# Args:
# target : int
# target id
#
# Returns:
# dict
# Results for single target. Note that for convenience
# dictionary entries can either be accessed via keywords
# (result['selected_vars_sources']) or via dot-notation
# (result.selected_vars_sources).
# """
# return super(ResultsPID,
# self).get_single_target(target, fdr=False)
. Output only the next line. | EstimatorClass = find_estimator(settings['pid_estimator']) |
Based on the snippet: <|code_start|> documentation of analyse_single_target() for details, can
contain
- lags_pid : list of lists of ints [optional] - lags in samples
between sources and target (default=[[1, 1], [1, 1] ...])
data : Data instance
raw data for analysis
targets : list of int
index of target processes
sources : list of lists
indices of the two source processes for each target, e.g.,
[[0, 2], [1, 0]], must have the same length as targets
Returns:
ResultsPID instance
results of network inference, see documentation of
ResultsPID()
"""
# Set defaults for PID estimation.
settings.setdefault('verbose', True)
settings.setdefault('lags_pid', np.array([[1, 1]] * len(targets)))
# Check inputs.
if not len(targets) == len(sources) == len(settings['lags_pid']):
raise RuntimeError('Lists of targets, sources, and lags must have'
'the same lengths.')
list_of_lags = settings['lags_pid']
# Perform PID estimation for each target individually
<|code_end|>
, predict the immediate next line with the help of imports:
import numpy as np
from .single_process_analysis import SingleProcessAnalysis
from .estimator import find_estimator
from .results import ResultsPID
and context (classes, functions, sometimes code) from other files:
# Path: idtxl/single_process_analysis.py
# class SingleProcessAnalysis(NetworkAnalysis):
# def __init__(self):
# super().__init__()
#
# Path: idtxl/estimator.py
# def find_estimator(est):
# """Return estimator class.
#
# Return an estimator class. If input is a class, check if it implements
# methods 'estimate' and 'is_parallel' necessary for network analysis
# (see abstract class 'Estimator' for documentation). If input is a string,
# search for class with that name in IDTxl and return it.
#
# Args:
# est : str | Class
# name of an estimator class implemented in IDTxl or custom estimator
# class
#
# Returns
# Class
# Estimator class
# """
# if inspect.isclass(est):
# # Test if provided class implements the Estimator class. This
# # constraint may be relaxed in the future.
# if not np.issubclass_(est, Estimator):
# raise RuntimeError('Provided class should implement abstract class'
# ' Estimator.')
# return est
# elif type(est) is str:
# module_list = _package_contents()
# estimator = None
# for m in module_list:
# try:
# module = importlib.import_module('.' + m, __package__)
# return getattr(module, est)
# except AttributeError:
# pass
# if not estimator:
# raise RuntimeError('Estimator {0} not found.'.format(est))
# else:
# raise TypeError('Please provide an estimator class or the name of an '
# 'estimator as string.')
#
# Path: idtxl/results.py
# class ResultsPID(ResultsNetworkAnalysis):
# """Store results of Partial Information Decomposition (PID) analysis.
#
# Provide a container for results of Partial Information Decomposition (PID)
# algorithms.
#
# Note that for convenience all dictionaries in this class can additionally
# be accessed using dot-notation:
#
# >>> res_pid._single_target[2].source_1
#
# or
#
# >>> res_pid._single_target[2].['source_1'].
#
# Attributes:
# settings : dict
# settings used for estimation of information theoretic measures and
# statistical testing
# data_properties : dict
# data properties, contains
#
# - n_nodes : int - total number of nodes in the network
# - n_realisations : int - number of samples available for
# analysis given the settings (e.g., a high maximum lag used in
# network inference, results in fewer data points available for
# estimation)
# - normalised : bool - indicates if data were z-standardised
# before the estimation
#
# targets_analysed : list
# list of analysed targets
# """
#
# def __init__(self, n_nodes, n_realisations, normalised):
# super().__init__(n_nodes, n_realisations, normalised)
#
# def get_single_target(self, target):
# """Return results for a single target in the network.
#
# Results for single targets include for each target
#
# - source_1 : tuple - source variable 1
# - source_2 : tuple - source variable 2
# - selected_vars_sources : list of tuples - source variables used in PID
# estimation
# - s1_unq : float - unique information in source 1
# - s2_unq : float - unique information in source 2
# - syn_s1_s2 : float - synergistic information in sources 1 and 2
# - shd_s1_s2 : float - shared information in sources 1 and 2
# - current_value : tuple - current value used for analysis, described by
# target and sample index in the data
# - [estimator-specific settings]
#
# Args:
# target : int
# target id
#
# Returns:
# dict
# Results for single target. Note that for convenience
# dictionary entries can either be accessed via keywords
# (result['selected_vars_sources']) or via dot-notation
# (result.selected_vars_sources).
# """
# return super(ResultsPID,
# self).get_single_target(target, fdr=False)
. Output only the next line. | results = ResultsPID( |
Given the code snippet: <|code_start|> reason="Jpype is missing, JIDT estimators are not available")
SEED = 0
def _assert_result(results, expected_res, estimator, measure, tol=0.05):
# Compare estimates with analytic results and print output.
print('{0} - {1} result: {2:.4f} nats expected to be close to {3:.4f} '
'nats.'.format(estimator, measure, results, expected_res))
assert np.isclose(results, expected_res, atol=tol), (
'{0} calculation failed (error larger than {1}).'.format(measure, tol))
def _compare_result(res1, res2, estimator1, estimator2, measure, tol=0.05):
# Compare estimates with each other and print output.
print('{0} vs. {1} - {2} result: {3:.4f} nats vs. {4:.4f} '
'nats.'.format(estimator1, estimator2, measure, res1, res2))
assert np.isclose(res1, res2, atol=tol), (
'{0} calculation failed (error larger than '
'{1}).'.format(measure, tol))
def _get_gauss_data(n=10000, covariance=0.4, expand=True, seed=None):
"""Generate correlated and uncorrelated Gaussian variables.
Generate two sets of random normal data, where one set has a given
covariance and the second is uncorrelated.
"""
np.random.seed(seed)
corr_expected = covariance / (1 * np.sqrt(covariance**2 + (1-covariance)**2))
<|code_end|>
, generate the next line using the imports in this file:
import pytest
import random as rn
import numpy as np
import idtxl.idtxl_exceptions as ex
import jpype
from scipy.special import digamma
from idtxl.estimators_jidt import (JidtKraskovCMI, JidtKraskovMI,
JidtKraskovAIS, JidtKraskovTE,
JidtDiscreteCMI, JidtDiscreteMI,
JidtDiscreteAIS, JidtDiscreteTE,
JidtGaussianCMI, JidtGaussianMI,
JidtGaussianAIS, JidtGaussianTE)
from idtxl.idtxl_utils import calculate_mi
and context (functions, classes, or occasionally code) from other files:
# Path: idtxl/idtxl_utils.py
# def calculate_mi(corr):
# """Calculate mutual information from correlation coefficient."""
# return -0.5 * np.log(1 - corr**2)
. Output only the next line. | expected_mi = calculate_mi(corr_expected) |
Next line prediction: <|code_start|>
@bp.route('/api/v1/deepanimebot/classify_by_url')
def api_v1_classify():
maybe_image_url = request.args.get('url')
if maybe_image_url is None:
return jsonify(error='provide url of image as `url` query param'), 400
message = None
try:
y = current_app.extensions['classifier'].classify(url=maybe_image_url)
return jsonify(y=y)
except exc.TimeoutError:
current_app.logger.debug("timed out while classifying {}".format(maybe_image_url))
message = messages.took_too_long()
except exc.NotImage:
current_app.logger.debug("no image found at {}".format(maybe_image_url))
message = messages.not_an_image()
except Exception as e:
current_app.logger.error("error while classifying {}: {}".format(maybe_image_url, e))
message = messages.something_went_wrong()
return jsonify(error=message), 500
def create_app():
app = Flask(__name__)
app.config.setdefault('DATASET_PATH', 'data/data.h5py')
app.config.setdefault('INPUT_SHAPE', 128)
app.config.setdefault('MODEL_NAME', 'deep_anime_model')
app.register_blueprint(bp)
<|code_end|>
. Use current file imports:
(from flask import Flask, Blueprint, current_app, request, render_template, jsonify
from deepanimebot import classifiers
from deepanimebot import exceptions as exc
from deepanimebot.messages import Messages)
and context including class names, function names, or small code snippets from other files:
# Path: deepanimebot/classifiers.py
# def fetch_cvimage_from_url(url, maxsize=10 * 1024 * 1024):
# def classify(self, *args, **kwargs):
# def __init__(self, dataset_path, input_shape, model_name='model'):
# def classify(self, cvimage):
# def __init__(self, image_classifier):
# def classify(self, url=None):
# def __init__(self, base_url):
# def classify(self, **params):
# class MockClassifier(object):
# class ImageClassifier(object):
# class URLClassifier(object):
# class RemoteClassifier(object):
#
# Path: deepanimebot/exceptions.py
# class NotImage(Exception): pass
# class RemoteError(Exception): pass
#
# Path: deepanimebot/messages.py
# class Messages(object):
# '''Each method is expected to return a message of length under TWEET_MAX_LENGTH.
# '''
# @staticmethod
# def took_too_long():
# return at_random(
# "It took too long to get the image. Try again?",
# )
#
# @staticmethod
# def something_went_wrong():
# return at_random(
# "Something went wrong. Try again later?",
# )
#
# @staticmethod
# def not_an_image():
# return at_random(
# "That doesn't look like an image",
# "Are you sure it's an image?",
# )
#
# @staticmethod
# def unknown_image():
# return at_random(
# 'I have no clue!',
# 'Unknown',
# )
#
# @classmethod
# def my_guess(cls, y, top_n=3, max_length=None, preface="Probable Anime:"):
# if not len(y):
# return cls.unknown_image()
#
# pred_lines = []
# max_category_length = 0
# max_category_length_index = 0
#
# for i, pred in enumerate(y[:top_n]):
# pred_lines.append(deploy.Prediction(
# "{}.".format(pred.rank),
# pred.category,
# "{:.2%}".format(pred.probability),
# ))
# if max_category_length < len(pred.category):
# max_category_length_index = i
# max_category_length = len(pred.category)
#
# newline_count = len(pred_lines)
# pred_length = sum(sum(map(len, pred)) + len(pred) - 1 for pred in pred_lines)
# current_length = len(preface) + newline_count + pred_length
#
# # truncate category name(s) if needed
# if max_length is not None and current_length > max_length:
# lengthy_pred = pred_lines[max_category_length_index]
# excess_length = current_length - max_length
# # don't penalize the longest category if it's going to be truncated too much
# if len(lengthy_pred.category) * 0.5 < excess_length:
# subtract_from_everyone_length = int(math.ceil(excess_length / len(pred_lines)))
# pred_lines = [
# deploy.Prediction(
# pred.rank, pred.category[:-subtract_from_everyone_length], pred.probability)
# for pred in pred_lines]
# else:
# shortened_pred = deploy.Prediction(
# lengthy_pred.rank, lengthy_pred.category[:-excess_length], lengthy_pred.probability)
# pred_lines[max_category_length_index] = shortened_pred
#
# reply = "{}\n{}".format(preface, "\n".join(" ".join(pred) for pred in pred_lines))
# return reply[:max_length] if max_length is not None else reply
. Output only the next line. | app.extensions['classifier'] = classifiers.URLClassifier( |
Given snippet: <|code_start|># -*- coding: utf-8 -*-
bp = Blueprint('bp', __name__, template_folder='templates')
@bp.route('/')
def root():
return render_template('index.html')
@bp.route('/api/v1/deepanimebot/classify_by_url')
def api_v1_classify():
maybe_image_url = request.args.get('url')
if maybe_image_url is None:
return jsonify(error='provide url of image as `url` query param'), 400
message = None
try:
y = current_app.extensions['classifier'].classify(url=maybe_image_url)
return jsonify(y=y)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from flask import Flask, Blueprint, current_app, request, render_template, jsonify
from deepanimebot import classifiers
from deepanimebot import exceptions as exc
from deepanimebot.messages import Messages
and context:
# Path: deepanimebot/classifiers.py
# def fetch_cvimage_from_url(url, maxsize=10 * 1024 * 1024):
# def classify(self, *args, **kwargs):
# def __init__(self, dataset_path, input_shape, model_name='model'):
# def classify(self, cvimage):
# def __init__(self, image_classifier):
# def classify(self, url=None):
# def __init__(self, base_url):
# def classify(self, **params):
# class MockClassifier(object):
# class ImageClassifier(object):
# class URLClassifier(object):
# class RemoteClassifier(object):
#
# Path: deepanimebot/exceptions.py
# class NotImage(Exception): pass
# class RemoteError(Exception): pass
#
# Path: deepanimebot/messages.py
# class Messages(object):
# '''Each method is expected to return a message of length under TWEET_MAX_LENGTH.
# '''
# @staticmethod
# def took_too_long():
# return at_random(
# "It took too long to get the image. Try again?",
# )
#
# @staticmethod
# def something_went_wrong():
# return at_random(
# "Something went wrong. Try again later?",
# )
#
# @staticmethod
# def not_an_image():
# return at_random(
# "That doesn't look like an image",
# "Are you sure it's an image?",
# )
#
# @staticmethod
# def unknown_image():
# return at_random(
# 'I have no clue!',
# 'Unknown',
# )
#
# @classmethod
# def my_guess(cls, y, top_n=3, max_length=None, preface="Probable Anime:"):
# if not len(y):
# return cls.unknown_image()
#
# pred_lines = []
# max_category_length = 0
# max_category_length_index = 0
#
# for i, pred in enumerate(y[:top_n]):
# pred_lines.append(deploy.Prediction(
# "{}.".format(pred.rank),
# pred.category,
# "{:.2%}".format(pred.probability),
# ))
# if max_category_length < len(pred.category):
# max_category_length_index = i
# max_category_length = len(pred.category)
#
# newline_count = len(pred_lines)
# pred_length = sum(sum(map(len, pred)) + len(pred) - 1 for pred in pred_lines)
# current_length = len(preface) + newline_count + pred_length
#
# # truncate category name(s) if needed
# if max_length is not None and current_length > max_length:
# lengthy_pred = pred_lines[max_category_length_index]
# excess_length = current_length - max_length
# # don't penalize the longest category if it's going to be truncated too much
# if len(lengthy_pred.category) * 0.5 < excess_length:
# subtract_from_everyone_length = int(math.ceil(excess_length / len(pred_lines)))
# pred_lines = [
# deploy.Prediction(
# pred.rank, pred.category[:-subtract_from_everyone_length], pred.probability)
# for pred in pred_lines]
# else:
# shortened_pred = deploy.Prediction(
# lengthy_pred.rank, lengthy_pred.category[:-excess_length], lengthy_pred.probability)
# pred_lines[max_category_length_index] = shortened_pred
#
# reply = "{}\n{}".format(preface, "\n".join(" ".join(pred) for pred in pred_lines))
# return reply[:max_length] if max_length is not None else reply
which might include code, classes, or functions. Output only the next line. | except exc.TimeoutError: |
Given the code snippet: <|code_start|># -*- coding: utf-8 -*-
TEST_IMAGE_PATH = os.path.join(os.path.dirname(__file__), 'fixtures', '1920x1080.png')
def test_fetch_cvimage_from_url(monkeypatch):
with open(TEST_IMAGE_PATH, 'rb') as f:
image = f.read()
monkeypatch.setattr(requests, 'get', mocks.mock_get(image))
<|code_end|>
, generate the next line using the imports in this file:
import os
import time
import json
import h5py
import cv2
import pytest
import requests
import six
import deploy
import data
import mocks
from multiprocessing import TimeoutError
from deepanimebot import classifiers
from deepanimebot import exceptions as exc
and context (functions, classes, or occasionally code) from other files:
# Path: deepanimebot/classifiers.py
# def fetch_cvimage_from_url(url, maxsize=10 * 1024 * 1024):
# def classify(self, *args, **kwargs):
# def __init__(self, dataset_path, input_shape, model_name='model'):
# def classify(self, cvimage):
# def __init__(self, image_classifier):
# def classify(self, url=None):
# def __init__(self, base_url):
# def classify(self, **params):
# class MockClassifier(object):
# class ImageClassifier(object):
# class URLClassifier(object):
# class RemoteClassifier(object):
#
# Path: deepanimebot/exceptions.py
# class NotImage(Exception): pass
# class RemoteError(Exception): pass
. Output only the next line. | image = classifiers.fetch_cvimage_from_url('this url is ignored') |
Continue the code snippet: <|code_start|> cvimage = cv2.imread(TEST_IMAGE_PATH)
# TODO: add fixture for weights and refactor so that model is loaded from a workspace directory
classifier = classifiers.ImageClassifier('ignored path', 128, 'deep_anime_model')
y = classifier.classify(cvimage)
assert isinstance(y, list)
assert isinstance(y[0], deploy.Prediction)
def create_url_classifier(monkeypatch):
# TODO: add fixture for categories and mean. (95 is a magic number corresponding to the deployed model)
monkeypatch.setattr(data, 'get_categories', lambda: dict((str(n), n) for n in range(95)))
monkeypatch.setattr(data, 'get_mean', lambda path: None)
# TODO: add fixture for weights and refactor so that model is loaded from a workspace directory
image_classifier = classifiers.ImageClassifier('ignored path', 128, 'deep_anime_model')
return classifiers.URLClassifier(image_classifier)
def test_url_classifier_classify(monkeypatch):
with open(TEST_IMAGE_PATH, 'rb') as f:
image = f.read()
monkeypatch.setattr(requests, 'get', mocks.mock_get(image))
url_classifier = create_url_classifier(monkeypatch)
y = url_classifier.classify(TEST_IMAGE_PATH)
assert isinstance(y, list)
assert isinstance(y[0], deploy.Prediction)
def test_url_classifier_classify_none(monkeypatch):
monkeypatch.setattr(classifiers, 'fetch_cvimage_from_url', lambda url: None)
url_classifier = create_url_classifier(monkeypatch)
<|code_end|>
. Use current file imports:
import os
import time
import json
import h5py
import cv2
import pytest
import requests
import six
import deploy
import data
import mocks
from multiprocessing import TimeoutError
from deepanimebot import classifiers
from deepanimebot import exceptions as exc
and context (classes, functions, or code) from other files:
# Path: deepanimebot/classifiers.py
# def fetch_cvimage_from_url(url, maxsize=10 * 1024 * 1024):
# def classify(self, *args, **kwargs):
# def __init__(self, dataset_path, input_shape, model_name='model'):
# def classify(self, cvimage):
# def __init__(self, image_classifier):
# def classify(self, url=None):
# def __init__(self, base_url):
# def classify(self, **params):
# class MockClassifier(object):
# class ImageClassifier(object):
# class URLClassifier(object):
# class RemoteClassifier(object):
#
# Path: deepanimebot/exceptions.py
# class NotImage(Exception): pass
# class RemoteError(Exception): pass
. Output only the next line. | with pytest.raises(exc.NotImage): |
Given the following code snippet before the placeholder: <|code_start|> if retweeted_status is None:
return False
return retweeted_status.author.screen_name == screen_name
def status_mentions(status, screen_name):
for mention in status.entities.get('user_mentions', []):
if mention['screen_name'] == screen_name:
return True
return False
def url_from_entities(entities):
for media in entities.get('media', []):
if media['type'] == 'photo':
return media['media_url']
for url in entities.get('urls', []):
return url['expanded_url']
def main(args):
if args.debug:
logger.setLevel(logging.DEBUG)
auth = tweepy.OAuthHandler(args.consumer_key, args.consumer_secret)
auth.set_access_token(args.access_token, args.access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
screen_name = api.me().screen_name
if args.classifier == 'mock':
<|code_end|>
, predict the next line using imports from the current file:
import functools
import logging
import os
import random
import time
import tweepy
import deploy
import gceutil
import configargparse
from deepanimebot import classifiers
from deepanimebot import exceptions as exc
from deepanimebot import messages
and context including class names, function names, and sometimes code from other files:
# Path: deepanimebot/classifiers.py
# def fetch_cvimage_from_url(url, maxsize=10 * 1024 * 1024):
# def classify(self, *args, **kwargs):
# def __init__(self, dataset_path, input_shape, model_name='model'):
# def classify(self, cvimage):
# def __init__(self, image_classifier):
# def classify(self, url=None):
# def __init__(self, base_url):
# def classify(self, **params):
# class MockClassifier(object):
# class ImageClassifier(object):
# class URLClassifier(object):
# class RemoteClassifier(object):
#
# Path: deepanimebot/exceptions.py
# class NotImage(Exception): pass
# class RemoteError(Exception): pass
#
# Path: deepanimebot/messages.py
# class Messages(object):
# class StatusMessages(Messages):
# class DMMessages(Messages):
# def took_too_long():
# def something_went_wrong():
# def not_an_image():
# def unknown_image():
# def my_guess(cls, y, top_n=3, max_length=None, preface="Probable Anime:"):
# def give_me_an_image():
# def give_me_an_image():
. Output only the next line. | classifier = classifiers.MockClassifier() |
Using the snippet: <|code_start|> sender_name = status.author.screen_name
if sender_name == self.screen_name:
return
logger.debug(u"{0} incoming status {1}".format(status.id, status.text))
if retweets_me(status, self.screen_name):
logger.debug("{0} is a retweet".format(status.id))
return
if not status_mentions(status, self.screen_name):
logger.debug("{0} doesn't mention {1}".format(status.id, self.screen_name))
return
prefix = '@{0} '.format(sender_name)
reply = self.get_reply(status.id, status.entities, TWEET_MAX_LENGTH - len(prefix), messages.StatusMessages)
status_text = prefix + reply
if self.silent:
return
return self.api, 'update_status', (status_text,), dict(in_reply_to_status_id=status.id)
def get_reply(self, status_id, entities, max_length, messages):
maybe_image_url = url_from_entities(entities)
if not maybe_image_url:
logger.debug("{0} doesn't have a URL".format(status_id))
return messages.give_me_an_image()
try:
y = self.classifier.classify(url=maybe_image_url)
<|code_end|>
, determine the next line of code. You have imports:
import functools
import logging
import os
import random
import time
import tweepy
import deploy
import gceutil
import configargparse
from deepanimebot import classifiers
from deepanimebot import exceptions as exc
from deepanimebot import messages
and context (class names, function names, or code) available:
# Path: deepanimebot/classifiers.py
# def fetch_cvimage_from_url(url, maxsize=10 * 1024 * 1024):
# def classify(self, *args, **kwargs):
# def __init__(self, dataset_path, input_shape, model_name='model'):
# def classify(self, cvimage):
# def __init__(self, image_classifier):
# def classify(self, url=None):
# def __init__(self, base_url):
# def classify(self, **params):
# class MockClassifier(object):
# class ImageClassifier(object):
# class URLClassifier(object):
# class RemoteClassifier(object):
#
# Path: deepanimebot/exceptions.py
# class NotImage(Exception): pass
# class RemoteError(Exception): pass
#
# Path: deepanimebot/messages.py
# class Messages(object):
# class StatusMessages(Messages):
# class DMMessages(Messages):
# def took_too_long():
# def something_went_wrong():
# def not_an_image():
# def unknown_image():
# def my_guess(cls, y, top_n=3, max_length=None, preface="Probable Anime:"):
# def give_me_an_image():
# def give_me_an_image():
. Output only the next line. | except exc.TimeoutError: |
Given snippet: <|code_start|> if not rv:
return
api, action, args, kwargs = rv
end = start + random.randint(1, 5)
sleep = end - time.time()
if sleep > 0:
time.sleep(sleep)
return getattr(api, action)(*args, **kwargs)
return wrapper
class ReplyToTweet(tweepy.StreamListener):
def __init__(self, screen_name, classifier, api=None, silent=False):
super(ReplyToTweet, self).__init__(api)
self.screen_name = screen_name
self.classifier = classifier
self.silent = silent
@wait_like_a_human
def on_direct_message(self, data):
status = data.direct_message
sender_name = status['sender']['screen_name']
if sender_name == self.screen_name:
return
logger.debug(u"{0} incoming dm {1}".format(status['id'], status['text']))
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import functools
import logging
import os
import random
import time
import tweepy
import deploy
import gceutil
import configargparse
from deepanimebot import classifiers
from deepanimebot import exceptions as exc
from deepanimebot import messages
and context:
# Path: deepanimebot/classifiers.py
# def fetch_cvimage_from_url(url, maxsize=10 * 1024 * 1024):
# def classify(self, *args, **kwargs):
# def __init__(self, dataset_path, input_shape, model_name='model'):
# def classify(self, cvimage):
# def __init__(self, image_classifier):
# def classify(self, url=None):
# def __init__(self, base_url):
# def classify(self, **params):
# class MockClassifier(object):
# class ImageClassifier(object):
# class URLClassifier(object):
# class RemoteClassifier(object):
#
# Path: deepanimebot/exceptions.py
# class NotImage(Exception): pass
# class RemoteError(Exception): pass
#
# Path: deepanimebot/messages.py
# class Messages(object):
# class StatusMessages(Messages):
# class DMMessages(Messages):
# def took_too_long():
# def something_went_wrong():
# def not_an_image():
# def unknown_image():
# def my_guess(cls, y, top_n=3, max_length=None, preface="Probable Anime:"):
# def give_me_an_image():
# def give_me_an_image():
which might include code, classes, or functions. Output only the next line. | reply = self.get_reply(status['id'], status['entities'], TWEET_MAX_LENGTH - len('d {} '.format(sender_name)), messages.DMMessages) |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
def test_my_guess_honors_max_length_by_truncating_longest():
y = [
deploy.Prediction(2, '567890', 0.024),
deploy.Prediction(7, '012', 0.046),
]
# before truncation:
# '\n2. 567890 2.40%\n7. 012 4.60%'
max_length = 26
<|code_end|>
, predict the immediate next line with the help of imports:
import deploy
from deepanimebot import messages
and context (classes, functions, sometimes code) from other files:
# Path: deepanimebot/messages.py
# class Messages(object):
# class StatusMessages(Messages):
# class DMMessages(Messages):
# def took_too_long():
# def something_went_wrong():
# def not_an_image():
# def unknown_image():
# def my_guess(cls, y, top_n=3, max_length=None, preface="Probable Anime:"):
# def give_me_an_image():
# def give_me_an_image():
. Output only the next line. | reply = messages.Messages.my_guess(y, preface='', max_length=max_length) |
Based on the snippet: <|code_start|># -*- coding: utf-8 -*-
@timeout(30)
def fetch_cvimage_from_url(url, maxsize=10 * 1024 * 1024):
req = requests.get(url, timeout=5, stream=True)
content = ''
for chunk in req.iter_content(2048):
content += chunk
if len(content) > maxsize:
req.close()
raise ValueError('Response too large')
img_array = np.asarray(bytearray(content), dtype=np.uint8)
cv2_img_flag = cv2.CV_LOAD_IMAGE_COLOR
image = cv2.imdecode(img_array, cv2_img_flag)
return image
class MockClassifier(object):
def classify(self, *args, **kwargs):
<|code_end|>
, predict the immediate next line with the help of imports:
import cv2
import numpy as np
import h5py
import requests
import data
import deploy
from .decorators import timeout
from . import exceptions as exc
from .shortcuts import at_random
and context (classes, functions, sometimes code) from other files:
# Path: deepanimebot/decorators.py
# def timeout(max_timeout):
# """Timeout decorator, parameter in seconds."""
# def timeout_decorator(f):
# """Wrap the original function."""
# @functools.wraps(f)
# def func_wrapper(self, *args, **kwargs):
# """Closure for function."""
# pool = multiprocessing.pool.ThreadPool(processes=1)
# async_result = pool.apply_async(f, (self,) + args, kwargs)
# timeout = kwargs.pop('timeout_max_timeout', max_timeout) or max_timeout
# # raises a TimeoutError if execution exceeds max_timeout
# return async_result.get(timeout)
# return func_wrapper
# return timeout_decorator
#
# Path: deepanimebot/shortcuts.py
# def at_random(*messages):
# return random.choice(messages)
. Output only the next line. | message = at_random( |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
from __future__ import absolute_import
class Messages(object):
'''Each method is expected to return a message of length under TWEET_MAX_LENGTH.
'''
@staticmethod
def took_too_long():
<|code_end|>
, determine the next line of code. You have imports:
import math
import deploy
from deepanimebot.shortcuts import at_random
and context (class names, function names, or code) available:
# Path: deepanimebot/shortcuts.py
# def at_random(*messages):
# return random.choice(messages)
. Output only the next line. | return at_random( |
Using the snippet: <|code_start|># -*- coding: utf-8 -*-
def test_top_n_shows(monkeypatch):
for report, expected in [
(two_shows, [
{'id': '11770', 'name': 'Steins;Gate'},
{'id': '10216', 'name': 'Fullmetal Alchemist: Brotherhood'}]),
(no_shows, []),
]:
monkeypatch.setattr(requests, 'get', mocks.mock_get(report))
<|code_end|>
, determine the next line of code. You have imports:
import requests
import mocks
from examples import anime_names
and context (class names, function names, or code) available:
# Path: examples/anime_names.py
# ANN_REPORTS_URL = 'http://www.animenewsnetwork.com/encyclopedia/reports.xml'
# ANN_DETAILS_URL = 'http://cdn.animenewsnetwork.com/encyclopedia/api.xml'
# ANN_ANIME_RATINGS_REPORT_ID = 172
# TRAILING_KIND_RE = re.compile(r'\s+\([^)]+\)$')
# def get_top_n_shows(n):
# def _extract_item_name(item):
# def list_characters(shows):
# def _extract_anime_characters(root):
# def print_csv(field_items, fileobj=sys.stdout, fields=None):
. Output only the next line. | shows = anime_names.get_top_n_shows(100) |
Given snippet: <|code_start|>
class test_loci(unittest.TestCase):
def setUp(self):
self.solution ={'A':allele("A"),
'T':allele("T"),
'C':allele("C"),
'G':allele("G"),
'-':allele("-")}
self.solution["A"].count = 11
self.solution["A"].freq = 11/18.0
self.solution["T"].count = 4
self.solution["T"].freq = 4/18.0
self.solution["C"].count = 1
self.solution["C"].freq = 1/18.0
self.solution["G"].count = 1
self.solution["G"].freq = 1/18.0
self.solution["-"].count = 1
self.solution["-"].freq = 1/18.0
self.pileup = "AAAAATTAATTAAA-AGC"
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
import pysam
import copy
from scripts.seq_classes import locus, segment, allele
and context:
# Path: scripts/seq_classes.py
# def locus(self,pos):
# loci = [x for x in self.seq if x.pos==pos]
# return loci[0]
#
# class locus(object):
# """ A base which has the following characteristics
# Atrributes :
# Chr : chromosome
# pos : position on chromosome base 0
# counts : a dictionary of { A : the number of As
# T : the number of Ts
# C : the number of Cs
# G : the number of Gs
# - : the number of of deletions
# }
# add consensus.
# add concat_pos
# methods :
# update : update counts
# calc_freqs : calculate the frequency of each base
# consensus : caculate the consensus sequence as this position
# """
# def __init__(self,pos):
# """
# return a base object with chr and pos and starting counts of 0
# Posisition is base 0 because this is python and I want to keep everything
# easy to remember. If it came from python it is base 0.
# """
# self.pos = pos
# # self.alleles = {'A':allele("A"),
# # 'T':allele("T"),
# # 'C':allele("C"),
# # 'G':allele("G"),
# # '-':allele("-")}
# self.alleles = {}
# self.coverage = 0
# self.concat_pos = None
# self.consensus = ''
#
# def update(self,base):
# self.coverage = self.coverage+1
#
# if base in self.alleles.keys():
# self.alleles[base].count+=1
#
# else:
# self.alleles.update({base:allele(base)})
# self.alleles[base].count+=1
#
# self.calc_freqs()
# self.consensus = self.calc_consensus()
# def calc_freqs(self):
# for base in self.alleles.keys():
# self.alleles[base].freq = self.alleles[base].count/self.coverage
#
# def calc_consensus(self,cutoff=None):
# if self.coverage==0:
# return '-'
# else:
# self.calc_freqs()
# v=[y.freq for y in self.alleles.values()] #values
# k=list(self.alleles.keys()) # key
# # check for cutoff method
# if cutoff ==None:
# # Return the most common base
# return k[v.index(max(v))]
#
#
# else:
# # return the base that is above the cutoff
# if type(cutoff)!=float or (cutoff>1 or cutoff<0.5):
# raise ValueError('cutoff must be a float in [0.5,1.0] or nothing')
# indexes = [index for index, value in enumerate(v) if value > cutoff]
# if len(indexes)==1:
# consensus = k[indexes[0]]
# else:
# consensus = "N"
# return consensus
#
# def reprJSON(self): # https://stackoverflow.com/questions/5160077/encoding-nested-python-object-in-json
# d = dict()
# for a, v in self.__dict__.items():
# if (hasattr(v, "reprJSON")):
# d[a] = v.reprJSON()
# elif a=="alleles":
# d["alleles"]={}
# for nt, alle in v.items():
# d["alleles"].update({nt: alle.reprJSON()})
# else:
# d[a] = v
# return d
#
# class segment(object):
# """ A sequence like object made up of locus objects
# Attriutes:
# chr - the name of the chr
# seq - a list of loci
# methods:
# append: append another position
# update: update base count at a position (loci base 1)
# consensus: calcuate the consensus sequence"""
# def __init__(self,chr):
# self.chr = chr
# self.seq = []
# def append_loci(self,loci):
# if type(loci) is not locus:
# raise ValueError('Only class locus can be appended to a segment object')
# # if loci.chr!=self.chr:
# # raise ValueError('The loci chr does not match the segement chr')
# if len(self.seq)>0 and loci.pos!=max([x.pos for x in self.seq])+1:
# raise ValueError('The position of the loci does not match current segment length')
# self.seq.append(loci)
# def consensus(self,cutoff=None):
# seg_consensus = ""
# for loci in self.seq:
# seg_consensus=seg_consensus+loci.calc_consensus(cutoff)
# return seg_consensus
# def locus(self,pos):
# loci = [x for x in self.seq if x.pos==pos]
# return loci[0]
# def calc_coverage(self):
# cov = []
# for loci in self.seq:
# cov.append(loci.coverage)
# return cov
#
# def reprJSON(self): # https://stackoverflow.com/questions/5160077/encoding-nested-python-object-in-json
# d = dict()
# for a, v in self.__dict__.items():
# if hasattr(v, "reprJSON"):
# d[a] = v.reprJSON()
# elif a=="seq":
# d["seq"]=[]
# for l in v:
# d["seq"].append(l.reprJSON())
# else:
# d[a] = v
# return d
which might include code, classes, or functions. Output only the next line. | self.l = locus(pos =0) |
Using the snippet: <|code_start|> "chr": u"PA",
"count": 10,
"freq": 0.10,
"mutationalClass": [
{
u"ORF": u"PA",
u"aminoAcidPos": 6,
u"classification": u"Synonymous",
u"codingPos": 20,
u"codonPos": 2,
u"consensusAA": u"Q",
u"varAA": u"Q"
}
],
"nucleotide": u"G",
"concat_pos": 4728,
"consensus": u"A",
"coverage": 100,
"pos": 44
}]
def tearDown(self):
"""
This method is called after each test
"""
pass
def test_parse_correctly(self):
"""
Test that we can add bases and count correctly
"""
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import copy
import json
from scripts.variantJSONtocsv import parseJson
and context (class names, function names, or code) available:
# Path: scripts/variantJSONtocsv.py
# def parseJson(data):
# parsedDataSet = []
# sample = data["Sample"]
# genome = data["genome"]
# for i in range(0,len(genome)):
# #cycle through each segment
# chr = genome[i]["chr"]
# for j in range(0,len(genome[i]['seq'])):
# consensus = genome[i]['seq'][j]['consensus']
# coverage = genome[i]['seq'][j]['coverage']
# concat_pos = genome[i]['seq'][j]['concat_pos']
# pos = genome[i]['seq'][j]['pos']
# for key in genome[i]['seq'][j]['alleles'].keys():
# freq = genome[i]['seq'][j]['alleles'][key]['freq']
# nt = genome[i]['seq'][j]['alleles'][key]['nucleotide']
# count = genome[i]['seq'][j]['alleles'][key]['count']
# datapoint = {
# "Sample": sample,
# "chr": chr,
# "nucleotide": nt,
# "consensus": consensus,
# "pos": pos,
# "concat_pos": concat_pos,
# "freq": freq,
# "count": count,
# "coverage": coverage,
# "mutationalClass": genome[i]['seq'][j]['alleles'][key]['mutationalClass']
# }
# parsedDataSet.append(datapoint)
# return(parsedDataSet)
. Output only the next line. | x = parseJson(json.loads(self.jsonString)) |
Here is a snippet: <|code_start|>
class test_checkORF(unittest.TestCase):
def tearDown(self):
"""
This method is called after each test
"""
pass
def test_good(self):
sequence = Seq("ATGATGTAA")
<|code_end|>
. Write the next line using the current file imports:
import unittest
import pysam
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.Alphabet import generic_dna
from Bio.SeqRecord import SeqRecord
from scripts.seq_classes import allele, checkORF,classify
and context from other files:
# Path: scripts/seq_classes.py
# class allele(object):
# """
# The allele present at a loci and their accompanying data
# """
# def __init__(self,nucleotide):
# self.nucleotide=nucleotide
# self.count = 0
# self.freq = 0
# self.mutationalClass =[]
# def classifyVar(self,sequence,codingRegion,pos):
# self.mutationalClass.append(classify(sequence,codingRegion,pos,self.nucleotide))
# def reprJSON(self): # https://stackoverflow.com/questions/5160077/encoding-nested-python-object-in-json
# d = dict()
# for a, v in self.__dict__.items():
# if (hasattr(v, "reprJSON")):
# d[a] = v.reprJSON()
# else:
# d[a] = v
# return d
#
# def checkORF(seq):
# """
# This function varifies the input is an ORF by checking it has
# i) a start codon
# ii) is made of complete codons mod(lenth(seg),3)=0 taking gaps into account
# iii) Has a stop codon at the end and not before.
# """
# protien = seq.translate()
# # check for ATG at start
# if seq.find('ATG') !=0:
# if seq.find('ATG') ==-1:
# raise ValueError("No start codon")
# if seq.find('ATG')>0:
# raise ValueError("start codon found at position "+ str(seq.find('ATG')))
#
# elif len(seq)% 3!=0:
# raise ValueError("The sequence is not multiple of 3")
#
# elif protien.find('*') != len(protien)-1:
# if protien.find('*') ==-1:
# raise ValueError("No stop codon in ORF")
# else:
# raise ValueError("Internal stop codon found at position "+ str(protien.find('*')))
#
# else:
# return(True)
#
# def classify(sequence,codingRegion,pos,nucleotide):
#
# """
# seqeunce is a seqRecord
# codingRegion is a diction in the form
# {
# "name": "NS1",
# "regions": [
# {
# "start": 26,
# "stop": 719
# }
# ]
# }
# The ouput is a dictionary added to the mutationalClass list
# it is of the form
# {
# ORF: The name of the ORF,
# codonPos: the position in the codon either [0,1,2],
# codingPos: the nucleotide position in the ORF
# aminoAcidPos: The position of the amino acid in the polypetide,
# consensusAA: The consensus amino acid,
# varAA: The variant amino acid,
# classification: Nonsynonymous,Synonymous,indel, stop,
# }
# """
# # Get the coding sequence
# # Get the new position in the coding sequence
# i = 0
# # Has to catch the case where it's outside these regions
# outsideORF=True
# for seg in codingRegion["regions"]:
# if pos >= seg["start"] and pos<seg["stop"]:
# outsideORF=False
# break
# else:
# i+=1
# if outsideORF:
# return({
# "ORF": codingRegion["name"],
# "codonPos": None,
# "codingPos": None,
# "aminoAcidPos": None,
# "consensusAA": None,
# "varAA": None,
# "classification": "Noncoding"
#
# })
#
# codingSequence=""
# for seg in codingRegion["regions"]:
# codingSequence=codingSequence+sequence[seg["start"]:seg["stop"]]
# #consensusSequence = Seq(codingSequence,generic_dna)
# checkORF(codingSequence)
#
# posInSeg = pos-codingRegion["regions"][i]["start"]
# otherRegions = codingRegion["regions"][:i]
# adjustment = 0
# if len(otherRegions)>0:
# for seg in otherRegions:
# adjustment+=seg["stop"]-seg["start"]
#
# codingPos = posInSeg+adjustment
# codonPos = codingPos % 3
# aminoAcidPos = codingPos // 3
#
# consensusProtien = codingSequence.translate()
# consensusAA = consensusProtien[aminoAcidPos]
#
# if nucleotide=="-":
# return({
# "ORF": codingRegion["name"],
# "codonPos": codonPos,
# "codingPos": codingPos,
# "aminoAcidPos": aminoAcidPos,
# "consensusAA": consensusAA,
# "varAA": None,
# "classification": "Indel"
# })
#
# mutantCodingSequence = codingSequence
# mutantCodingSequence = mutantCodingSequence[:codingPos]+ str(nucleotide) + mutantCodingSequence[codingPos+1:] # this is still seq object
# #mutantSequence = Seq(mutantCodingSequence,generic_dna)
# mutantProtein = mutantCodingSequence.translate()
#
# varAA = mutantProtein[aminoAcidPos]
#
# if varAA==consensusAA:
# classification = "Synonymous"
# elif varAA=="*":
# classification = "Stop"
# elif consensusAA=="*" and varAA!=consensusAA:
# classification= "Readthrough"
# elif varAA!=consensusAA:
# classification="Nonsynonymous"
#
# return({
# "ORF": codingRegion["name"],
# "codonPos": codonPos,
# "codingPos": codingPos,
# "aminoAcidPos": aminoAcidPos,
# "consensusAA": consensusAA,
# "varAA": varAA,
# "classification": classification
# })
, which may include functions, classes, or code. Output only the next line. | self.assertTrue(checkORF(sequence)) |
Using the snippet: <|code_start|>
class test_loci(unittest.TestCase):
def setUp(self):
pass
def test_trimming_ends(self):
solution = ["AAAAA",[[2,7]]]
seqs = ["--AAAAA--","TTAAAAATT"]#TTAATTAAA-AGC"
<|code_end|>
, determine the next line of code. You have imports:
import unittest
from scripts.trim_to_coding import get_regions, trim_sequences
and context (class names, function names, or code) available:
# Path: scripts/trim_to_coding.py
# def get_regions(ref_seq):
#
# """
# ref_seq is a string containing gaps '-'
# This function returns a list of lists (legnth two)
# with the start and stop (python) of the non gapped regions.
# """
# gap=True
# gene = []
# region = []
# for i in range(0,len(ref_seq)):
# if ref_seq[i]!='-' and gap==True:
# region.append(i)
# if i ==len(ref_seq)-1: # There is a trailing base at the end
# region.append(i+1)
# gene.append(region)
# gap=False
#
# elif ref_seq[i]=='-' and gap ==False:
# region.append(i)
# gene.append(region)
# region=[]
# gap=True
# #print "setting gap to true"
# elif ref_seq[i]!='-' and gap==False and i ==len(ref_seq)-1: # The last character is not a gap and should be included in this frame
# region.append(i+1)
# gene.append(region)
# return gene
#
# def trim_sequences(aligned_header_seqs):
# """
# The first sequence in this alignment is taken to correspond to the reference sequence.
# The returned variable is a list similar to aligned_headers_seqs, but with
# all positions corresponding to gaps in this reference sequence stripped away.
# In the sample sequence, every character at
# the same position as a gap in the reference sequence is removed. The headers are
# unchanged. The order of sequences in this stripped alignment is also
# unchanged.
# """
#
# if not (isinstance(aligned_header_seqs, list) and len(aligned_header_seqs) >= 2):
# raise ValueError, "Input does not specify at least two aligned sequences."
# ref_seq = aligned_header_seqs[0]# str yields the sequence the reference here is shorter so we count the '-' in this sequence.
#
# regions= get_regions(ref_seq)
#
#
# samp_seq=aligned_header_seqs[1]
#
# sample_sequence = trim_to_regions(samp_seq,regions)
# return([sample_sequence,regions])
. Output only the next line. | output = trim_sequences(seqs) |
Given snippet: <|code_start|>
class TrimmerTest(unittest.TestCase):
def test_trim_passthroughIfLengthsMatch(self):
left_stanza = "@id.1\nACGTACGT\n+\nQQQQQQQQ"
right_stanza = "@id.2\nTGCATGCA\n+\nQQQQQQQQ"
source_left = MockReader(left_stanza)
source_right = MockReader(right_stanza)
dest_left = MockWriter()
dest_right = MockWriter()
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import unittest
from scripts.trimmer import trim
and context:
# Path: scripts/trimmer.py
# def trim(source_left, source_right, dest_left, dest_right):
# fastq_pairs = itertools.izip(
# FastqStanza.stanza_generator(source_left), FastqStanza.stanza_generator(source_right))
# for (left_stanza, right_stanza) in fastq_pairs:
# FastqStanza.trim_to_shortest_sequence_length(left_stanza, right_stanza)
# dest_left.write(left_stanza.as_text())
# dest_right.write(right_stanza.as_text())
which might include code, classes, or functions. Output only the next line. | trim(source_left, source_right, dest_left, dest_right) |
Based on the snippet: <|code_start|>
class DemultiplexerTest(unittest.TestCase):
def test_demultiplex_singleton_identity(self):
left_stanza = "@CAT:DUPE_3:ID_1047:FLAG_1 1\nACGTACGT\n+\nAAAAAAAA"
right_stanza = "@CAT:DUPE_3:ID_1047:FLAG_1 2\nACGTACGT\n+\nAAAAAAAA"
source_left = MockReader(left_stanza)
source_right = MockReader(right_stanza)
dest_left = MockWriter()
dest_right = MockWriter()
barcode_files = {'CAT' : [dest_left, dest_right] }
<|code_end|>
, predict the immediate next line with the help of imports:
import unittest
from scripts.demultiplexer import demultiplex, MismatchedSequenceIdError, UndefinedBarcodeError
and context (classes, functions, sometimes code) from other files:
# Path: scripts/demultiplexer.py
# def demultiplex(left_fastq, right_fastq, barcode_files):
# fastq_pairs = itertools.izip(
# FastqStanza.stanza_generator(left_fastq), FastqStanza.stanza_generator(right_fastq))
# for (left_stanza, right_stanza) in fastq_pairs:
# assert_sequence_ids_match(left_stanza, right_stanza)
# barcode = left_stanza.sequence_id.split(':')[0][1:]
# try:
# (left_out, right_out) = barcode_files[barcode]
# except KeyError:
# raise UndefinedBarcodeError(left_stanza)
# left_out.write(left_stanza.as_text())
# right_out.write(right_stanza.as_text())
#
# class MismatchedSequenceIdError(DemultiplexError):
# def __init__(self, left_stanza, right_stanza):
# super(MismatchedSequenceIdError, self).__init__()
# self.left_sequence_id = left_stanza.sequence_id
# self.right_sequence_id = right_stanza.sequence_id
#
# def __str__(self):
# return repr("left/right sequence_ids do not match [{0}] != [{1}].". \
# format(self.left_sequence_id), self.right_sequence_id)
#
# class UndefinedBarcodeError(DemultiplexError):
# def __init__(self, stanza):
# super(UndefinedBarcodeError, self).__init__()
# self.sequence_id = stanza.sequence_id
#
# def __str__(self):
# return repr("sequence_id [{0}] is not in recognized barcodes." \
# .format(self.sequence_id))
. Output only the next line. | demultiplex(source_left, source_right, barcode_files) |
Given the following code snippet before the placeholder: <|code_start|> right_barcodeB = MockWriter()
barcode_files = { \
'AAACCC' : [left_barcodeA, right_barcodeA], \
'GGGTTT' : [left_barcodeB, right_barcodeB] }
demultiplex(source_left, source_right, barcode_files)
self.assertEqual(left_stanza1.splitlines(), left_barcodeA.lines())
self.assertEqual(right_stanza1.splitlines(), right_barcodeA.lines())
self.assertEqual(left_stanza2.splitlines(), left_barcodeB.lines())
self.assertEqual(right_stanza2.splitlines(), right_barcodeB.lines())
def test_demultiplex_fails_on_unrecognized_barcode(self):
left_stanza = "@CAT:DUPE_3:ID_1047:FLAG_1 1\nACGTACGT\n+\nAAAAAAAA"
right_stanza = "@CAT:DUPE_3:ID_1047:FLAG_1 2\nACGTACGT\n+\nAAAAAAAA"
source_left = MockReader(left_stanza)
source_right = MockReader(right_stanza)
barcode_files = {'TTT' : [MockWriter(), MockWriter()] }
self.assertRaises(UndefinedBarcodeError, demultiplex, source_left, source_right, barcode_files)
def test_demultiplex_fails_on_mismatched_headers(self):
left_stanza = "@CAT:DUPE_3:ID_1047:FLAG_1 1\nACGTACGT\n+\nAAAAAAAA"
right_stanza = "@GTA:DUPE_3:ID_1047:FLAG_1 2\nACGTACGT\n+\nAAAAAAAA"
source_left = MockReader(left_stanza)
source_right = MockReader(right_stanza)
barcode_files = {'CAT' : [MockWriter(), MockWriter()] }
<|code_end|>
, predict the next line using imports from the current file:
import unittest
from scripts.demultiplexer import demultiplex, MismatchedSequenceIdError, UndefinedBarcodeError
and context including class names, function names, and sometimes code from other files:
# Path: scripts/demultiplexer.py
# def demultiplex(left_fastq, right_fastq, barcode_files):
# fastq_pairs = itertools.izip(
# FastqStanza.stanza_generator(left_fastq), FastqStanza.stanza_generator(right_fastq))
# for (left_stanza, right_stanza) in fastq_pairs:
# assert_sequence_ids_match(left_stanza, right_stanza)
# barcode = left_stanza.sequence_id.split(':')[0][1:]
# try:
# (left_out, right_out) = barcode_files[barcode]
# except KeyError:
# raise UndefinedBarcodeError(left_stanza)
# left_out.write(left_stanza.as_text())
# right_out.write(right_stanza.as_text())
#
# class MismatchedSequenceIdError(DemultiplexError):
# def __init__(self, left_stanza, right_stanza):
# super(MismatchedSequenceIdError, self).__init__()
# self.left_sequence_id = left_stanza.sequence_id
# self.right_sequence_id = right_stanza.sequence_id
#
# def __str__(self):
# return repr("left/right sequence_ids do not match [{0}] != [{1}].". \
# format(self.left_sequence_id), self.right_sequence_id)
#
# class UndefinedBarcodeError(DemultiplexError):
# def __init__(self, stanza):
# super(UndefinedBarcodeError, self).__init__()
# self.sequence_id = stanza.sequence_id
#
# def __str__(self):
# return repr("sequence_id [{0}] is not in recognized barcodes." \
# .format(self.sequence_id))
. Output only the next line. | self.assertRaises(MismatchedSequenceIdError, demultiplex, source_left, source_right, barcode_files) |
Predict the next line after this snippet: <|code_start|> def test_demultiplex(self):
left_stanza1 = "@AAACCC:DUPE_3:ID_1047:FLAG_1 1\nACGTACGT\n+\nAAAAAAAA"
right_stanza1 = "@AAACCC:DUPE_3:ID_1047:FLAG_1 2\nACGTACGT\n+\nAAAAAAAA"
left_stanza2 = "@GGGTTT:DUPE_3:ID_1047:FLAG_1 1\nACGTACGT\n+\nAAAAAAAA"
right_stanza2 = "@GGGTTT:DUPE_3:ID_1047:FLAG_1 2\nACGTACGT\n+\nAAAAAAAA"
source_left = MockReader(left_stanza1 + "\n" + left_stanza2)
source_right = MockReader(right_stanza1 + "\n" + right_stanza2)
left_barcodeA = MockWriter()
right_barcodeA = MockWriter()
left_barcodeB = MockWriter()
right_barcodeB = MockWriter()
barcode_files = { \
'AAACCC' : [left_barcodeA, right_barcodeA], \
'GGGTTT' : [left_barcodeB, right_barcodeB] }
demultiplex(source_left, source_right, barcode_files)
self.assertEqual(left_stanza1.splitlines(), left_barcodeA.lines())
self.assertEqual(right_stanza1.splitlines(), right_barcodeA.lines())
self.assertEqual(left_stanza2.splitlines(), left_barcodeB.lines())
self.assertEqual(right_stanza2.splitlines(), right_barcodeB.lines())
def test_demultiplex_fails_on_unrecognized_barcode(self):
left_stanza = "@CAT:DUPE_3:ID_1047:FLAG_1 1\nACGTACGT\n+\nAAAAAAAA"
right_stanza = "@CAT:DUPE_3:ID_1047:FLAG_1 2\nACGTACGT\n+\nAAAAAAAA"
source_left = MockReader(left_stanza)
source_right = MockReader(right_stanza)
barcode_files = {'TTT' : [MockWriter(), MockWriter()] }
<|code_end|>
using the current file's imports:
import unittest
from scripts.demultiplexer import demultiplex, MismatchedSequenceIdError, UndefinedBarcodeError
and any relevant context from other files:
# Path: scripts/demultiplexer.py
# def demultiplex(left_fastq, right_fastq, barcode_files):
# fastq_pairs = itertools.izip(
# FastqStanza.stanza_generator(left_fastq), FastqStanza.stanza_generator(right_fastq))
# for (left_stanza, right_stanza) in fastq_pairs:
# assert_sequence_ids_match(left_stanza, right_stanza)
# barcode = left_stanza.sequence_id.split(':')[0][1:]
# try:
# (left_out, right_out) = barcode_files[barcode]
# except KeyError:
# raise UndefinedBarcodeError(left_stanza)
# left_out.write(left_stanza.as_text())
# right_out.write(right_stanza.as_text())
#
# class MismatchedSequenceIdError(DemultiplexError):
# def __init__(self, left_stanza, right_stanza):
# super(MismatchedSequenceIdError, self).__init__()
# self.left_sequence_id = left_stanza.sequence_id
# self.right_sequence_id = right_stanza.sequence_id
#
# def __str__(self):
# return repr("left/right sequence_ids do not match [{0}] != [{1}].". \
# format(self.left_sequence_id), self.right_sequence_id)
#
# class UndefinedBarcodeError(DemultiplexError):
# def __init__(self, stanza):
# super(UndefinedBarcodeError, self).__init__()
# self.sequence_id = stanza.sequence_id
#
# def __str__(self):
# return repr("sequence_id [{0}] is not in recognized barcodes." \
# .format(self.sequence_id))
. Output only the next line. | self.assertRaises(UndefinedBarcodeError, demultiplex, source_left, source_right, barcode_files) |
Continue the code snippet: <|code_start|># Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
# The httplib2 import is implicitly used when mocking its functionality.
# pylint: disable=unused-import
class TestCases(unittest.TestCase):
@mock.patch('httplib2.Response')
@mock.patch('google.datalab.utils._http.Http.http.request')
def test_get_request_is_invoked(self, mock_request, mock_response):
TestCases._setup_mocks(mock_request, mock_response, '{}')
<|code_end|>
. Use current file imports:
import mock
import unittest
from google.datalab.utils._http import Http
and context (classes, functions, or code) from other files:
# Path: google/datalab/utils/_http.py
# class Http(object):
# """A helper class for making HTTP requests.
# """
#
# # Reuse one Http object across requests to take advantage of Keep-Alive, e.g.
# # for BigQuery queries that requires at least ~5 sequential http requests.
# #
# # TODO(nikhilko):
# # SSL cert validation seemingly fails, and workarounds are not amenable
# # to implementing in library code. So configure the Http object to skip
# # doing so, in the interim.
# http = httplib2.Http()
# http.disable_ssl_certificate_validation = True
#
# def __init__(self):
# pass
#
# @staticmethod
# def request(url, args=None, data=None, headers=None, method=None,
# credentials=None, raw_response=False, stats=None):
# """Issues HTTP requests.
#
# Args:
# url: the URL to request.
# args: optional query string arguments.
# data: optional data to be sent within the request.
# headers: optional headers to include in the request.
# method: optional HTTP method to use. If unspecified this is inferred
# (GET or POST) based on the existence of request data.
# credentials: optional set of credentials to authorize the request.
# raw_response: whether the raw response content should be returned as-is.
# stats: an optional dictionary that, if provided, will be populated with some
# useful info about the request, like 'duration' in seconds and 'data_size' in
# bytes. These may be useful optimizing the access to rate-limited APIs.
# Returns:
# The parsed response object.
# Raises:
# Exception when the HTTP request fails or the response cannot be processed.
# """
# if headers is None:
# headers = {}
#
# headers['user-agent'] = 'GoogleCloudDataLab/1.0'
# # Add querystring to the URL if there are any arguments.
# if args is not None:
# qs = urllib.parse.urlencode(args)
# url = url + '?' + qs
#
# # Setup method to POST if unspecified, and appropriate request headers
# # if there is data to be sent within the request.
# if data is not None:
# if method is None:
# method = 'POST'
#
# if data != '':
# # If there is a content type specified, use it (and the data) as-is.
# # Otherwise, assume JSON, and serialize the data object.
# if 'Content-Type' not in headers:
# data = json.dumps(data)
# headers['Content-Type'] = 'application/json'
# headers['Content-Length'] = str(len(data))
# else:
# if method == 'POST':
# headers['Content-Length'] = '0'
#
# # If the method is still unset, i.e. it was unspecified, and there
# # was no data to be POSTed, then default to GET request.
# if method is None:
# method = 'GET'
#
# http = Http.http
#
# # Authorize with credentials if given
# if credentials is not None:
# # Make a copy of the shared http instance before we modify it.
# http = copy.copy(http)
# http = google_auth_httplib2.AuthorizedHttp(credentials)
# if stats is not None:
# stats['duration'] = datetime.datetime.utcnow()
#
# response = None
# try:
# log.debug('request: method[%(method)s], url[%(url)s], body[%(data)s]' % locals())
# response, content = http.request(url,
# method=method,
# body=data,
# headers=headers)
# if 200 <= response.status < 300:
# if raw_response:
# return content
# if type(content) == str:
# return json.loads(content)
# else:
# return json.loads(str(content, encoding='UTF-8'))
# else:
# raise RequestException(response.status, content)
# except ValueError:
# raise Exception('Failed to process HTTP response.')
# except httplib2.HttpLib2Error:
# raise Exception('Failed to send HTTP request.')
# finally:
# if stats is not None:
# stats['data_size'] = len(data)
# stats['status'] = response.status
# stats['duration'] = (datetime.datetime.utcnow() - stats['duration']).total_seconds()
. Output only the next line. | Http.request('http://www.example.org') |
Predict the next line for this snippet: <|code_start|>class TestCases(unittest.TestCase):
TEST_PROJECT_ID = 'test_project'
def validate(self, mock_http_request, expected_url, expected_args=None, expected_data=None,
expected_headers=None, expected_method=None):
url = mock_http_request.call_args[0][0]
kwargs = mock_http_request.call_args[1]
self.assertEquals(expected_url, url)
if expected_args is not None:
self.assertEquals(expected_args, kwargs['args'])
else:
self.assertNotIn('args', kwargs)
if expected_data is not None:
self.assertEquals(expected_data, kwargs['data'])
else:
self.assertNotIn('data', kwargs)
if expected_headers is not None:
self.assertEquals(expected_headers, kwargs['headers'])
else:
self.assertNotIn('headers', kwargs)
if expected_method is not None:
self.assertEquals(expected_method, kwargs['method'])
else:
self.assertNotIn('method', kwargs)
@mock.patch('google.datalab.Context.default')
@mock.patch('google.datalab.utils.Http.request')
def test_environment_details_get(self, mock_http_request, mock_context_default):
mock_context_default.return_value = TestCases._create_context()
<|code_end|>
with the help of current file imports:
import unittest
import mock
import google.auth
import google.datalab.utils
from google.datalab.contrib.pipeline.composer._api import Api
and context from other files:
# Path: google/datalab/contrib/pipeline/composer/_api.py
# class Api(object):
# """A helper class to issue Composer HTTP requests."""
#
# _ENDPOINT = 'https://composer.googleapis.com/v1alpha1'
# _ENVIRONMENTS_PATH_FORMAT = '/projects/%s/locations/%s/environments/%s'
#
# @staticmethod
# def get_environment_details(zone, environment):
# """ Issues a request to Composer to get the environment details.
#
# Args:
# zone: GCP zone of the composer environment
# environment: name of the Composer environment
# Returns:
# A parsed result object.
# Raises:
# Exception if there is an error performing the operation.
# """
# default_context = google.datalab.Context.default()
# url = (Api._ENDPOINT + (Api._ENVIRONMENTS_PATH_FORMAT % (default_context.project_id, zone,
# environment)))
#
# return google.datalab.utils.Http.request(url, credentials=default_context.credentials)
, which may contain function names, class names, or code. Output only the next line. | Api.get_environment_details('ZONE', 'ENVIRONMENT') |
Based on the snippet: <|code_start|> """ Represents a Composer object that encapsulates a set of functionality relating to the
Cloud Composer service.
This object can be used to generate the python airflow spec.
"""
gcs_file_regexp = re.compile('gs://.*')
def __init__(self, zone, environment):
""" Initializes an instance of a Composer object.
Args:
zone: Zone in which Composer environment has been created.
environment: Name of the Composer environment.
"""
self._zone = zone
self._environment = environment
self._gcs_dag_location = None
def deploy(self, name, dag_string):
bucket_name, file_path = self.gcs_dag_location.split('/', 3)[2:] # setting maxsplit to 3
file_name = '{0}{1}.py'.format(file_path, name)
bucket = storage.Bucket(bucket_name)
file_object = bucket.object(file_name)
file_object.write_stream(dag_string, 'text/plain')
@property
def gcs_dag_location(self):
if not self._gcs_dag_location:
<|code_end|>
, predict the immediate next line with the help of imports:
import google.datalab.storage as storage
import re
from google.datalab.contrib.pipeline.composer._api import Api
and context (classes, functions, sometimes code) from other files:
# Path: google/datalab/contrib/pipeline/composer/_api.py
# class Api(object):
# """A helper class to issue Composer HTTP requests."""
#
# _ENDPOINT = 'https://composer.googleapis.com/v1alpha1'
# _ENVIRONMENTS_PATH_FORMAT = '/projects/%s/locations/%s/environments/%s'
#
# @staticmethod
# def get_environment_details(zone, environment):
# """ Issues a request to Composer to get the environment details.
#
# Args:
# zone: GCP zone of the composer environment
# environment: name of the Composer environment
# Returns:
# A parsed result object.
# Raises:
# Exception if there is an error performing the operation.
# """
# default_context = google.datalab.Context.default()
# url = (Api._ENDPOINT + (Api._ENVIRONMENTS_PATH_FORMAT % (default_context.project_id, zone,
# environment)))
#
# return google.datalab.utils.Http.request(url, credentials=default_context.credentials)
. Output only the next line. | environment_details = Api.get_environment_details(self._zone, self._environment) |
Using the snippet: <|code_start|># Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
class TestCases(unittest.TestCase):
@mock.patch('google.datalab.Context.default')
@mock.patch('google.datalab.storage.Bucket')
@mock.patch('google.datalab.contrib.pipeline.composer._api.Api.get_environment_details')
def test_deploy(self, mock_environment_details, mock_bucket_class, mock_default_context):
# Happy path
mock_environment_details.return_value = {
'config': {
'gcsDagLocation': 'gs://foo_bucket/dags'
}
}
<|code_end|>
, determine the next line of code. You have imports:
import unittest
import mock
from google.datalab.contrib.pipeline.composer._composer import Composer
and context (class names, function names, or code) available:
# Path: google/datalab/contrib/pipeline/composer/_composer.py
# class Composer(object):
# """ Represents a Composer object that encapsulates a set of functionality relating to the
# Cloud Composer service.
#
# This object can be used to generate the python airflow spec.
# """
#
# gcs_file_regexp = re.compile('gs://.*')
#
# def __init__(self, zone, environment):
# """ Initializes an instance of a Composer object.
#
# Args:
# zone: Zone in which Composer environment has been created.
# environment: Name of the Composer environment.
# """
# self._zone = zone
# self._environment = environment
# self._gcs_dag_location = None
#
# def deploy(self, name, dag_string):
# bucket_name, file_path = self.gcs_dag_location.split('/', 3)[2:] # setting maxsplit to 3
# file_name = '{0}{1}.py'.format(file_path, name)
#
# bucket = storage.Bucket(bucket_name)
# file_object = bucket.object(file_name)
# file_object.write_stream(dag_string, 'text/plain')
#
# @property
# def gcs_dag_location(self):
# if not self._gcs_dag_location:
# environment_details = Api.get_environment_details(self._zone, self._environment)
#
# if ('config' not in environment_details or
# 'gcsDagLocation' not in environment_details.get('config')):
# raise ValueError('Dag location unavailable from Composer environment {0}'.format(
# self._environment))
# gcs_dag_location = environment_details['config']['gcsDagLocation']
#
# if gcs_dag_location is None or not self.gcs_file_regexp.match(gcs_dag_location):
# raise ValueError(
# 'Dag location {0} from Composer environment {1} is in incorrect format'.format(
# gcs_dag_location, self._environment))
#
# self._gcs_dag_location = gcs_dag_location
# if gcs_dag_location.endswith('/') is False:
# self._gcs_dag_location = self._gcs_dag_location + '/'
#
# return self._gcs_dag_location
. Output only the next line. | test_composer = Composer('foo_zone', 'foo_environment') |
Given snippet: <|code_start|> return r'(?P<%s>[01])' % self._attr_name
if self._attr_type == 'int':
return r'(?P<%s>[0-9]+)' % self._attr_name
if self._attr_type == 'float':
return r'(?P<%s>[0-9,.]+)' % self._attr_name
if self._attr_type == 'string':
# take my excuses for concatenation - it's because of the `%` symbol
return r'(?P<' + self._attr_name + '>[^/+?$]+)'
def params(self):
"""
dict() with parsed attribute params
"""
return {
# 'attribute_name': self._attr_name,
'attribute_type': self._attr_type,
'length_limit': self._len_limit,
'second_params': self._second_param,
}
def name(self):
"""
Attribute name
"""
return self._attr_name
def _bool_validator(self):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from voluptuous import Schema, Required, Coerce, Boolean, Length, All, Range, In
from voluptuous.error import Error
from m2core.utils.voluptuous_checkers import NotNone
and context:
# Path: m2core/utils/voluptuous_checkers.py
# def NotNone(msg=None):
# """Checks that value is not None neither empty string"""
# def f(v):
# if v is not None:
# if type(v) == str:
# if v == '':
# raise Error(msg or "value must be not empty")
# else:
# return v
# return v
# else:
# raise Error(msg or "value must be not None")
# return f
which might include code, classes, or functions. Output only the next line. | return {Required(self._attr_name): All(NotNone(msg='<%s> should be not None' % self._attr_name), Boolean())} |
Using the snippet: <|code_start|>__author__ = 'Maxim Dutkin (max@dutkin.ru)'
class M2CoreIntEnumTest(unittest.TestCase):
def setUp(self):
<|code_end|>
, determine the next line of code. You have imports:
import unittest
from m2core.common.int_enum import M2CoreIntEnum
and context (class names, function names, or code) available:
# Path: m2core/common/int_enum.py
# class M2CoreIntEnum(IntEnum):
# @classmethod
# def get(cls, member_value: int or str):
# """
# Returns enum member from `int` ID. If no member found - returns `None`
#
# :param member_value:
#
# :return: enum member or `None`
# """
# if type(member_value) is int:
# try:
# return cls(member_value)
# except ValueError:
# return None
# elif type(member_value) is str:
# for m in cls.all():
# if m.name == member_value:
# return m
# return None
# else:
# raise AttributeError('You can load enum members only by `str` name or `int` value')
#
# @classmethod
# def all(cls):
# return [_ for _ in cls]
. Output only the next line. | class SampleEnum(M2CoreIntEnum): |
Given snippet: <|code_start|> self._current_token = None
self._redis = redis_connector
self._redis_scheme = redis_scheme
self.__inited = False
def get_user_id(self) -> None or int:
"""
Returns user id from Redis (if found)
:return:
"""
self._check_inited()
return self._current_user
def get_token(self) -> str:
"""
Returns access token from Redis
:return:
"""
self._check_inited()
return self._current_token
def generate_token(self, user_id: int) -> dict:
"""
Generates token per specified user, stores it in Redis and returns generated token with expiration info
:param user_id:
:return:
"""
# generate token
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from m2core.utils.data_helper import DataHelper
from m2core.data_schemes.db_system_scheme import M2Permission
from m2core.common.options import options
and context:
# Path: m2core/utils/data_helper.py
# class DataHelper:
# @staticmethod
# def random_char(length: int) -> str:
# """
# Generates random char sequence with the given length
# """
# return ''.join(random.choice(string.ascii_letters) for x in range(length))
#
# @staticmethod
# def random_hex_str(length: int) -> str:
# """
# Generates random alpha-numeric sequence, where all chars are from hex set (a-f)
# :param length:
# :return:
# """
# return '%x' % random.randrange(16 ** length)
#
# @staticmethod
# def camel_to_underline(camel) -> str:
# """
# Converts camelcased-string to underlined string, example:
# ConvertMeToUnderline -> convert_me_to_underline
# :param camel: string to convert
# :return: converted string
# """
# s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
# return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
#
# Path: m2core/data_schemes/db_system_scheme.py
# class M2Permission(BaseModel):
# __repr_list__ = ['id', 'system_name']
#
# id = Column(BigInteger, primary_key=True)
# name = Column(String(255))
# system_name = Column(String(255), unique=True)
# description = Column(String(500))
# active = Column(Boolean, default=True, server_default='1')
# created = Column(DateTime(timezone=True), server_default=text('now()'), nullable=False)
#
# @property
# def enum_member(self):
# if not hasattr(self, '__enum_member'):
# all_perms = PermissionsEnum.all_platform_permissions
# sys_name = self.system_name
# for p in all_perms:
# if p.sys_name == sys_name:
# setattr(self, '__enum_member', p)
# return p
# raise M2Error(f'No corresponding enum member found for Permission with sys_name=`{sys_name}`')
# else:
# return getattr(self, '__enum_member')
#
# @classmethod
# def from_enum_member(cls, member: Permission):
# entity = cls.s.query(M2RolePermission).filter(M2Permission.system_name == member.sys_name).first()
# if not entity:
# raise M2Error(f'No corresponding permission found for Permission with sys_name=`{member.sys_name}`')
#
# return entity
#
# Path: m2core/common/options.py
# class M2OptionParser(OptionParser):
# def parse_environment(self, final=True):
which might include code, classes, or functions. Output only the next line. | token = '%s_%s' % (DataHelper.random_hex_str(8), DataHelper.random_hex_str(32)) |
Given the code snippet: <|code_start|> self._redis.srem(self._redis_scheme['ROLE_PERMISSIONS']['prefix'] % role_id, permission)
def dump_user_roles(self, user_id: int, role_ids: list):
"""
Stores (rewrites) user roles list in Redis
"""
# delete all existing roles
self._redis.delete(self._redis_scheme['USER_ROLES']['prefix'] % user_id)
# and add new ones
if len(role_ids):
self._redis.sadd(self._redis_scheme['USER_ROLES']['prefix'] % user_id, *role_ids)
def get_user_permissions(self):
"""
        Returns all user permissions based on its roles
"""
self._check_inited()
# get user role ids
redis_val = self._redis.smembers(
self._redis_scheme['USER_ROLES']['prefix'] % self._current_user
)
group_ids = [int(role_id) for role_id in redis_val]
all_permissions = set()
for group_id in group_ids:
# get permissions per each role by role id
permissions = self._redis.smembers(
self._redis_scheme['ROLE_PERMISSIONS']['prefix'] % group_id
)
for p_name in permissions:
<|code_end|>
, generate the next line using the imports in this file:
from m2core.utils.data_helper import DataHelper
from m2core.data_schemes.db_system_scheme import M2Permission
from m2core.common.options import options
and context (functions, classes, or occasionally code) from other files:
# Path: m2core/utils/data_helper.py
# class DataHelper:
# @staticmethod
# def random_char(length: int) -> str:
# """
# Generates random char sequence with the given length
# """
# return ''.join(random.choice(string.ascii_letters) for x in range(length))
#
# @staticmethod
# def random_hex_str(length: int) -> str:
# """
# Generates random alpha-numeric sequence, where all chars are from hex set (a-f)
# :param length:
# :return:
# """
# return '%x' % random.randrange(16 ** length)
#
# @staticmethod
# def camel_to_underline(camel) -> str:
# """
# Converts camelcased-string to underlined string, example:
# ConvertMeToUnderline -> convert_me_to_underline
# :param camel: string to convert
# :return: converted string
# """
# s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
# return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
#
# Path: m2core/data_schemes/db_system_scheme.py
# class M2Permission(BaseModel):
# __repr_list__ = ['id', 'system_name']
#
# id = Column(BigInteger, primary_key=True)
# name = Column(String(255))
# system_name = Column(String(255), unique=True)
# description = Column(String(500))
# active = Column(Boolean, default=True, server_default='1')
# created = Column(DateTime(timezone=True), server_default=text('now()'), nullable=False)
#
# @property
# def enum_member(self):
# if not hasattr(self, '__enum_member'):
# all_perms = PermissionsEnum.all_platform_permissions
# sys_name = self.system_name
# for p in all_perms:
# if p.sys_name == sys_name:
# setattr(self, '__enum_member', p)
# return p
# raise M2Error(f'No corresponding enum member found for Permission with sys_name=`{sys_name}`')
# else:
# return getattr(self, '__enum_member')
#
# @classmethod
# def from_enum_member(cls, member: Permission):
# entity = cls.s.query(M2RolePermission).filter(M2Permission.system_name == member.sys_name).first()
# if not entity:
# raise M2Error(f'No corresponding permission found for Permission with sys_name=`{member.sys_name}`')
#
# return entity
#
# Path: m2core/common/options.py
# class M2OptionParser(OptionParser):
# def parse_environment(self, final=True):
. Output only the next line. | p = M2Permission.load_by_params(system_name=p_name) |
Using the snippet: <|code_start|> self._redis_scheme['USER_ROLES']['prefix'] % self._current_user
)
group_ids = [int(role_id) for role_id in redis_val]
all_permissions = set()
for group_id in group_ids:
# get permissions per each role by role id
permissions = self._redis.smembers(
self._redis_scheme['ROLE_PERMISSIONS']['prefix'] % group_id
)
for p_name in permissions:
p = M2Permission.load_by_params(system_name=p_name)
if p:
all_permissions.add(p)
return all_permissions
def _check_inited(self):
"""
Check if instance was inited with user's data or not
"""
if not self.__inited:
raise Exception('Session is not inited')
def init_user(self, _access_token: str):
"""
        Init current instance with user access token. This is the place where data per user is requested from Redis
:param _access_token:
"""
redis_val = self._redis.get(
self._redis_scheme['ACCESS_TOKENS_BY_HASH']['prefix'] % _access_token
)
<|code_end|>
, determine the next line of code. You have imports:
from m2core.utils.data_helper import DataHelper
from m2core.data_schemes.db_system_scheme import M2Permission
from m2core.common.options import options
and context (class names, function names, or code) available:
# Path: m2core/utils/data_helper.py
# class DataHelper:
# @staticmethod
# def random_char(length: int) -> str:
# """
# Generates random char sequence with the given length
# """
# return ''.join(random.choice(string.ascii_letters) for x in range(length))
#
# @staticmethod
# def random_hex_str(length: int) -> str:
# """
# Generates random alpha-numeric sequence, where all chars are from hex set (a-f)
# :param length:
# :return:
# """
# return '%x' % random.randrange(16 ** length)
#
# @staticmethod
# def camel_to_underline(camel) -> str:
# """
# Converts camelcased-string to underlined string, example:
# ConvertMeToUnderline -> convert_me_to_underline
# :param camel: string to convert
# :return: converted string
# """
# s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
# return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
#
# Path: m2core/data_schemes/db_system_scheme.py
# class M2Permission(BaseModel):
# __repr_list__ = ['id', 'system_name']
#
# id = Column(BigInteger, primary_key=True)
# name = Column(String(255))
# system_name = Column(String(255), unique=True)
# description = Column(String(500))
# active = Column(Boolean, default=True, server_default='1')
# created = Column(DateTime(timezone=True), server_default=text('now()'), nullable=False)
#
# @property
# def enum_member(self):
# if not hasattr(self, '__enum_member'):
# all_perms = PermissionsEnum.all_platform_permissions
# sys_name = self.system_name
# for p in all_perms:
# if p.sys_name == sys_name:
# setattr(self, '__enum_member', p)
# return p
# raise M2Error(f'No corresponding enum member found for Permission with sys_name=`{sys_name}`')
# else:
# return getattr(self, '__enum_member')
#
# @classmethod
# def from_enum_member(cls, member: Permission):
# entity = cls.s.query(M2RolePermission).filter(M2Permission.system_name == member.sys_name).first()
# if not entity:
# raise M2Error(f'No corresponding permission found for Permission with sys_name=`{member.sys_name}`')
#
# return entity
#
# Path: m2core/common/options.py
# class M2OptionParser(OptionParser):
# def parse_environment(self, final=True):
. Output only the next line. | if options.access_token_update_on_check: |
Predict the next line for this snippet: <|code_start|> return self in user_permissions
else:
return self.rule_chain(user_permissions)
def __repr__(self):
if self.rule_chain:
return self.rule_chain.__repr__()
else:
return f'<{self.__class__.__name__}.{self.sys_name}>'
class PermissionsEnumMeta(type):
"""Metaclass for PermissionsEnum"""
def __init__(cls, name, bases, nmspc):
super(PermissionsEnumMeta, cls).__init__(name, bases, nmspc)
if not hasattr(cls, 'registry'):
cls.registry = set()
cls.registry.add(cls)
cls.registry -= set(bases) # Remove base classes
def __str__(cls):
if cls in cls.registry:
return cls.__name__
return cls.__name__ + ": " + ", ".join([sc.__name__ for sc in cls])
class PermissionsEnum(metaclass=PermissionsEnumMeta):
AUTHORIZED = Permission('authorized')
<|code_end|>
with the help of current file imports:
import re
from m2core.utils.decorators import classproperty
and context from other files:
# Path: m2core/utils/decorators.py
# class classproperty(property):
# def __get__(self, obj, objtype=None):
# return super(classproperty, self).__get__(objtype)
#
# def __set__(self, obj, value):
# super(classproperty, self).__set__(type(obj), value)
#
# def __delete__(self, obj):
# super(classproperty, self).__delete__(type(obj))
, which may contain function names, class names, or code. Output only the next line. | @classproperty |
Given the code snippet: <|code_start|> query = query.filter(getattr(cls, _field) == _params[_field])
if order_by:
order_by_params = order_by.split(' ')
order_function = globals()[order_by_params[1]]
query = query.order_by(order_function(getattr(cls, order_by_params[0]), ))
return query
@classproperty
def settable_attributes(cls):
return cls.columns + cls.hybrid_properties + cls.settable_relations
def set_and_save(self, **_params):
"""
Updates instance and permanently saves changes to DB
:param _params: data to save
"""
self.set(**_params)
return self.save()
def set(self, **_params):
"""
Sets fields in instance without saving
:param _params: data to save
"""
try:
for name in _params.keys():
if name in self.settable_attributes:
setattr(self, name, _params[name])
else:
<|code_end|>
, generate the next line using the imports in this file:
from .session_mixin import SessionMixin
from sqlalchemy import func, text
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import RelationshipProperty, class_mapper
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.engine import reflection
from m2core.utils.error import M2Error
from m2core.utils.decorators import classproperty
import operator
import copy
and context (functions, classes, or occasionally code) from other files:
# Path: m2core/db/sqlalchemy_mixins/session_mixin.py
# class SessionMixin:
# __abstract__ = True
#
# @classmethod
# def set_db_session(cls, session) -> scoped_session or Session:
# """
# Sets DB Session during M2Core initialization with this method
# """
# cls._db_session = session
#
# @classproperty
# def s(cls) -> scoped_session or Session:
# """
# Returns DB Session
# """
# if cls._db_session:
# return cls._db_session
# else:
# raise M2Error('No DB session defined')
#
# @classmethod
# def set_redis_session(cls, session) -> scoped_session or Session:
# """
# Sets Redis Session during M2Core initialization with this method
# """
# cls._redis_session = session
#
# @classproperty
# def r(cls) -> StrictRedis:
# """
# Returns Redis Session
# """
# if cls._redis_session:
# return cls._redis_session
# else:
# raise M2Error('No Redis session defined')
#
# @classproperty
# def sh(cls):
# """
# Returns instance of Session Helper
# :return:
# """
# if not cls.r:
# raise M2Error('No Redis session defined')
# return cls._sh_cls(cls.r['connector'], cls.r['scheme'])
#
# @classmethod
# def set_sh(cls, sh_cls):
# """
# Sets DB Session during M2Core initialization with this method
# """
# cls._sh_cls = sh_cls
#
# @classproperty
# def q(cls) -> Query:
# """
# Returns prepared Query taken from DB Session
# """
# if not cls.s:
# raise M2Error('No DB session defined')
# return cls.s.query(cls)
#
# Path: m2core/utils/error.py
# class M2Error(Exception):
# """
#     Raise this error wherever you want your exception to be raised, logged and shown to the user with a custom message
# """
# def __init__(self, msg: str='', show_to_user: bool=False):
# self.error_message = msg
# self.show_to_user = show_to_user
# self._log()
#
# def _log(self):
# logger.error(self.error_message)
#
# def __repr__(self):
# return self.error_message
#
# Path: m2core/utils/decorators.py
# class classproperty(property):
# def __get__(self, obj, objtype=None):
# return super(classproperty, self).__get__(objtype)
#
# def __set__(self, obj, value):
# super(classproperty, self).__set__(type(obj), value)
#
# def __delete__(self, obj):
# super(classproperty, self).__delete__(type(obj))
. Output only the next line. | raise M2Error('Error while trying to set non-existent property `%s`' % name) |
Given snippet: <|code_start|># import all handlers
# import core
if __name__ == '__main__':
# INIT M2CORE
options.config_name = 'config.py'
m2core = M2Core()
# setup core logger level
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from m2core.common.options import options
from example.handlers import *
from m2core import M2Core, logger as core_logger
from tornado.gen import coroutine, sleep
import logging
and context:
# Path: m2core/common/options.py
# class M2OptionParser(OptionParser):
# def parse_environment(self, final=True):
#
# Path: m2core/m2core.py
# class M2Core:
# def requires_permission(handler_method_func):
# def decorated(handler_instance, *args, **kwargs):
# def user_can(handler_method):
# def decorated(handler_instance, *args, **kwargs):
# def tryex(*errors):
# def decorator(func):
# def new_func(handler, *args, **kwargs):
# def __recreate_db(self):
# def __init__(self):
# def __make_app(self):
# def route(self, human_route: str=None, handler_cls: Type[RequestHandler]=None, rule_group: str=None,
# extra: dict=None, **kwargs):
# def add_endpoint(self, human_route: str, handler_class: Type[RequestHandler], extra_params: dict = dict()):
# def add_endpoint_method_permissions(self, human_route: str, method: str, permissions: list or None):
# def add_endpoint_permissions(self, human_route: str, permissions: dict):
# def add_redis_scheme(self, _redis_scheme: dict):
# def thread_pool(self) -> ThreadPoolExecutor:
# def db_engine(self) -> Engine:
# def db_session(self) -> scoped_session:
# def redis_session(self) -> redis.StrictRedis:
# def redis_tables(self) -> dict:
# def app(self) -> tornado.web.Application:
# def __make_db_session(self):
# def __make_thread_pool(self):
# def __make_redis_session(self):
# def add_callback(self, callback: callable, *args, **kwargs):
# def extended(self, callback: callable):
# def call_me_maybe(*args, **kwargs):
# def add_custom_response_headers(self, headers: dict()):
# def run(self):
# def run_with_recreate(self):
# def run_for_test(self) -> tornado.web.Application:
# def add_test_user(self, at: str, user_id: int=None, permissions: set=None):
# def get_test_user(self, at: str) -> dict:
# def sync_permissions():
# def dump_roles():
which might include code, classes, or functions. Output only the next line. | core_logger.setLevel(logging.DEBUG) |
Here is a snippet: <|code_start|>__author__ = 'Maxim Dutkin (max@dutkin.ru)'
class Rules(defaultdict):
def validator(self, human_route: str=None):
return self[human_route]['validator']
def docs(self, human_route: str=None, method: str=None):
return self[human_route]['docs'].get(method.upper())
def permissions(self, human_route: str=None, method: str=None):
return self[human_route]['permissions'].get(method.upper())
def group(self, human_route: str=None):
return self[human_route]['group']
def add_meta(self, human_route, handler_cls: Type[RequestHandler], rule_group: str, method_permissions: dict) \
-> UrlParser:
# add documentation per each method in SUPPORTED_METHODS
for method in handler_cls.__dict__:
method_upper = method.upper()
if method_upper in handler_cls.SUPPORTED_METHODS:
self[human_route]['docs'][method_upper] = handler_cls.__dict__[method].__doc__
# also add permissions per each method. default - skip check
<|code_end|>
. Write the next line using the current file imports:
from collections import defaultdict
from tornado.web import RequestHandler
from typing import Type
from m2core.common.permissions import PermissionsEnum
from m2core.utils.url_parser import UrlParser
and context from other files:
# Path: m2core/common/permissions.py
# class PermissionsEnum(metaclass=PermissionsEnumMeta):
# AUTHORIZED = Permission('authorized')
#
# @classproperty
# def ALL(cls):
# cache_var_name = '__all_cache'
# cached_perms = getattr(cls, cache_var_name, None)
# if cached_perms is None:
# setattr(cls, cache_var_name, set())
# cached_perms = getattr(cls, cache_var_name)
# for attr_name in dir(cls):
# if not attr_name.startswith('_') and attr_name != 'ALL':
# attr = getattr(cls, attr_name)
# if type(attr) is Permission:
# cached_perms.add(attr)
# return cached_perms
#
# @classproperty
# def SKIP(cls):
# return lambda p: True
#
# @classproperty
# def all_platform_permissions(cls):
# sub_classes = PermissionsEnum.registry
# all_perms = set()
# for c in sub_classes:
# all_perms |= c.ALL
#
# return all_perms
#
# Path: m2core/utils/url_parser.py
# class UrlParser:
# """
# Great class for making url masks, generating validators for url parameters, url attribute descriptions.
# Here are examples of different url masks which you can comfortably use:
#
# /users/:{id} -> :id - attribute, any type
# /users/:{id:int} -> :id - int attribute, any length
# /users/:{id:int(2)} -> :id - int attribute, length is 2 numbers
# /users/:{id:float} -> :id - float attribute
# /users/:{id:float(3)} -> :id - float attribute, length is 3 numbers including `,`
# /users/:{id:float(2,5)} -> :id - float attribute, length is between 2 and 5 numbers including `,`
# /users/:{id:string} -> :id - string, any length, without `/` symbol
# /users/:{id:string(2)} -> :id - string, length is 2 symbols, without `/` symbol
# /users/:{id:bool} -> :id - bool flag, accepts only `0` or `1`
# /users/:{id:int(0,[0-100])} -> :id - int, any length (0), but value must be between `0` and `100`
# /users/:{id:float(0,[0-100])} -> :id - float, any length (0), but value must be between `0` and `100`
# /users/:{id:string(0,[string1;string2;string3])} -> :id - string, any length (0), but value must be in list
# of values: ('string1', 'string2', 'string3')
# """
#
# def __init__(self, url: str):
# self.original_url = url
# self.url_attributes = []
# self._full_match = ''
# self.__parse()
#
# def __parse(self):
# # check for `:{}`
# m = re.finditer(r':{(?P<attr_name>\w+)(?:(?::(?P<attr_type>|int|float|string|bool))'
# r'(?:\((?P<type_params>[^)]+)\))?)?}', self.original_url)
# for match in m:
# attr = UrlParserAttr(match.group('attr_name'), match.group('attr_type'), match.group('type_params'))
# self.url_attributes.append({'instance': attr, 'full_match': match.group(0)})
#
# def tornado_url(self):
# result = self.original_url
# for attr in self.url_attributes:
# result = result.replace(attr['full_match'], attr['instance'].replacement(), 1)
# return result
#
# def params(self):
# result = dict()
# for param in self.url_attributes:
# result[param['instance'].name()] = param['instance'].params()
# return result
#
# def validator_schema(self):
# result = dict()
# for attr in self.url_attributes:
# result.update(attr['instance'].validator())
# return Schema(result)
#
# def __repr__(self):
# return self.original_url
, which may include functions, classes, or code. Output only the next line. | permissions = PermissionsEnum.SKIP |
Given snippet: <|code_start|>__author__ = 'Maxim Dutkin (max@dutkin.ru)'
class Rules(defaultdict):
def validator(self, human_route: str=None):
return self[human_route]['validator']
def docs(self, human_route: str=None, method: str=None):
return self[human_route]['docs'].get(method.upper())
def permissions(self, human_route: str=None, method: str=None):
return self[human_route]['permissions'].get(method.upper())
def group(self, human_route: str=None):
return self[human_route]['group']
def add_meta(self, human_route, handler_cls: Type[RequestHandler], rule_group: str, method_permissions: dict) \
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from collections import defaultdict
from tornado.web import RequestHandler
from typing import Type
from m2core.common.permissions import PermissionsEnum
from m2core.utils.url_parser import UrlParser
and context:
# Path: m2core/common/permissions.py
# class PermissionsEnum(metaclass=PermissionsEnumMeta):
# AUTHORIZED = Permission('authorized')
#
# @classproperty
# def ALL(cls):
# cache_var_name = '__all_cache'
# cached_perms = getattr(cls, cache_var_name, None)
# if cached_perms is None:
# setattr(cls, cache_var_name, set())
# cached_perms = getattr(cls, cache_var_name)
# for attr_name in dir(cls):
# if not attr_name.startswith('_') and attr_name != 'ALL':
# attr = getattr(cls, attr_name)
# if type(attr) is Permission:
# cached_perms.add(attr)
# return cached_perms
#
# @classproperty
# def SKIP(cls):
# return lambda p: True
#
# @classproperty
# def all_platform_permissions(cls):
# sub_classes = PermissionsEnum.registry
# all_perms = set()
# for c in sub_classes:
# all_perms |= c.ALL
#
# return all_perms
#
# Path: m2core/utils/url_parser.py
# class UrlParser:
# """
# Great class for making url masks, generating validators for url parameters, url attribute descriptions.
# Here are examples of different url masks which you can comfortably use:
#
# /users/:{id} -> :id - attribute, any type
# /users/:{id:int} -> :id - int attribute, any length
# /users/:{id:int(2)} -> :id - int attribute, length is 2 numbers
# /users/:{id:float} -> :id - float attribute
# /users/:{id:float(3)} -> :id - float attribute, length is 3 numbers including `,`
# /users/:{id:float(2,5)} -> :id - float attribute, length is between 2 and 5 numbers including `,`
# /users/:{id:string} -> :id - string, any length, without `/` symbol
# /users/:{id:string(2)} -> :id - string, length is 2 symbols, without `/` symbol
# /users/:{id:bool} -> :id - bool flag, accepts only `0` or `1`
# /users/:{id:int(0,[0-100])} -> :id - int, any length (0), but value must be between `0` and `100`
# /users/:{id:float(0,[0-100])} -> :id - float, any length (0), but value must be between `0` and `100`
# /users/:{id:string(0,[string1;string2;string3])} -> :id - string, any length (0), but value must be in list
# of values: ('string1', 'string2', 'string3')
# """
#
# def __init__(self, url: str):
# self.original_url = url
# self.url_attributes = []
# self._full_match = ''
# self.__parse()
#
# def __parse(self):
# # check for `:{}`
# m = re.finditer(r':{(?P<attr_name>\w+)(?:(?::(?P<attr_type>|int|float|string|bool))'
# r'(?:\((?P<type_params>[^)]+)\))?)?}', self.original_url)
# for match in m:
# attr = UrlParserAttr(match.group('attr_name'), match.group('attr_type'), match.group('type_params'))
# self.url_attributes.append({'instance': attr, 'full_match': match.group(0)})
#
# def tornado_url(self):
# result = self.original_url
# for attr in self.url_attributes:
# result = result.replace(attr['full_match'], attr['instance'].replacement(), 1)
# return result
#
# def params(self):
# result = dict()
# for param in self.url_attributes:
# result[param['instance'].name()] = param['instance'].params()
# return result
#
# def validator_schema(self):
# result = dict()
# for attr in self.url_attributes:
# result.update(attr['instance'].validator())
# return Schema(result)
#
# def __repr__(self):
# return self.original_url
which might include code, classes, or functions. Output only the next line. | -> UrlParser: |
Given the code snippet: <|code_start|>
class CreatedMixin:
"""
Stored in UTC without any offset and timezone
"""
created = Column(DateTime(timezone=False), default=datetime.utcnow, server_default=text('now()'), nullable=False)
updated = Column(DateTime(timezone=False), default=datetime.utcnow, server_default=text('now()'),
onupdate=datetime.utcnow, nullable=False)
CreatedMixin.created._creation_order = 9998
CreatedMixin.updated._creation_order = 9999
class SortMixin:
sort_order = Column(BigInteger, default=0, server_default='0', nullable=False)
SortMixin.sort_order._creation_order = 9997
class M2PermissionCheckMixin:
def can(self, permission_rule: Permission) -> bool:
return permission_rule(self.permissions)
<|code_end|>
, generate the next line using the imports in this file:
from datetime import datetime
from sqlalchemy import (
BigInteger,
Boolean,
Column,
DateTime,
ForeignKey,
String,
UniqueConstraint,
text,
)
from sqlalchemy.exc import SQLAlchemyError
from m2core.bases.base_model import BaseModel
from m2core.common.permissions import Permission, PermissionsEnum
from m2core.utils.error import M2Error
from typing import List
and context (functions, classes, or occasionally code) from other files:
# Path: m2core/bases/base_model.py
# class BaseModel(MetaBase, EnchantedMixin):
# """
# Use this BaseModel class
# """
# pass
#
# Path: m2core/common/permissions.py
# class Permission:
# def __init__(self, name: str=None, sys_name: str=None, description: str=None):
# if name is None or not len(name):
# raise AttributeError('`name` param should not be `None` or it\'s length should be > 0')
# self._name = name
# self._sys_name = sys_name
# self._description = description
# self.rule_chain = None
#
# @property
# def sys_name(self):
# return self._sys_name or re.sub(r'\W', '_', self._name).upper()
#
# @property
# def name(self):
# return self._name
#
# @property
# def description(self):
# return self._description
#
# def copy(self):
# return Permission(name=self._name, sys_name=self._sys_name)
#
# def __check_type(self, _type):
# return self.rule_chain is not None and type(self.rule_chain) is _type
#
# def __do_magic(self, other: 'Permission', _m_class: type(BasePermissionRule)):
# l_side = self.rule_chain
# r_side = other.rule_chain
# result_perm = other.copy()
# if l_side and r_side:
# # we get there only after both sides have been evaluated and compiler met distinct operator
# if type(l_side) is _m_class:
# l_side.append(r_side)
# result_perm.rule_chain = l_side
# else:
# result_perm.rule_chain = _m_class(l_side, r_side)
# elif l_side:
# if type(l_side) is _m_class:
# l_side.append(other)
# result_perm.rule_chain = l_side
# else:
# result_perm.rule_chain = _m_class(l_side, other)
# elif r_side:
# result_perm.rule_chain = _m_class(self, r_side)
# else:
# result_perm.rule_chain = _m_class(self, other)
#
# return result_perm
#
# def __and__(self, other):
# _m_class = And
# return self.__do_magic(other, _m_class)
#
# def __or__(self, other):
# _m_class = Or
# return self.__do_magic(other, _m_class)
#
# def __invert__(self):
# result_perm = self.copy()
# result_perm.rule_chain = Not(self.rule_chain or self)
# return result_perm
#
# def __call__(self, user_permissions):
# if self.rule_chain is None:
# return self in user_permissions
# else:
# return self.rule_chain(user_permissions)
#
# def __repr__(self):
# if self.rule_chain:
# return self.rule_chain.__repr__()
# else:
# return f'<{self.__class__.__name__}.{self.sys_name}>'
#
# class PermissionsEnum(metaclass=PermissionsEnumMeta):
# AUTHORIZED = Permission('authorized')
#
# @classproperty
# def ALL(cls):
# cache_var_name = '__all_cache'
# cached_perms = getattr(cls, cache_var_name, None)
# if cached_perms is None:
# setattr(cls, cache_var_name, set())
# cached_perms = getattr(cls, cache_var_name)
# for attr_name in dir(cls):
# if not attr_name.startswith('_') and attr_name != 'ALL':
# attr = getattr(cls, attr_name)
# if type(attr) is Permission:
# cached_perms.add(attr)
# return cached_perms
#
# @classproperty
# def SKIP(cls):
# return lambda p: True
#
# @classproperty
# def all_platform_permissions(cls):
# sub_classes = PermissionsEnum.registry
# all_perms = set()
# for c in sub_classes:
# all_perms |= c.ALL
#
# return all_perms
#
# Path: m2core/utils/error.py
# class M2Error(Exception):
# """
#     Raise this error wherever you want your exception to be raised, logged and shown to the user with a custom message
# """
# def __init__(self, msg: str='', show_to_user: bool=False):
# self.error_message = msg
# self.show_to_user = show_to_user
# self._log()
#
# def _log(self):
# logger.error(self.error_message)
#
# def __repr__(self):
# return self.error_message
. Output only the next line. | class M2Permission(BaseModel): |
Here is a snippet: <|code_start|>
class CreatedMixin:
"""
Stored in UTC without any offset and timezone
"""
created = Column(DateTime(timezone=False), default=datetime.utcnow, server_default=text('now()'), nullable=False)
updated = Column(DateTime(timezone=False), default=datetime.utcnow, server_default=text('now()'),
onupdate=datetime.utcnow, nullable=False)
CreatedMixin.created._creation_order = 9998
CreatedMixin.updated._creation_order = 9999
class SortMixin:
sort_order = Column(BigInteger, default=0, server_default='0', nullable=False)
SortMixin.sort_order._creation_order = 9997
class M2PermissionCheckMixin:
<|code_end|>
. Write the next line using the current file imports:
from datetime import datetime
from sqlalchemy import (
BigInteger,
Boolean,
Column,
DateTime,
ForeignKey,
String,
UniqueConstraint,
text,
)
from sqlalchemy.exc import SQLAlchemyError
from m2core.bases.base_model import BaseModel
from m2core.common.permissions import Permission, PermissionsEnum
from m2core.utils.error import M2Error
from typing import List
and context from other files:
# Path: m2core/bases/base_model.py
# class BaseModel(MetaBase, EnchantedMixin):
# """
# Use this BaseModel class
# """
# pass
#
# Path: m2core/common/permissions.py
# class Permission:
# def __init__(self, name: str=None, sys_name: str=None, description: str=None):
# if name is None or not len(name):
# raise AttributeError('`name` param should not be `None` or it\'s length should be > 0')
# self._name = name
# self._sys_name = sys_name
# self._description = description
# self.rule_chain = None
#
# @property
# def sys_name(self):
# return self._sys_name or re.sub(r'\W', '_', self._name).upper()
#
# @property
# def name(self):
# return self._name
#
# @property
# def description(self):
# return self._description
#
# def copy(self):
# return Permission(name=self._name, sys_name=self._sys_name)
#
# def __check_type(self, _type):
# return self.rule_chain is not None and type(self.rule_chain) is _type
#
# def __do_magic(self, other: 'Permission', _m_class: type(BasePermissionRule)):
# l_side = self.rule_chain
# r_side = other.rule_chain
# result_perm = other.copy()
# if l_side and r_side:
# # we get there only after both sides have been evaluated and compiler met distinct operator
# if type(l_side) is _m_class:
# l_side.append(r_side)
# result_perm.rule_chain = l_side
# else:
# result_perm.rule_chain = _m_class(l_side, r_side)
# elif l_side:
# if type(l_side) is _m_class:
# l_side.append(other)
# result_perm.rule_chain = l_side
# else:
# result_perm.rule_chain = _m_class(l_side, other)
# elif r_side:
# result_perm.rule_chain = _m_class(self, r_side)
# else:
# result_perm.rule_chain = _m_class(self, other)
#
# return result_perm
#
# def __and__(self, other):
# _m_class = And
# return self.__do_magic(other, _m_class)
#
# def __or__(self, other):
# _m_class = Or
# return self.__do_magic(other, _m_class)
#
# def __invert__(self):
# result_perm = self.copy()
# result_perm.rule_chain = Not(self.rule_chain or self)
# return result_perm
#
# def __call__(self, user_permissions):
# if self.rule_chain is None:
# return self in user_permissions
# else:
# return self.rule_chain(user_permissions)
#
# def __repr__(self):
# if self.rule_chain:
# return self.rule_chain.__repr__()
# else:
# return f'<{self.__class__.__name__}.{self.sys_name}>'
#
# class PermissionsEnum(metaclass=PermissionsEnumMeta):
# AUTHORIZED = Permission('authorized')
#
# @classproperty
# def ALL(cls):
# cache_var_name = '__all_cache'
# cached_perms = getattr(cls, cache_var_name, None)
# if cached_perms is None:
# setattr(cls, cache_var_name, set())
# cached_perms = getattr(cls, cache_var_name)
# for attr_name in dir(cls):
# if not attr_name.startswith('_') and attr_name != 'ALL':
# attr = getattr(cls, attr_name)
# if type(attr) is Permission:
# cached_perms.add(attr)
# return cached_perms
#
# @classproperty
# def SKIP(cls):
# return lambda p: True
#
# @classproperty
# def all_platform_permissions(cls):
# sub_classes = PermissionsEnum.registry
# all_perms = set()
# for c in sub_classes:
# all_perms |= c.ALL
#
# return all_perms
#
# Path: m2core/utils/error.py
# class M2Error(Exception):
# """
# Raise this error wherever you want you exception to be raised, logged and showed up to user with custom message
# """
# def __init__(self, msg: str='', show_to_user: bool=False):
# self.error_message = msg
# self.show_to_user = show_to_user
# self._log()
#
# def _log(self):
# logger.error(self.error_message)
#
# def __repr__(self):
# return self.error_message
, which may include functions, classes, or code. Output only the next line. | def can(self, permission_rule: Permission) -> bool: |
Based on the snippet: <|code_start|>
CreatedMixin.created._creation_order = 9998
CreatedMixin.updated._creation_order = 9999
class SortMixin:
sort_order = Column(BigInteger, default=0, server_default='0', nullable=False)
SortMixin.sort_order._creation_order = 9997
class M2PermissionCheckMixin:
def can(self, permission_rule: Permission) -> bool:
return permission_rule(self.permissions)
class M2Permission(BaseModel):
__repr_list__ = ['id', 'system_name']
id = Column(BigInteger, primary_key=True)
name = Column(String(255))
system_name = Column(String(255), unique=True)
description = Column(String(500))
active = Column(Boolean, default=True, server_default='1')
created = Column(DateTime(timezone=True), server_default=text('now()'), nullable=False)
@property
def enum_member(self):
if not hasattr(self, '__enum_member'):
<|code_end|>
, predict the immediate next line with the help of imports:
from datetime import datetime
from sqlalchemy import (
BigInteger,
Boolean,
Column,
DateTime,
ForeignKey,
String,
UniqueConstraint,
text,
)
from sqlalchemy.exc import SQLAlchemyError
from m2core.bases.base_model import BaseModel
from m2core.common.permissions import Permission, PermissionsEnum
from m2core.utils.error import M2Error
from typing import List
and context (classes, functions, sometimes code) from other files:
# Path: m2core/bases/base_model.py
# class BaseModel(MetaBase, EnchantedMixin):
# """
# Use this BaseModel class
# """
# pass
#
# Path: m2core/common/permissions.py
# class Permission:
# def __init__(self, name: str=None, sys_name: str=None, description: str=None):
# if name is None or not len(name):
# raise AttributeError('`name` param should not be `None` or it\'s length should be > 0')
# self._name = name
# self._sys_name = sys_name
# self._description = description
# self.rule_chain = None
#
# @property
# def sys_name(self):
# return self._sys_name or re.sub(r'\W', '_', self._name).upper()
#
# @property
# def name(self):
# return self._name
#
# @property
# def description(self):
# return self._description
#
# def copy(self):
# return Permission(name=self._name, sys_name=self._sys_name)
#
# def __check_type(self, _type):
# return self.rule_chain is not None and type(self.rule_chain) is _type
#
# def __do_magic(self, other: 'Permission', _m_class: type(BasePermissionRule)):
# l_side = self.rule_chain
# r_side = other.rule_chain
# result_perm = other.copy()
# if l_side and r_side:
# # we get there only after both sides have been evaluated and compiler met distinct operator
# if type(l_side) is _m_class:
# l_side.append(r_side)
# result_perm.rule_chain = l_side
# else:
# result_perm.rule_chain = _m_class(l_side, r_side)
# elif l_side:
# if type(l_side) is _m_class:
# l_side.append(other)
# result_perm.rule_chain = l_side
# else:
# result_perm.rule_chain = _m_class(l_side, other)
# elif r_side:
# result_perm.rule_chain = _m_class(self, r_side)
# else:
# result_perm.rule_chain = _m_class(self, other)
#
# return result_perm
#
# def __and__(self, other):
# _m_class = And
# return self.__do_magic(other, _m_class)
#
# def __or__(self, other):
# _m_class = Or
# return self.__do_magic(other, _m_class)
#
# def __invert__(self):
# result_perm = self.copy()
# result_perm.rule_chain = Not(self.rule_chain or self)
# return result_perm
#
# def __call__(self, user_permissions):
# if self.rule_chain is None:
# return self in user_permissions
# else:
# return self.rule_chain(user_permissions)
#
# def __repr__(self):
# if self.rule_chain:
# return self.rule_chain.__repr__()
# else:
# return f'<{self.__class__.__name__}.{self.sys_name}>'
#
# class PermissionsEnum(metaclass=PermissionsEnumMeta):
# AUTHORIZED = Permission('authorized')
#
# @classproperty
# def ALL(cls):
# cache_var_name = '__all_cache'
# cached_perms = getattr(cls, cache_var_name, None)
# if cached_perms is None:
# setattr(cls, cache_var_name, set())
# cached_perms = getattr(cls, cache_var_name)
# for attr_name in dir(cls):
# if not attr_name.startswith('_') and attr_name != 'ALL':
# attr = getattr(cls, attr_name)
# if type(attr) is Permission:
# cached_perms.add(attr)
# return cached_perms
#
# @classproperty
# def SKIP(cls):
# return lambda p: True
#
# @classproperty
# def all_platform_permissions(cls):
# sub_classes = PermissionsEnum.registry
# all_perms = set()
# for c in sub_classes:
# all_perms |= c.ALL
#
# return all_perms
#
# Path: m2core/utils/error.py
# class M2Error(Exception):
# """
# Raise this error wherever you want you exception to be raised, logged and showed up to user with custom message
# """
# def __init__(self, msg: str='', show_to_user: bool=False):
# self.error_message = msg
# self.show_to_user = show_to_user
# self._log()
#
# def _log(self):
# logger.error(self.error_message)
#
# def __repr__(self):
# return self.error_message
. Output only the next line. | all_perms = PermissionsEnum.all_platform_permissions |
Based on the snippet: <|code_start|> sort_order = Column(BigInteger, default=0, server_default='0', nullable=False)
SortMixin.sort_order._creation_order = 9997
class M2PermissionCheckMixin:
def can(self, permission_rule: Permission) -> bool:
return permission_rule(self.permissions)
class M2Permission(BaseModel):
__repr_list__ = ['id', 'system_name']
id = Column(BigInteger, primary_key=True)
name = Column(String(255))
system_name = Column(String(255), unique=True)
description = Column(String(500))
active = Column(Boolean, default=True, server_default='1')
created = Column(DateTime(timezone=True), server_default=text('now()'), nullable=False)
@property
def enum_member(self):
if not hasattr(self, '__enum_member'):
all_perms = PermissionsEnum.all_platform_permissions
sys_name = self.system_name
for p in all_perms:
if p.sys_name == sys_name:
setattr(self, '__enum_member', p)
return p
<|code_end|>
, predict the immediate next line with the help of imports:
from datetime import datetime
from sqlalchemy import (
BigInteger,
Boolean,
Column,
DateTime,
ForeignKey,
String,
UniqueConstraint,
text,
)
from sqlalchemy.exc import SQLAlchemyError
from m2core.bases.base_model import BaseModel
from m2core.common.permissions import Permission, PermissionsEnum
from m2core.utils.error import M2Error
from typing import List
and context (classes, functions, sometimes code) from other files:
# Path: m2core/bases/base_model.py
# class BaseModel(MetaBase, EnchantedMixin):
# """
# Use this BaseModel class
# """
# pass
#
# Path: m2core/common/permissions.py
# class Permission:
# def __init__(self, name: str=None, sys_name: str=None, description: str=None):
# if name is None or not len(name):
# raise AttributeError('`name` param should not be `None` or it\'s length should be > 0')
# self._name = name
# self._sys_name = sys_name
# self._description = description
# self.rule_chain = None
#
# @property
# def sys_name(self):
# return self._sys_name or re.sub(r'\W', '_', self._name).upper()
#
# @property
# def name(self):
# return self._name
#
# @property
# def description(self):
# return self._description
#
# def copy(self):
# return Permission(name=self._name, sys_name=self._sys_name)
#
# def __check_type(self, _type):
# return self.rule_chain is not None and type(self.rule_chain) is _type
#
# def __do_magic(self, other: 'Permission', _m_class: type(BasePermissionRule)):
# l_side = self.rule_chain
# r_side = other.rule_chain
# result_perm = other.copy()
# if l_side and r_side:
# # we get there only after both sides have been evaluated and compiler met distinct operator
# if type(l_side) is _m_class:
# l_side.append(r_side)
# result_perm.rule_chain = l_side
# else:
# result_perm.rule_chain = _m_class(l_side, r_side)
# elif l_side:
# if type(l_side) is _m_class:
# l_side.append(other)
# result_perm.rule_chain = l_side
# else:
# result_perm.rule_chain = _m_class(l_side, other)
# elif r_side:
# result_perm.rule_chain = _m_class(self, r_side)
# else:
# result_perm.rule_chain = _m_class(self, other)
#
# return result_perm
#
# def __and__(self, other):
# _m_class = And
# return self.__do_magic(other, _m_class)
#
# def __or__(self, other):
# _m_class = Or
# return self.__do_magic(other, _m_class)
#
# def __invert__(self):
# result_perm = self.copy()
# result_perm.rule_chain = Not(self.rule_chain or self)
# return result_perm
#
# def __call__(self, user_permissions):
# if self.rule_chain is None:
# return self in user_permissions
# else:
# return self.rule_chain(user_permissions)
#
# def __repr__(self):
# if self.rule_chain:
# return self.rule_chain.__repr__()
# else:
# return f'<{self.__class__.__name__}.{self.sys_name}>'
#
# class PermissionsEnum(metaclass=PermissionsEnumMeta):
# AUTHORIZED = Permission('authorized')
#
# @classproperty
# def ALL(cls):
# cache_var_name = '__all_cache'
# cached_perms = getattr(cls, cache_var_name, None)
# if cached_perms is None:
# setattr(cls, cache_var_name, set())
# cached_perms = getattr(cls, cache_var_name)
# for attr_name in dir(cls):
# if not attr_name.startswith('_') and attr_name != 'ALL':
# attr = getattr(cls, attr_name)
# if type(attr) is Permission:
# cached_perms.add(attr)
# return cached_perms
#
# @classproperty
# def SKIP(cls):
# return lambda p: True
#
# @classproperty
# def all_platform_permissions(cls):
# sub_classes = PermissionsEnum.registry
# all_perms = set()
# for c in sub_classes:
# all_perms |= c.ALL
#
# return all_perms
#
# Path: m2core/utils/error.py
# class M2Error(Exception):
# """
# Raise this error wherever you want you exception to be raised, logged and showed up to user with custom message
# """
# def __init__(self, msg: str='', show_to_user: bool=False):
# self.error_message = msg
# self.show_to_user = show_to_user
# self._log()
#
# def _log(self):
# logger.error(self.error_message)
#
# def __repr__(self):
# return self.error_message
. Output only the next line. | raise M2Error(f'No corresponding enum member found for Permission with sys_name=`{sys_name}`') |
Using the snippet: <|code_start|> def authorize(cls, _email: str, _password: str) -> dict or None:
"""
Authorize user and save his access token to Redis
:param _email: user email
:param _password: user password
"""
user_obj = cls.q.filter(
func.lower(cls.email) == _email.lower(),
# cls.password == func.crypt(_password, cls.password) <- this could be used for check in DB
).first()
if not user_obj:
return None
# check authorization via python bcrypt, not postgres bcrypt
if not bcrypt.checkpw(str.encode(_password), str.encode(user_obj.get('password'))):
return None
access_token = cls.sh.generate_token(user_obj.get('id'))
access_token['user_info'] = user_obj.data('password')
return access_token
def add_role(self, _role_name: str):
"""
Adds new role for user. User can have unlimited number of roles. If he already has this role - do nothing
:param _role_name: role name
"""
role = M2Role.load_by_params(name=_role_name)
if not role:
raise M2Error('Trying to add non-existent role', True)
<|code_end|>
, determine the next line of code. You have imports:
from sqlalchemy import Column, BigInteger, Integer, String, func
from m2core.data_schemes.db_system_scheme import M2UserRole, M2Role, M2Error, BaseModel, CreatedMixin
import bcrypt
and context (class names, function names, or code) available:
# Path: m2core/data_schemes/db_system_scheme.py
# class CreatedMixin:
# class SortMixin:
# class M2PermissionCheckMixin:
# class M2Permission(BaseModel):
# class M2Role(BaseModel):
# class M2RolePermission(BaseModel):
# class M2UserMixin:
# class M2UserRole(BaseModel):
# def can(self, permission_rule: Permission) -> bool:
# def enum_member(self):
# def from_enum_member(cls, member: Permission):
# def get_role_permissions(self) -> List[str]:
# def dump_role_permissions(self):
# def set_permissions(self, permissions: list):
# def add_permission(self, permission_system_name: str):
. Output only the next line. | M2UserRole.load_or_create(user_id=self.get('id'), role_id=role.get('id')) |
Given the following code snippet before the placeholder: <|code_start|> name = Column(String(255), info={'custom_param_for_json_scheme_1': '11111', 'custom_param_for_json_scheme_2': True})
gender = Column(Integer, nullable=False)
@classmethod
def authorize(cls, _email: str, _password: str) -> dict or None:
"""
Authorize user and save his access token to Redis
:param _email: user email
:param _password: user password
"""
user_obj = cls.q.filter(
func.lower(cls.email) == _email.lower(),
# cls.password == func.crypt(_password, cls.password) <- this could be used for check in DB
).first()
if not user_obj:
return None
# check authorization via python bcrypt, not postgres bcrypt
if not bcrypt.checkpw(str.encode(_password), str.encode(user_obj.get('password'))):
return None
access_token = cls.sh.generate_token(user_obj.get('id'))
access_token['user_info'] = user_obj.data('password')
return access_token
def add_role(self, _role_name: str):
"""
Adds new role for user. User can have unlimited number of roles. If he already has this role - do nothing
:param _role_name: role name
"""
<|code_end|>
, predict the next line using imports from the current file:
from sqlalchemy import Column, BigInteger, Integer, String, func
from m2core.data_schemes.db_system_scheme import M2UserRole, M2Role, M2Error, BaseModel, CreatedMixin
import bcrypt
and context including class names, function names, and sometimes code from other files:
# Path: m2core/data_schemes/db_system_scheme.py
# class CreatedMixin:
# class SortMixin:
# class M2PermissionCheckMixin:
# class M2Permission(BaseModel):
# class M2Role(BaseModel):
# class M2RolePermission(BaseModel):
# class M2UserMixin:
# class M2UserRole(BaseModel):
# def can(self, permission_rule: Permission) -> bool:
# def enum_member(self):
# def from_enum_member(cls, member: Permission):
# def get_role_permissions(self) -> List[str]:
# def dump_role_permissions(self):
# def set_permissions(self, permissions: list):
# def add_permission(self, permission_system_name: str):
. Output only the next line. | role = M2Role.load_by_params(name=_role_name) |
Given the following code snippet before the placeholder: <|code_start|>
@classmethod
def authorize(cls, _email: str, _password: str) -> dict or None:
"""
Authorize user and save his access token to Redis
:param _email: user email
:param _password: user password
"""
user_obj = cls.q.filter(
func.lower(cls.email) == _email.lower(),
# cls.password == func.crypt(_password, cls.password) <- this could be used for check in DB
).first()
if not user_obj:
return None
# check authorization via python bcrypt, not postgres bcrypt
if not bcrypt.checkpw(str.encode(_password), str.encode(user_obj.get('password'))):
return None
access_token = cls.sh.generate_token(user_obj.get('id'))
access_token['user_info'] = user_obj.data('password')
return access_token
def add_role(self, _role_name: str):
"""
Adds new role for user. User can have unlimited number of roles. If he already has this role - do nothing
:param _role_name: role name
"""
role = M2Role.load_by_params(name=_role_name)
if not role:
<|code_end|>
, predict the next line using imports from the current file:
from sqlalchemy import Column, BigInteger, Integer, String, func
from m2core.data_schemes.db_system_scheme import M2UserRole, M2Role, M2Error, BaseModel, CreatedMixin
import bcrypt
and context including class names, function names, and sometimes code from other files:
# Path: m2core/data_schemes/db_system_scheme.py
# class CreatedMixin:
# class SortMixin:
# class M2PermissionCheckMixin:
# class M2Permission(BaseModel):
# class M2Role(BaseModel):
# class M2RolePermission(BaseModel):
# class M2UserMixin:
# class M2UserRole(BaseModel):
# def can(self, permission_rule: Permission) -> bool:
# def enum_member(self):
# def from_enum_member(cls, member: Permission):
# def get_role_permissions(self) -> List[str]:
# def dump_role_permissions(self):
# def set_permissions(self, permissions: list):
# def add_permission(self, permission_system_name: str):
. Output only the next line. | raise M2Error('Trying to add non-existent role', True) |
Next line prediction: <|code_start|>
class SessionMixin:
__abstract__ = True
@classmethod
def set_db_session(cls, session) -> scoped_session or Session:
"""
Sets DB Session during M2Core initialization with this method
"""
cls._db_session = session
<|code_end|>
. Use current file imports:
(from redis import StrictRedis
from sqlalchemy.orm import Session, scoped_session, Query
from m2core.utils.decorators import classproperty
from m2core.utils.error import M2Error)
and context including class names, function names, or small code snippets from other files:
# Path: m2core/utils/decorators.py
# class classproperty(property):
# def __get__(self, obj, objtype=None):
# return super(classproperty, self).__get__(objtype)
#
# def __set__(self, obj, value):
# super(classproperty, self).__set__(type(obj), value)
#
# def __delete__(self, obj):
# super(classproperty, self).__delete__(type(obj))
#
# Path: m2core/utils/error.py
# class M2Error(Exception):
# """
# Raise this error wherever you want you exception to be raised, logged and showed up to user with custom message
# """
# def __init__(self, msg: str='', show_to_user: bool=False):
# self.error_message = msg
# self.show_to_user = show_to_user
# self._log()
#
# def _log(self):
# logger.error(self.error_message)
#
# def __repr__(self):
# return self.error_message
. Output only the next line. | @classproperty |
Based on the snippet: <|code_start|>
class SessionMixin:
__abstract__ = True
@classmethod
def set_db_session(cls, session) -> scoped_session or Session:
"""
Sets DB Session during M2Core initialization with this method
"""
cls._db_session = session
@classproperty
def s(cls) -> scoped_session or Session:
"""
Returns DB Session
"""
if cls._db_session:
return cls._db_session
else:
<|code_end|>
, predict the immediate next line with the help of imports:
from redis import StrictRedis
from sqlalchemy.orm import Session, scoped_session, Query
from m2core.utils.decorators import classproperty
from m2core.utils.error import M2Error
and context (classes, functions, sometimes code) from other files:
# Path: m2core/utils/decorators.py
# class classproperty(property):
# def __get__(self, obj, objtype=None):
# return super(classproperty, self).__get__(objtype)
#
# def __set__(self, obj, value):
# super(classproperty, self).__set__(type(obj), value)
#
# def __delete__(self, obj):
# super(classproperty, self).__delete__(type(obj))
#
# Path: m2core/utils/error.py
# class M2Error(Exception):
# """
# Raise this error wherever you want you exception to be raised, logged and showed up to user with custom message
# """
# def __init__(self, msg: str='', show_to_user: bool=False):
# self.error_message = msg
# self.show_to_user = show_to_user
# self._log()
#
# def _log(self):
# logger.error(self.error_message)
#
# def __repr__(self):
# return self.error_message
. Output only the next line. | raise M2Error('No DB session defined') |
Given the following code snippet before the placeholder: <|code_start|># import all handlers
# import permissions
# import core
# INIT M2CORE
options.config_name = 'config.py'
m2core = M2Core()
if __name__ == '__main__':
# setup core logger level
core_logger.setLevel(logging.DEBUG)
# setup project logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# add some headers per every response - CORS in this example
m2core.add_custom_response_headers({
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET,PUT,POST,DELETE,OPTIONS',
'Access-Control-Expose-Headers': '',
'Access-Control-Max-Age': '1728000',
'Access-Control-Allow-Headers': 'Content-Type, X-Access-Token'
})
# ENDPOINTS
human_route = r'/docs.js'
<|code_end|>
, predict the next line using imports from the current file:
from m2core.common.options import options
from example.handlers import *
from example.common.permissions import PlatformPermissions
from m2core import M2Core, logger as core_logger
from tornado.gen import coroutine, sleep
import logging
and context including class names, function names, and sometimes code from other files:
# Path: m2core/common/options.py
# class M2OptionParser(OptionParser):
# def parse_environment(self, final=True):
#
# Path: example/common/permissions.py
# class PlatformPermissions(PermissionsEnum):
# AUTHORIZED = Permission(
# name='Authorized users',
# sys_name='authorized',
# description='All authorized users will have this permission by default (overwrites default `AUTHORIZED` '
# 'permission)'
# )
# VIEW_SOME_INFO = Permission(
# name='View some info',
# sys_name='view_some_info',
# description='Example of some view permissions'
# )
# EDIT_SOME_INFO = Permission(
# name='Edit some info',
# sys_name='edit_some_info',
# description='Example of some edit permissions'
# )
# DELETE_SOME_INFO = Permission(
# name='Delete some info',
# sys_name='delete_some_info',
# description='Example of some delete permissions'
# )
# ADMIN = Permission(
# name='Admin privilege',
# sys_name='admin',
# description='Example of admin (super user) permissions'
# )
#
# Path: m2core/m2core.py
# class M2Core:
# def requires_permission(handler_method_func):
# def decorated(handler_instance, *args, **kwargs):
# def user_can(handler_method):
# def decorated(handler_instance, *args, **kwargs):
# def tryex(*errors):
# def decorator(func):
# def new_func(handler, *args, **kwargs):
# def __recreate_db(self):
# def __init__(self):
# def __make_app(self):
# def route(self, human_route: str=None, handler_cls: Type[RequestHandler]=None, rule_group: str=None,
# extra: dict=None, **kwargs):
# def add_endpoint(self, human_route: str, handler_class: Type[RequestHandler], extra_params: dict = dict()):
# def add_endpoint_method_permissions(self, human_route: str, method: str, permissions: list or None):
# def add_endpoint_permissions(self, human_route: str, permissions: dict):
# def add_redis_scheme(self, _redis_scheme: dict):
# def thread_pool(self) -> ThreadPoolExecutor:
# def db_engine(self) -> Engine:
# def db_session(self) -> scoped_session:
# def redis_session(self) -> redis.StrictRedis:
# def redis_tables(self) -> dict:
# def app(self) -> tornado.web.Application:
# def __make_db_session(self):
# def __make_thread_pool(self):
# def __make_redis_session(self):
# def add_callback(self, callback: callable, *args, **kwargs):
# def extended(self, callback: callable):
# def call_me_maybe(*args, **kwargs):
# def add_custom_response_headers(self, headers: dict()):
# def run(self):
# def run_with_recreate(self):
# def run_for_test(self) -> tornado.web.Application:
# def add_test_user(self, at: str, user_id: int=None, permissions: set=None):
# def get_test_user(self, at: str) -> dict:
# def sync_permissions():
# def dump_roles():
. Output only the next line. | m2core.route(human_route, RestApiDocsHandler, get=PlatformPermissions.SKIP) |
Given snippet: <|code_start|># import all handlers
# import permissions
# import core
# INIT M2CORE
options.config_name = 'config.py'
m2core = M2Core()
if __name__ == '__main__':
# setup core logger level
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from m2core.common.options import options
from example.handlers import *
from example.common.permissions import PlatformPermissions
from m2core import M2Core, logger as core_logger
from tornado.gen import coroutine, sleep
import logging
and context:
# Path: m2core/common/options.py
# class M2OptionParser(OptionParser):
# def parse_environment(self, final=True):
#
# Path: example/common/permissions.py
# class PlatformPermissions(PermissionsEnum):
# AUTHORIZED = Permission(
# name='Authorized users',
# sys_name='authorized',
# description='All authorized users will have this permission by default (overwrites default `AUTHORIZED` '
# 'permission)'
# )
# VIEW_SOME_INFO = Permission(
# name='View some info',
# sys_name='view_some_info',
# description='Example of some view permissions'
# )
# EDIT_SOME_INFO = Permission(
# name='Edit some info',
# sys_name='edit_some_info',
# description='Example of some edit permissions'
# )
# DELETE_SOME_INFO = Permission(
# name='Delete some info',
# sys_name='delete_some_info',
# description='Example of some delete permissions'
# )
# ADMIN = Permission(
# name='Admin privilege',
# sys_name='admin',
# description='Example of admin (super user) permissions'
# )
#
# Path: m2core/m2core.py
# class M2Core:
# def requires_permission(handler_method_func):
# def decorated(handler_instance, *args, **kwargs):
# def user_can(handler_method):
# def decorated(handler_instance, *args, **kwargs):
# def tryex(*errors):
# def decorator(func):
# def new_func(handler, *args, **kwargs):
# def __recreate_db(self):
# def __init__(self):
# def __make_app(self):
# def route(self, human_route: str=None, handler_cls: Type[RequestHandler]=None, rule_group: str=None,
# extra: dict=None, **kwargs):
# def add_endpoint(self, human_route: str, handler_class: Type[RequestHandler], extra_params: dict = dict()):
# def add_endpoint_method_permissions(self, human_route: str, method: str, permissions: list or None):
# def add_endpoint_permissions(self, human_route: str, permissions: dict):
# def add_redis_scheme(self, _redis_scheme: dict):
# def thread_pool(self) -> ThreadPoolExecutor:
# def db_engine(self) -> Engine:
# def db_session(self) -> scoped_session:
# def redis_session(self) -> redis.StrictRedis:
# def redis_tables(self) -> dict:
# def app(self) -> tornado.web.Application:
# def __make_db_session(self):
# def __make_thread_pool(self):
# def __make_redis_session(self):
# def add_callback(self, callback: callable, *args, **kwargs):
# def extended(self, callback: callable):
# def call_me_maybe(*args, **kwargs):
# def add_custom_response_headers(self, headers: dict()):
# def run(self):
# def run_with_recreate(self):
# def run_for_test(self) -> tornado.web.Application:
# def add_test_user(self, at: str, user_id: int=None, permissions: set=None):
# def get_test_user(self, at: str) -> dict:
# def sync_permissions():
# def dump_roles():
which might include code, classes, or functions. Output only the next line. | core_logger.setLevel(logging.DEBUG) |
Given the code snippet: <|code_start|># Copyright 2012, 2014 Richard Dymond (rjdymond@gmail.com)
#
# This file is part of Pyskool.
#
# Pyskool is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Pyskool is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pyskool. If not, see <http://www.gnu.org/licenses/>.
class Udg:
def __init__(self, attr, data, mask=None):
self.attr = attr
self.data = data
self.mask = mask
# If paper is set, it will override attr (see _get_all_colours() and
# _build_image_data_bd_any() on PngWriter)
self.paper = None
_BLANK_UDG = Udg(0, [0] * 8)
class SkoolMemory:
def __init__(self, snafile, custom):
<|code_end|>
, generate the next line using the imports in this file:
from .snapshot import get_snapshot
and context (functions, classes, or occasionally code) from other files:
# Path: pyskool/snapshot.py
# def get_snapshot(fname):
# ext = fname[-4:].lower()
# if ext not in ('.sna', '.z80', '.szx', '.tzx'):
# raise SnapshotError("{0}: Unknown file type '{1}'".format(fname, ext[1:]))
# with open(fname, 'rb') as f:
# data = bytearray(f.read()) # PY: 'return f.read()' in Python 3
# if ext == '.sna':
# ram = data[27:49179]
# elif ext == '.z80':
# ram = _read_z80(data)
# elif ext == '.szx':
# ram = _read_szx(data)
# elif ext == '.tzx':
# ram = _read_tzx(data)
# if len(ram) != 49152:
# raise SnapshotError("RAM size is {0}".format(len(ram)))
# mem = [0] * 16384
# mem.extend(ram)
# return mem
. Output only the next line. | self.snapshot = get_snapshot(snafile) |
Using the snippet: <|code_start|>#
# This file is part of Pyskool.
#
# Pyskool is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Pyskool is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pyskool. If not, see <http://www.gnu.org/licenses/>.
"""
Defines the :class:`Staircase` class.
"""
class Staircase:
"""A staircase.
:param bottom: The coordinates of the bottom of the staircase.
:param top: The coordinates of the top of the staircase.
:param force: If `True`, the staircase must be ascended or descended when
approached (like the staircase in Back to Skool that leads up
from or down to the stage).
"""
def __init__(self, bottom, top, force=False):
<|code_end|>
, determine the next line of code. You have imports:
from .location import Location
and context (class names, function names, or code) available:
# Path: pyskool/location.py
# class Location:
# """A location in the skool specified by a pair of coordinates.
#
# :param coords: The coordinates of this location.
# """
# def __init__(self, coords):
# self.x = coords[0]
# self.y = coords[1]
#
# def coords(self):
# """Return the coordinates of this location as a 2-tuple."""
# return self.x, self.y
#
# def __str__(self):
# """Return a display string for this location: '`(x, y)`'."""
# return '(%s, %s)' % (self.x, self.y)
. Output only the next line. | self.bottom = Location(bottom) |
Given snippet: <|code_start|>---------------
The keys to move Eric around are:
* 'q' or up arrow - go up stairs, or continue walking in the same direction
* 'a' or down arrow - go down stairs, or continue walking in the same direction
* 'o' or left arrow - left
* 'p' or right arrow - right
* 'f' - fire catapult
* 'h' - hit
* 'j' - jump
* 's' - sit/stand
* 'w' - write on a blackboard (press Enter/Return to finish)
Other useful keys are:
* Escape - quit the game
* End - pause/resume
* Insert - take a screenshot
* F2 - save the game
* F6 - load the most recently saved game
* F11 - switch between full-screen and windowed mode
* F12 - show/hide the menu
For full instructions, see the `documentation`_.
.. _documentation: https://pyskool.ca/docs/pyskool/play.html
"""
setup(
name='pyskool',
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from distutils.core import setup
from pyskool import version
and context:
# Path: pyskool.py
which might include code, classes, or functions. Output only the next line. | version=version, |
Given the code snippet: <|code_start|> i = 0
while i < len(args):
arg = args[i]
if arg == '-q':
verbose = False
elif arg.startswith('-'):
print_usage()
else:
p_args.append(arg)
i += 1
if len(p_args) != 1:
print_usage()
return p_args[0], verbose
def print_usage():
sys.stderr.write("""Usage: {0} [options] DIRECTORY
Creates the stock Pyskool sound files in a subdirectory named 'sounds' in
DIRECTORY.
Options:
-q Be quiet
""".format(os.path.basename(sys.argv[0])))
sys.exit(1)
###############################################################################
# Begin
###############################################################################
odir, verbose = parse_args(sys.argv[1:])
sounds_dir = os.path.join(odir, 'sounds')
<|code_end|>
, generate the next line using the imports in this file:
import sys
import os
from pyskool.skoolsound import (create_sounds, SKOOL_DAZE, BACK_TO_SKOOL,
SKOOL_DAZE_TAKE_TOO, EZAD_LOOKS, BACK_TO_SKOOL_DAZE)
and context (functions, classes, or occasionally code) from other files:
# Path: pyskool/skoolsound.py
# def create_sounds(game, odir, verbose=True, force=False, sample_rate=44100, max_amplitude=65536):
# wrote_wavs = False
# for sound in SOUNDS[game]:
# delays_f, subdir, fname = FILES[sound]
# sounds_dir = os.path.join(odir, subdir)
# if not os.path.isdir(sounds_dir):
# os.makedirs(sounds_dir)
# wav = os.path.join(sounds_dir, fname + '.wav')
# if force or not os.path.isfile(wav):
# if verbose:
# print('Writing {0}'.format(wav))
# samples = delays_to_samples(delays_f(), sample_rate, max_amplitude)
# write_wav(samples, wav, sample_rate)
# wrote_wavs = True
# if verbose and not wrote_wavs:
# print("All sound files present")
#
# SKOOL_DAZE = 'skool_daze'
#
# BACK_TO_SKOOL = 'back_to_skool'
#
# SKOOL_DAZE_TAKE_TOO = 'skool_daze_take_too'
#
# EZAD_LOOKS = 'ezad_looks'
#
# BACK_TO_SKOOL_DAZE = 'back_to_skool_daze'
. Output only the next line. | create_sounds(SKOOL_DAZE, sounds_dir, verbose) |
Predict the next line for this snippet: <|code_start|> i = 0
while i < len(args):
arg = args[i]
if arg == '-q':
verbose = False
elif arg.startswith('-'):
print_usage()
else:
p_args.append(arg)
i += 1
if len(p_args) != 1:
print_usage()
return p_args[0], verbose
def print_usage():
sys.stderr.write("""Usage: {0} [options] DIRECTORY
Creates the stock Pyskool sound files in a subdirectory named 'sounds' in
DIRECTORY.
Options:
-q Be quiet
""".format(os.path.basename(sys.argv[0])))
sys.exit(1)
###############################################################################
# Begin
###############################################################################
odir, verbose = parse_args(sys.argv[1:])
sounds_dir = os.path.join(odir, 'sounds')
<|code_end|>
with the help of current file imports:
import sys
import os
from pyskool.skoolsound import (create_sounds, SKOOL_DAZE, BACK_TO_SKOOL,
SKOOL_DAZE_TAKE_TOO, EZAD_LOOKS, BACK_TO_SKOOL_DAZE)
and context from other files:
# Path: pyskool/skoolsound.py
# def create_sounds(game, odir, verbose=True, force=False, sample_rate=44100, max_amplitude=65536):
# wrote_wavs = False
# for sound in SOUNDS[game]:
# delays_f, subdir, fname = FILES[sound]
# sounds_dir = os.path.join(odir, subdir)
# if not os.path.isdir(sounds_dir):
# os.makedirs(sounds_dir)
# wav = os.path.join(sounds_dir, fname + '.wav')
# if force or not os.path.isfile(wav):
# if verbose:
# print('Writing {0}'.format(wav))
# samples = delays_to_samples(delays_f(), sample_rate, max_amplitude)
# write_wav(samples, wav, sample_rate)
# wrote_wavs = True
# if verbose and not wrote_wavs:
# print("All sound files present")
#
# SKOOL_DAZE = 'skool_daze'
#
# BACK_TO_SKOOL = 'back_to_skool'
#
# SKOOL_DAZE_TAKE_TOO = 'skool_daze_take_too'
#
# EZAD_LOOKS = 'ezad_looks'
#
# BACK_TO_SKOOL_DAZE = 'back_to_skool_daze'
, which may contain function names, class names, or code. Output only the next line. | create_sounds(SKOOL_DAZE, sounds_dir, verbose) |
Given snippet: <|code_start|> while i < len(args):
arg = args[i]
if arg == '-q':
verbose = False
elif arg.startswith('-'):
print_usage()
else:
p_args.append(arg)
i += 1
if len(p_args) != 1:
print_usage()
return p_args[0], verbose
def print_usage():
sys.stderr.write("""Usage: {0} [options] DIRECTORY
Creates the stock Pyskool sound files in a subdirectory named 'sounds' in
DIRECTORY.
Options:
-q Be quiet
""".format(os.path.basename(sys.argv[0])))
sys.exit(1)
###############################################################################
# Begin
###############################################################################
odir, verbose = parse_args(sys.argv[1:])
sounds_dir = os.path.join(odir, 'sounds')
create_sounds(SKOOL_DAZE, sounds_dir, verbose)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import os
from pyskool.skoolsound import (create_sounds, SKOOL_DAZE, BACK_TO_SKOOL,
SKOOL_DAZE_TAKE_TOO, EZAD_LOOKS, BACK_TO_SKOOL_DAZE)
and context:
# Path: pyskool/skoolsound.py
# def create_sounds(game, odir, verbose=True, force=False, sample_rate=44100, max_amplitude=65536):
# wrote_wavs = False
# for sound in SOUNDS[game]:
# delays_f, subdir, fname = FILES[sound]
# sounds_dir = os.path.join(odir, subdir)
# if not os.path.isdir(sounds_dir):
# os.makedirs(sounds_dir)
# wav = os.path.join(sounds_dir, fname + '.wav')
# if force or not os.path.isfile(wav):
# if verbose:
# print('Writing {0}'.format(wav))
# samples = delays_to_samples(delays_f(), sample_rate, max_amplitude)
# write_wav(samples, wav, sample_rate)
# wrote_wavs = True
# if verbose and not wrote_wavs:
# print("All sound files present")
#
# SKOOL_DAZE = 'skool_daze'
#
# BACK_TO_SKOOL = 'back_to_skool'
#
# SKOOL_DAZE_TAKE_TOO = 'skool_daze_take_too'
#
# EZAD_LOOKS = 'ezad_looks'
#
# BACK_TO_SKOOL_DAZE = 'back_to_skool_daze'
which might include code, classes, or functions. Output only the next line. | create_sounds(BACK_TO_SKOOL, sounds_dir, verbose) |
Here is a snippet: <|code_start|> arg = args[i]
if arg == '-q':
verbose = False
elif arg.startswith('-'):
print_usage()
else:
p_args.append(arg)
i += 1
if len(p_args) != 1:
print_usage()
return p_args[0], verbose
def print_usage():
sys.stderr.write("""Usage: {0} [options] DIRECTORY
Creates the stock Pyskool sound files in a subdirectory named 'sounds' in
DIRECTORY.
Options:
-q Be quiet
""".format(os.path.basename(sys.argv[0])))
sys.exit(1)
###############################################################################
# Begin
###############################################################################
odir, verbose = parse_args(sys.argv[1:])
sounds_dir = os.path.join(odir, 'sounds')
create_sounds(SKOOL_DAZE, sounds_dir, verbose)
create_sounds(BACK_TO_SKOOL, sounds_dir, verbose)
<|code_end|>
. Write the next line using the current file imports:
import sys
import os
from pyskool.skoolsound import (create_sounds, SKOOL_DAZE, BACK_TO_SKOOL,
SKOOL_DAZE_TAKE_TOO, EZAD_LOOKS, BACK_TO_SKOOL_DAZE)
and context from other files:
# Path: pyskool/skoolsound.py
# def create_sounds(game, odir, verbose=True, force=False, sample_rate=44100, max_amplitude=65536):
# wrote_wavs = False
# for sound in SOUNDS[game]:
# delays_f, subdir, fname = FILES[sound]
# sounds_dir = os.path.join(odir, subdir)
# if not os.path.isdir(sounds_dir):
# os.makedirs(sounds_dir)
# wav = os.path.join(sounds_dir, fname + '.wav')
# if force or not os.path.isfile(wav):
# if verbose:
# print('Writing {0}'.format(wav))
# samples = delays_to_samples(delays_f(), sample_rate, max_amplitude)
# write_wav(samples, wav, sample_rate)
# wrote_wavs = True
# if verbose and not wrote_wavs:
# print("All sound files present")
#
# SKOOL_DAZE = 'skool_daze'
#
# BACK_TO_SKOOL = 'back_to_skool'
#
# SKOOL_DAZE_TAKE_TOO = 'skool_daze_take_too'
#
# EZAD_LOOKS = 'ezad_looks'
#
# BACK_TO_SKOOL_DAZE = 'back_to_skool_daze'
, which may include functions, classes, or code. Output only the next line. | create_sounds(SKOOL_DAZE_TAKE_TOO, sounds_dir, verbose) |
Using the snippet: <|code_start|> if arg == '-q':
verbose = False
elif arg.startswith('-'):
print_usage()
else:
p_args.append(arg)
i += 1
if len(p_args) != 1:
print_usage()
return p_args[0], verbose
def print_usage():
sys.stderr.write("""Usage: {0} [options] DIRECTORY
Creates the stock Pyskool sound files in a subdirectory named 'sounds' in
DIRECTORY.
Options:
-q Be quiet
""".format(os.path.basename(sys.argv[0])))
sys.exit(1)
###############################################################################
# Begin
###############################################################################
odir, verbose = parse_args(sys.argv[1:])
sounds_dir = os.path.join(odir, 'sounds')
create_sounds(SKOOL_DAZE, sounds_dir, verbose)
create_sounds(BACK_TO_SKOOL, sounds_dir, verbose)
create_sounds(SKOOL_DAZE_TAKE_TOO, sounds_dir, verbose)
<|code_end|>
, determine the next line of code. You have imports:
import sys
import os
from pyskool.skoolsound import (create_sounds, SKOOL_DAZE, BACK_TO_SKOOL,
SKOOL_DAZE_TAKE_TOO, EZAD_LOOKS, BACK_TO_SKOOL_DAZE)
and context (class names, function names, or code) available:
# Path: pyskool/skoolsound.py
# def create_sounds(game, odir, verbose=True, force=False, sample_rate=44100, max_amplitude=65536):
# wrote_wavs = False
# for sound in SOUNDS[game]:
# delays_f, subdir, fname = FILES[sound]
# sounds_dir = os.path.join(odir, subdir)
# if not os.path.isdir(sounds_dir):
# os.makedirs(sounds_dir)
# wav = os.path.join(sounds_dir, fname + '.wav')
# if force or not os.path.isfile(wav):
# if verbose:
# print('Writing {0}'.format(wav))
# samples = delays_to_samples(delays_f(), sample_rate, max_amplitude)
# write_wav(samples, wav, sample_rate)
# wrote_wavs = True
# if verbose and not wrote_wavs:
# print("All sound files present")
#
# SKOOL_DAZE = 'skool_daze'
#
# BACK_TO_SKOOL = 'back_to_skool'
#
# SKOOL_DAZE_TAKE_TOO = 'skool_daze_take_too'
#
# EZAD_LOOKS = 'ezad_looks'
#
# BACK_TO_SKOOL_DAZE = 'back_to_skool_daze'
. Output only the next line. | create_sounds(EZAD_LOOKS, sounds_dir, verbose) |
Given snippet: <|code_start|> verbose = False
elif arg.startswith('-'):
print_usage()
else:
p_args.append(arg)
i += 1
if len(p_args) != 1:
print_usage()
return p_args[0], verbose
def print_usage():
sys.stderr.write("""Usage: {0} [options] DIRECTORY
Creates the stock Pyskool sound files in a subdirectory named 'sounds' in
DIRECTORY.
Options:
-q Be quiet
""".format(os.path.basename(sys.argv[0])))
sys.exit(1)
###############################################################################
# Begin
###############################################################################
odir, verbose = parse_args(sys.argv[1:])
sounds_dir = os.path.join(odir, 'sounds')
create_sounds(SKOOL_DAZE, sounds_dir, verbose)
create_sounds(BACK_TO_SKOOL, sounds_dir, verbose)
create_sounds(SKOOL_DAZE_TAKE_TOO, sounds_dir, verbose)
create_sounds(EZAD_LOOKS, sounds_dir, verbose)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import sys
import os
from pyskool.skoolsound import (create_sounds, SKOOL_DAZE, BACK_TO_SKOOL,
SKOOL_DAZE_TAKE_TOO, EZAD_LOOKS, BACK_TO_SKOOL_DAZE)
and context:
# Path: pyskool/skoolsound.py
# def create_sounds(game, odir, verbose=True, force=False, sample_rate=44100, max_amplitude=65536):
# wrote_wavs = False
# for sound in SOUNDS[game]:
# delays_f, subdir, fname = FILES[sound]
# sounds_dir = os.path.join(odir, subdir)
# if not os.path.isdir(sounds_dir):
# os.makedirs(sounds_dir)
# wav = os.path.join(sounds_dir, fname + '.wav')
# if force or not os.path.isfile(wav):
# if verbose:
# print('Writing {0}'.format(wav))
# samples = delays_to_samples(delays_f(), sample_rate, max_amplitude)
# write_wav(samples, wav, sample_rate)
# wrote_wavs = True
# if verbose and not wrote_wavs:
# print("All sound files present")
#
# SKOOL_DAZE = 'skool_daze'
#
# BACK_TO_SKOOL = 'back_to_skool'
#
# SKOOL_DAZE_TAKE_TOO = 'skool_daze_take_too'
#
# EZAD_LOOKS = 'ezad_looks'
#
# BACK_TO_SKOOL_DAZE = 'back_to_skool_daze'
which might include code, classes, or functions. Output only the next line. | create_sounds(BACK_TO_SKOOL_DAZE, sounds_dir, verbose) |
Based on the snippet: <|code_start|>
class PauseTestCase(TestCase):
"""
Check to make sure that entries can be paused and unpaused as expected.
Rules for pausing an entry:
- Must be owned by user
- If paused, unpause it
- Entry must be open
"""
fixtures = ['activities', 'projects', 'users', 'entries']
first_run = True
def setUp(self):
self.client = Client()
if self.first_run:
# try pausing an entry before being logged in
response = self.get_response(2)
self.assertEquals(response.status_code, 302)
# log in
<|code_end|>
, predict the immediate next line with the help of imports:
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry
and context (classes, functions, sometimes code) from other files:
# Path: pendulum/tests/utils.py
# VALID_USER, VALID_PASSWORD = 'testuser', 'password'
# def ffd(date):
#
# Path: pendulum/models.py
# class Entry(models.Model):
# """
# This class is where all of the time logs are taken care of
# """
#
# user = models.ForeignKey(User, related_name='pendulum_entries')
# project = models.ForeignKey(Project,
# limit_choices_to={'is_active': True,
# 'sites': CURRENT_SITE},
# related_name='entries')
# activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
# start_time = models.DateTimeField()
# end_time = models.DateTimeField(blank=True, null=True)
# seconds_paused = models.PositiveIntegerField(default=0)
# pause_time = models.DateTimeField(blank=True, null=True)
# comments = models.TextField(blank=True, null=True)
# date_updated = models.DateTimeField(auto_now=True)
# site = models.ForeignKey(Site, related_name='pendulum_entries')
#
# objects = EntryManager()
#
# def get_seconds(self):
# """
# Determines the difference between the starting and ending time. The
# result is returned as an integer of seconds.
# """
# if self.start_time and self.end_time:
# # only calculate when the start and end are defined
# delta = self.end_time - self.start_time
# seconds = delta.seconds - self.seconds_paused
# else:
# seconds = 0
# delta = timedelta(days=0)
#
# return seconds + (delta.days * 86400)
#
# def __total_hours(self):
# """
# Determined the total number of hours worked in this entry
# """
# return self.get_seconds() / 3600.0
# total_hours = property(__total_hours)
#
# def __total_time(self):
# """
# Determines the amount of time spent and return it as a string formatted
# as HH:MM:SS
# """
# return utils.get_total_time(self.get_seconds())
# total_time = property(__total_time)
#
# def __paused_time(self):
# """
# Returns the total time paused for this entry in HH:MM:SS format
# """
# return utils.get_total_time(self.seconds_paused)
# paused_time = property(__paused_time)
#
# def __hours(self):
# """
# Print the hours in a nice, rounded format
# """
# return "%.02f" % self.total_hours
# hours = property(__hours)
#
# def __is_paused(self):
# """
# Determine whether or not this entry is paused
# """
# return self.pause_time != None
# is_paused = property(__is_paused)
#
# def pause(self):
# """
# If this entry is not paused, pause it.
# """
# if not self.is_paused:
# self.pause_time = datetime.now()
#
# def unpause(self):
# """
# If this entry is paused, unpause it
# """
# if self.is_paused:
# delta = datetime.now() - self.pause_time
# self.seconds_paused += delta.seconds
# self.pause_time = None
#
# def toggle_paused(self):
# """
# Toggle the paused state of this entry. If the entry is already paused,
# it will be unpaused; if it is not paused, it will be paused.
# """
# if self.is_paused:
# self.unpause()
# else:
# self.pause()
#
# def __is_closed(self):
# """
# Determine whether this entry has been closed or not
# """
# return self.end_time != None
# is_closed = property(__is_closed)
#
# def clock_in(self, user, project):
# """
# Set this entry up for saving the first time, as an open entry.
# """
# if not self.is_closed:
# self.user = user
# self.project = project
# self.site = CURRENT_SITE
# self.start_time = datetime.now()
#
# def clock_out(self, activity, comments):
# """
# Save some vital pieces of information about this entry upon closing
# """
# if self.is_paused:
# self.unpause()
#
# if not self.is_closed:
# self.end_time = datetime.now()
# self.activity = activity
# self.comments = comments
#
# def __delete_key(self):
# """
# Make it a little more interesting for deleting logs
# """
# salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
# try:
# import hashlib
# except ImportError:
# import sha
# key = sha.new(salt).hexdigest()
# else:
# key = hashlib.sha1(salt).hexdigest()
# return key
# delete_key = property(__delete_key)
#
# def __unicode__(self):
# """
# The string representation of an instance of this class
# """
# return '%s on %s' % (self.user, self.project)
#
# class Meta:
# ordering = ['-start_time']
# verbose_name_plural = 'entries'
# permissions = (
# ('can_clock_in', 'Can use Pendulum to clock in'),
# ('can_pause', 'Can pause and unpause log entries'),
# ('can_clock_out', 'Can use Pendulum to clock out'),
# )
. Output only the next line. | response = self.client.login(username=VALID_USER, password=VALID_PASSWORD) |
Using the snippet: <|code_start|>
class PauseTestCase(TestCase):
"""
Check to make sure that entries can be paused and unpaused as expected.
Rules for pausing an entry:
- Must be owned by user
- If paused, unpause it
- Entry must be open
"""
fixtures = ['activities', 'projects', 'users', 'entries']
first_run = True
def setUp(self):
self.client = Client()
if self.first_run:
# try pausing an entry before being logged in
response = self.get_response(2)
self.assertEquals(response.status_code, 302)
# log in
<|code_end|>
, determine the next line of code. You have imports:
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry
and context (class names, function names, or code) available:
# Path: pendulum/tests/utils.py
# VALID_USER, VALID_PASSWORD = 'testuser', 'password'
# def ffd(date):
#
# Path: pendulum/models.py
# class Entry(models.Model):
# """
# This class is where all of the time logs are taken care of
# """
#
# user = models.ForeignKey(User, related_name='pendulum_entries')
# project = models.ForeignKey(Project,
# limit_choices_to={'is_active': True,
# 'sites': CURRENT_SITE},
# related_name='entries')
# activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
# start_time = models.DateTimeField()
# end_time = models.DateTimeField(blank=True, null=True)
# seconds_paused = models.PositiveIntegerField(default=0)
# pause_time = models.DateTimeField(blank=True, null=True)
# comments = models.TextField(blank=True, null=True)
# date_updated = models.DateTimeField(auto_now=True)
# site = models.ForeignKey(Site, related_name='pendulum_entries')
#
# objects = EntryManager()
#
# def get_seconds(self):
# """
# Determines the difference between the starting and ending time. The
# result is returned as an integer of seconds.
# """
# if self.start_time and self.end_time:
# # only calculate when the start and end are defined
# delta = self.end_time - self.start_time
# seconds = delta.seconds - self.seconds_paused
# else:
# seconds = 0
# delta = timedelta(days=0)
#
# return seconds + (delta.days * 86400)
#
# def __total_hours(self):
# """
# Determined the total number of hours worked in this entry
# """
# return self.get_seconds() / 3600.0
# total_hours = property(__total_hours)
#
# def __total_time(self):
# """
# Determines the amount of time spent and return it as a string formatted
# as HH:MM:SS
# """
# return utils.get_total_time(self.get_seconds())
# total_time = property(__total_time)
#
# def __paused_time(self):
# """
# Returns the total time paused for this entry in HH:MM:SS format
# """
# return utils.get_total_time(self.seconds_paused)
# paused_time = property(__paused_time)
#
# def __hours(self):
# """
# Print the hours in a nice, rounded format
# """
# return "%.02f" % self.total_hours
# hours = property(__hours)
#
# def __is_paused(self):
# """
# Determine whether or not this entry is paused
# """
# return self.pause_time != None
# is_paused = property(__is_paused)
#
# def pause(self):
# """
# If this entry is not paused, pause it.
# """
# if not self.is_paused:
# self.pause_time = datetime.now()
#
# def unpause(self):
# """
# If this entry is paused, unpause it
# """
# if self.is_paused:
# delta = datetime.now() - self.pause_time
# self.seconds_paused += delta.seconds
# self.pause_time = None
#
# def toggle_paused(self):
# """
# Toggle the paused state of this entry. If the entry is already paused,
# it will be unpaused; if it is not paused, it will be paused.
# """
# if self.is_paused:
# self.unpause()
# else:
# self.pause()
#
# def __is_closed(self):
# """
# Determine whether this entry has been closed or not
# """
# return self.end_time != None
# is_closed = property(__is_closed)
#
# def clock_in(self, user, project):
# """
# Set this entry up for saving the first time, as an open entry.
# """
# if not self.is_closed:
# self.user = user
# self.project = project
# self.site = CURRENT_SITE
# self.start_time = datetime.now()
#
# def clock_out(self, activity, comments):
# """
# Save some vital pieces of information about this entry upon closing
# """
# if self.is_paused:
# self.unpause()
#
# if not self.is_closed:
# self.end_time = datetime.now()
# self.activity = activity
# self.comments = comments
#
# def __delete_key(self):
# """
# Make it a little more interesting for deleting logs
# """
# salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
# try:
# import hashlib
# except ImportError:
# import sha
# key = sha.new(salt).hexdigest()
# else:
# key = hashlib.sha1(salt).hexdigest()
# return key
# delete_key = property(__delete_key)
#
# def __unicode__(self):
# """
# The string representation of an instance of this class
# """
# return '%s on %s' % (self.user, self.project)
#
# class Meta:
# ordering = ['-start_time']
# verbose_name_plural = 'entries'
# permissions = (
# ('can_clock_in', 'Can use Pendulum to clock in'),
# ('can_pause', 'Can pause and unpause log entries'),
# ('can_clock_out', 'Can use Pendulum to clock out'),
# )
. Output only the next line. | response = self.client.login(username=VALID_USER, password=VALID_PASSWORD) |
Predict the next line for this snippet: <|code_start|>
class AddEntryTestCase(TestCase):
"""
Rules for adding an entry:
- User is logged in
- Project is specified
- Start time is in the past
- End time is in the past
- Start time is before end time
"""
fixtures = ['activities', 'projects', 'users', 'entries']
first_run = True
def setUp(self):
self.client = Client()
if self.first_run:
# try adding an entry before being logged in
response = self.get_response()
self.assertEquals(response.status_code, 302)
self.first_run = False
# log in
<|code_end|>
with the help of current file imports:
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry
from datetime import datetime, timedelta
and context from other files:
# Path: pendulum/tests/utils.py
# VALID_USER, VALID_PASSWORD = 'testuser', 'password'
# def ffd(date):
#
# Path: pendulum/models.py
# class Entry(models.Model):
# """
# This class is where all of the time logs are taken care of
# """
#
# user = models.ForeignKey(User, related_name='pendulum_entries')
# project = models.ForeignKey(Project,
# limit_choices_to={'is_active': True,
# 'sites': CURRENT_SITE},
# related_name='entries')
# activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
# start_time = models.DateTimeField()
# end_time = models.DateTimeField(blank=True, null=True)
# seconds_paused = models.PositiveIntegerField(default=0)
# pause_time = models.DateTimeField(blank=True, null=True)
# comments = models.TextField(blank=True, null=True)
# date_updated = models.DateTimeField(auto_now=True)
# site = models.ForeignKey(Site, related_name='pendulum_entries')
#
# objects = EntryManager()
#
# def get_seconds(self):
# """
# Determines the difference between the starting and ending time. The
# result is returned as an integer of seconds.
# """
# if self.start_time and self.end_time:
# # only calculate when the start and end are defined
# delta = self.end_time - self.start_time
# seconds = delta.seconds - self.seconds_paused
# else:
# seconds = 0
# delta = timedelta(days=0)
#
# return seconds + (delta.days * 86400)
#
# def __total_hours(self):
# """
# Determined the total number of hours worked in this entry
# """
# return self.get_seconds() / 3600.0
# total_hours = property(__total_hours)
#
# def __total_time(self):
# """
# Determines the amount of time spent and return it as a string formatted
# as HH:MM:SS
# """
# return utils.get_total_time(self.get_seconds())
# total_time = property(__total_time)
#
# def __paused_time(self):
# """
# Returns the total time paused for this entry in HH:MM:SS format
# """
# return utils.get_total_time(self.seconds_paused)
# paused_time = property(__paused_time)
#
# def __hours(self):
# """
# Print the hours in a nice, rounded format
# """
# return "%.02f" % self.total_hours
# hours = property(__hours)
#
# def __is_paused(self):
# """
# Determine whether or not this entry is paused
# """
# return self.pause_time != None
# is_paused = property(__is_paused)
#
# def pause(self):
# """
# If this entry is not paused, pause it.
# """
# if not self.is_paused:
# self.pause_time = datetime.now()
#
# def unpause(self):
# """
# If this entry is paused, unpause it
# """
# if self.is_paused:
# delta = datetime.now() - self.pause_time
# self.seconds_paused += delta.seconds
# self.pause_time = None
#
# def toggle_paused(self):
# """
# Toggle the paused state of this entry. If the entry is already paused,
# it will be unpaused; if it is not paused, it will be paused.
# """
# if self.is_paused:
# self.unpause()
# else:
# self.pause()
#
# def __is_closed(self):
# """
# Determine whether this entry has been closed or not
# """
# return self.end_time != None
# is_closed = property(__is_closed)
#
# def clock_in(self, user, project):
# """
# Set this entry up for saving the first time, as an open entry.
# """
# if not self.is_closed:
# self.user = user
# self.project = project
# self.site = CURRENT_SITE
# self.start_time = datetime.now()
#
# def clock_out(self, activity, comments):
# """
# Save some vital pieces of information about this entry upon closing
# """
# if self.is_paused:
# self.unpause()
#
# if not self.is_closed:
# self.end_time = datetime.now()
# self.activity = activity
# self.comments = comments
#
# def __delete_key(self):
# """
# Make it a little more interesting for deleting logs
# """
# salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
# try:
# import hashlib
# except ImportError:
# import sha
# key = sha.new(salt).hexdigest()
# else:
# key = hashlib.sha1(salt).hexdigest()
# return key
# delete_key = property(__delete_key)
#
# def __unicode__(self):
# """
# The string representation of an instance of this class
# """
# return '%s on %s' % (self.user, self.project)
#
# class Meta:
# ordering = ['-start_time']
# verbose_name_plural = 'entries'
# permissions = (
# ('can_clock_in', 'Can use Pendulum to clock in'),
# ('can_pause', 'Can pause and unpause log entries'),
# ('can_clock_out', 'Can use Pendulum to clock out'),
# )
, which may contain function names, class names, or code. Output only the next line. | response = self.client.login(username=VALID_USER, password=VALID_PASSWORD) |
Continue the code snippet: <|code_start|>
class AddEntryTestCase(TestCase):
"""
Rules for adding an entry:
- User is logged in
- Project is specified
- Start time is in the past
- End time is in the past
- Start time is before end time
"""
fixtures = ['activities', 'projects', 'users', 'entries']
first_run = True
def setUp(self):
self.client = Client()
if self.first_run:
# try adding an entry before being logged in
response = self.get_response()
self.assertEquals(response.status_code, 302)
self.first_run = False
# log in
<|code_end|>
. Use current file imports:
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry
from datetime import datetime, timedelta
and context (classes, functions, or code) from other files:
# Path: pendulum/tests/utils.py
# VALID_USER, VALID_PASSWORD = 'testuser', 'password'
# def ffd(date):
#
# Path: pendulum/models.py
# class Entry(models.Model):
# """
# This class is where all of the time logs are taken care of
# """
#
# user = models.ForeignKey(User, related_name='pendulum_entries')
# project = models.ForeignKey(Project,
# limit_choices_to={'is_active': True,
# 'sites': CURRENT_SITE},
# related_name='entries')
# activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
# start_time = models.DateTimeField()
# end_time = models.DateTimeField(blank=True, null=True)
# seconds_paused = models.PositiveIntegerField(default=0)
# pause_time = models.DateTimeField(blank=True, null=True)
# comments = models.TextField(blank=True, null=True)
# date_updated = models.DateTimeField(auto_now=True)
# site = models.ForeignKey(Site, related_name='pendulum_entries')
#
# objects = EntryManager()
#
# def get_seconds(self):
# """
# Determines the difference between the starting and ending time. The
# result is returned as an integer of seconds.
# """
# if self.start_time and self.end_time:
# # only calculate when the start and end are defined
# delta = self.end_time - self.start_time
# seconds = delta.seconds - self.seconds_paused
# else:
# seconds = 0
# delta = timedelta(days=0)
#
# return seconds + (delta.days * 86400)
#
# def __total_hours(self):
# """
# Determined the total number of hours worked in this entry
# """
# return self.get_seconds() / 3600.0
# total_hours = property(__total_hours)
#
# def __total_time(self):
# """
# Determines the amount of time spent and return it as a string formatted
# as HH:MM:SS
# """
# return utils.get_total_time(self.get_seconds())
# total_time = property(__total_time)
#
# def __paused_time(self):
# """
# Returns the total time paused for this entry in HH:MM:SS format
# """
# return utils.get_total_time(self.seconds_paused)
# paused_time = property(__paused_time)
#
# def __hours(self):
# """
# Print the hours in a nice, rounded format
# """
# return "%.02f" % self.total_hours
# hours = property(__hours)
#
# def __is_paused(self):
# """
# Determine whether or not this entry is paused
# """
# return self.pause_time != None
# is_paused = property(__is_paused)
#
# def pause(self):
# """
# If this entry is not paused, pause it.
# """
# if not self.is_paused:
# self.pause_time = datetime.now()
#
# def unpause(self):
# """
# If this entry is paused, unpause it
# """
# if self.is_paused:
# delta = datetime.now() - self.pause_time
# self.seconds_paused += delta.seconds
# self.pause_time = None
#
# def toggle_paused(self):
# """
# Toggle the paused state of this entry. If the entry is already paused,
# it will be unpaused; if it is not paused, it will be paused.
# """
# if self.is_paused:
# self.unpause()
# else:
# self.pause()
#
# def __is_closed(self):
# """
# Determine whether this entry has been closed or not
# """
# return self.end_time != None
# is_closed = property(__is_closed)
#
# def clock_in(self, user, project):
# """
# Set this entry up for saving the first time, as an open entry.
# """
# if not self.is_closed:
# self.user = user
# self.project = project
# self.site = CURRENT_SITE
# self.start_time = datetime.now()
#
# def clock_out(self, activity, comments):
# """
# Save some vital pieces of information about this entry upon closing
# """
# if self.is_paused:
# self.unpause()
#
# if not self.is_closed:
# self.end_time = datetime.now()
# self.activity = activity
# self.comments = comments
#
# def __delete_key(self):
# """
# Make it a little more interesting for deleting logs
# """
# salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
# try:
# import hashlib
# except ImportError:
# import sha
# key = sha.new(salt).hexdigest()
# else:
# key = hashlib.sha1(salt).hexdigest()
# return key
# delete_key = property(__delete_key)
#
# def __unicode__(self):
# """
# The string representation of an instance of this class
# """
# return '%s on %s' % (self.user, self.project)
#
# class Meta:
# ordering = ['-start_time']
# verbose_name_plural = 'entries'
# permissions = (
# ('can_clock_in', 'Can use Pendulum to clock in'),
# ('can_pause', 'Can pause and unpause log entries'),
# ('can_clock_out', 'Can use Pendulum to clock out'),
# )
. Output only the next line. | response = self.client.login(username=VALID_USER, password=VALID_PASSWORD) |
Next line prediction: <|code_start|>
SITE = Site.objects.get_current()
admin_url = reverse('admin_pendulum_pendulumconfiguration_add')
login_url = getattr(settings, 'LOGIN_URL', '/accounts/login/')
class PendulumMiddleware:
"""
This middleware ensures that anyone trying to access Pendulum must be
logged in. If Pendulum hasn't been configured for the current site, the
staff users will be redirected to the page to configure it.
"""
def process_request(self, request):
try:
SITE.pendulumconfiguration
<|code_end|>
. Use current file imports:
(from django.contrib.auth.views import login
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.conf import settings
from pendulum.models import PendulumConfiguration)
and context including class names, function names, or small code snippets from other files:
# Path: pendulum/models.py
# class PendulumConfiguration(models.Model):
# """
# This will hold a single record that maintains the configuration of the
# application. In the admin interface, if the configuration is marked as
# "Is Monthly", that will take precedence over the "Install date" option (even
# if the install_date and period_length fields have values). If you wish to
# use the fixed-length (install_date + period_length) setup, uncheck the
# is_monthly field.
# """
#
# # tie the configuration to one site in particular
# site = models.OneToOneField(Site, help_text="""Please choose the site that these settings will apply to.""")
#
# """
# this represents whether the application will look for all entries in a
# month-long period
# """
# is_monthly = models.BooleanField(default=True, help_text="""If you check this box, you will be forced to use the monthly mode. Uncheck it to use fixed-length period""")
#
# """
# this is used in conjunction with the monthly setup; end date is assumed
# to be month_start - 1. For example, if the periods begin on the 16th of
# each month, the end date would be assumed to be the 15th of each month
# """
# month_start = models.PositiveIntegerField(default=1, blank=True, null=True,
# help_text="""Enter 1 for periods that begin on the 1st day of each month and end on the last day of each month. Alternatively, enter any other number (between 2 and 31) for the day of the month that periods start. For example, enter 16 for periods that begin on the 16th of each month and end on the 15th of the following month.""")
#
# """
# install_date represents the date the software was installed and began
# being used. period_length represents the number of days in a period. Week-
# long periods would have a period_length of 7. Two week-long periods would
# be 14 days. You get the idea. These should be able to handle _most_
# situations (maybe not all).
# """
# install_date = models.DateField(blank=True, null=True, help_text="""The date that Pendulum was installed. Does not necessarily have to be the date, just a date to be used as a reference point for adding the number of days from period length below. For example, if you have periods with a fixed length of 2 weeks, enter 14 days for period length and choose any Sunday to be the install date.""")
# period_length = models.PositiveIntegerField(blank=True, null=True, help_text="""The number of days in the fixed-length period. For example, enter 7 days for 1-week periods or 28 for 4-week long periods.""")
#
# def __unicode__(self):
# return u'Pendulum Configuration for %s' % self.site
#
# def __current_mode(self):
# if self.is_monthly:
# return u'Month-long'
# else:
# return u'Fixed-length'
# current_mode = property(__current_mode)
. Output only the next line. | except PendulumConfiguration.DoesNotExist: |
Using the snippet: <|code_start|> return self.name
def __log_count(self):
"""
Determine the number of entries associated with this activity
"""
return self.entries.all().count()
log_count = property(__log_count)
def __total_hours(self):
"""
Determine the number of hours spent doing each type of activity
"""
times = [e.total_hours for e in self.entries.all()]
return '%.02f' % sum(times)
total_hours = property(__total_hours)
class Meta:
ordering = ['name']
verbose_name_plural = 'activities'
class EntryManager(models.Manager):
#def get_query_set(self):
# return super(EntryManager, self).get_query_set().filter(site__exact=CURRENT_SITE)
def current(self, user=None):
"""
This will pull back any log entries for the current period.
"""
try:
<|code_end|>
, determine the next line of code. You have imports:
from django.db import models
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from datetime import datetime, date, timedelta
from pendulum import utils
import hashlib
import sha
and context (class names, function names, or code) available:
# Path: pendulum/utils.py
# def determine_period(the_date=date.today(), delta=0):
# def parse_time(time_str, input_formats=None):
# def get_total_time(seconds):
# DEFAULT_TIME_FORMATS = [
# '%H:%M', # 23:15 => 23:15:00
# '%H:%M:%S', # 05:50:21 => 05:50:21
# '%I:%M:%S %p', # 11:40:53 PM => 23:40:53
# '%I:%M %p', # 6:21 AM => 06:21:00
# '%I %p', # 1 pm => 13:00:00
# '%I:%M:%S%p', # 8:45:52pm => 23:45:52
# '%I:%M%p', # 12:03am => 00:03:00
# '%I%p', # 12pm => 12:00:00
# '%H', # 22 => 22:00:00
# ]
. Output only the next line. | set = self.in_period(utils.determine_period()) |
Predict the next line for this snippet: <|code_start|>
class RemoveEntryTestCase(TestCase):
"""
Test the functionality for removing an entry
Rules for removal:
- Owned by user
- The user will be prompted to confirm their decision
"""
fixtures = ['activities', 'projects', 'users', 'entries']
def setUp(self):
self.client = Client()
# try removing an entry before being logged in
response = self.get_response(4)
self.assertEquals(response.status_code, 302)
# log in
<|code_end|>
with the help of current file imports:
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry
and context from other files:
# Path: pendulum/tests/utils.py
# VALID_USER, VALID_PASSWORD = 'testuser', 'password'
# def ffd(date):
#
# Path: pendulum/models.py
# class Entry(models.Model):
# """
# This class is where all of the time logs are taken care of
# """
#
# user = models.ForeignKey(User, related_name='pendulum_entries')
# project = models.ForeignKey(Project,
# limit_choices_to={'is_active': True,
# 'sites': CURRENT_SITE},
# related_name='entries')
# activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
# start_time = models.DateTimeField()
# end_time = models.DateTimeField(blank=True, null=True)
# seconds_paused = models.PositiveIntegerField(default=0)
# pause_time = models.DateTimeField(blank=True, null=True)
# comments = models.TextField(blank=True, null=True)
# date_updated = models.DateTimeField(auto_now=True)
# site = models.ForeignKey(Site, related_name='pendulum_entries')
#
# objects = EntryManager()
#
# def get_seconds(self):
# """
# Determines the difference between the starting and ending time. The
# result is returned as an integer of seconds.
# """
# if self.start_time and self.end_time:
# # only calculate when the start and end are defined
# delta = self.end_time - self.start_time
# seconds = delta.seconds - self.seconds_paused
# else:
# seconds = 0
# delta = timedelta(days=0)
#
# return seconds + (delta.days * 86400)
#
# def __total_hours(self):
# """
# Determined the total number of hours worked in this entry
# """
# return self.get_seconds() / 3600.0
# total_hours = property(__total_hours)
#
# def __total_time(self):
# """
# Determines the amount of time spent and return it as a string formatted
# as HH:MM:SS
# """
# return utils.get_total_time(self.get_seconds())
# total_time = property(__total_time)
#
# def __paused_time(self):
# """
# Returns the total time paused for this entry in HH:MM:SS format
# """
# return utils.get_total_time(self.seconds_paused)
# paused_time = property(__paused_time)
#
# def __hours(self):
# """
# Print the hours in a nice, rounded format
# """
# return "%.02f" % self.total_hours
# hours = property(__hours)
#
# def __is_paused(self):
# """
# Determine whether or not this entry is paused
# """
# return self.pause_time != None
# is_paused = property(__is_paused)
#
# def pause(self):
# """
# If this entry is not paused, pause it.
# """
# if not self.is_paused:
# self.pause_time = datetime.now()
#
# def unpause(self):
# """
# If this entry is paused, unpause it
# """
# if self.is_paused:
# delta = datetime.now() - self.pause_time
# self.seconds_paused += delta.seconds
# self.pause_time = None
#
# def toggle_paused(self):
# """
# Toggle the paused state of this entry. If the entry is already paused,
# it will be unpaused; if it is not paused, it will be paused.
# """
# if self.is_paused:
# self.unpause()
# else:
# self.pause()
#
# def __is_closed(self):
# """
# Determine whether this entry has been closed or not
# """
# return self.end_time != None
# is_closed = property(__is_closed)
#
# def clock_in(self, user, project):
# """
# Set this entry up for saving the first time, as an open entry.
# """
# if not self.is_closed:
# self.user = user
# self.project = project
# self.site = CURRENT_SITE
# self.start_time = datetime.now()
#
# def clock_out(self, activity, comments):
# """
# Save some vital pieces of information about this entry upon closing
# """
# if self.is_paused:
# self.unpause()
#
# if not self.is_closed:
# self.end_time = datetime.now()
# self.activity = activity
# self.comments = comments
#
# def __delete_key(self):
# """
# Make it a little more interesting for deleting logs
# """
# salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
# try:
# import hashlib
# except ImportError:
# import sha
# key = sha.new(salt).hexdigest()
# else:
# key = hashlib.sha1(salt).hexdigest()
# return key
# delete_key = property(__delete_key)
#
# def __unicode__(self):
# """
# The string representation of an instance of this class
# """
# return '%s on %s' % (self.user, self.project)
#
# class Meta:
# ordering = ['-start_time']
# verbose_name_plural = 'entries'
# permissions = (
# ('can_clock_in', 'Can use Pendulum to clock in'),
# ('can_pause', 'Can pause and unpause log entries'),
# ('can_clock_out', 'Can use Pendulum to clock out'),
# )
, which may contain function names, class names, or code. Output only the next line. | response = self.client.login(username=VALID_USER, password=VALID_PASSWORD) |
Next line prediction: <|code_start|>
class RemoveEntryTestCase(TestCase):
"""
Test the functionality for removing an entry
Rules for removal:
- Owned by user
- The user will be prompted to confirm their decision
"""
fixtures = ['activities', 'projects', 'users', 'entries']
def setUp(self):
self.client = Client()
# try removing an entry before being logged in
response = self.get_response(4)
self.assertEquals(response.status_code, 302)
# log in
<|code_end|>
. Use current file imports:
(from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry)
and context including class names, function names, or small code snippets from other files:
# Path: pendulum/tests/utils.py
# VALID_USER, VALID_PASSWORD = 'testuser', 'password'
# def ffd(date):
#
# Path: pendulum/models.py
# class Entry(models.Model):
# """
# This class is where all of the time logs are taken care of
# """
#
# user = models.ForeignKey(User, related_name='pendulum_entries')
# project = models.ForeignKey(Project,
# limit_choices_to={'is_active': True,
# 'sites': CURRENT_SITE},
# related_name='entries')
# activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
# start_time = models.DateTimeField()
# end_time = models.DateTimeField(blank=True, null=True)
# seconds_paused = models.PositiveIntegerField(default=0)
# pause_time = models.DateTimeField(blank=True, null=True)
# comments = models.TextField(blank=True, null=True)
# date_updated = models.DateTimeField(auto_now=True)
# site = models.ForeignKey(Site, related_name='pendulum_entries')
#
# objects = EntryManager()
#
# def get_seconds(self):
# """
# Determines the difference between the starting and ending time. The
# result is returned as an integer of seconds.
# """
# if self.start_time and self.end_time:
# # only calculate when the start and end are defined
# delta = self.end_time - self.start_time
# seconds = delta.seconds - self.seconds_paused
# else:
# seconds = 0
# delta = timedelta(days=0)
#
# return seconds + (delta.days * 86400)
#
# def __total_hours(self):
# """
# Determined the total number of hours worked in this entry
# """
# return self.get_seconds() / 3600.0
# total_hours = property(__total_hours)
#
# def __total_time(self):
# """
# Determines the amount of time spent and return it as a string formatted
# as HH:MM:SS
# """
# return utils.get_total_time(self.get_seconds())
# total_time = property(__total_time)
#
# def __paused_time(self):
# """
# Returns the total time paused for this entry in HH:MM:SS format
# """
# return utils.get_total_time(self.seconds_paused)
# paused_time = property(__paused_time)
#
# def __hours(self):
# """
# Print the hours in a nice, rounded format
# """
# return "%.02f" % self.total_hours
# hours = property(__hours)
#
# def __is_paused(self):
# """
# Determine whether or not this entry is paused
# """
# return self.pause_time != None
# is_paused = property(__is_paused)
#
# def pause(self):
# """
# If this entry is not paused, pause it.
# """
# if not self.is_paused:
# self.pause_time = datetime.now()
#
# def unpause(self):
# """
# If this entry is paused, unpause it
# """
# if self.is_paused:
# delta = datetime.now() - self.pause_time
# self.seconds_paused += delta.seconds
# self.pause_time = None
#
# def toggle_paused(self):
# """
# Toggle the paused state of this entry. If the entry is already paused,
# it will be unpaused; if it is not paused, it will be paused.
# """
# if self.is_paused:
# self.unpause()
# else:
# self.pause()
#
# def __is_closed(self):
# """
# Determine whether this entry has been closed or not
# """
# return self.end_time != None
# is_closed = property(__is_closed)
#
# def clock_in(self, user, project):
# """
# Set this entry up for saving the first time, as an open entry.
# """
# if not self.is_closed:
# self.user = user
# self.project = project
# self.site = CURRENT_SITE
# self.start_time = datetime.now()
#
# def clock_out(self, activity, comments):
# """
# Save some vital pieces of information about this entry upon closing
# """
# if self.is_paused:
# self.unpause()
#
# if not self.is_closed:
# self.end_time = datetime.now()
# self.activity = activity
# self.comments = comments
#
# def __delete_key(self):
# """
# Make it a little more interesting for deleting logs
# """
# salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
# try:
# import hashlib
# except ImportError:
# import sha
# key = sha.new(salt).hexdigest()
# else:
# key = hashlib.sha1(salt).hexdigest()
# return key
# delete_key = property(__delete_key)
#
# def __unicode__(self):
# """
# The string representation of an instance of this class
# """
# return '%s on %s' % (self.user, self.project)
#
# class Meta:
# ordering = ['-start_time']
# verbose_name_plural = 'entries'
# permissions = (
# ('can_clock_in', 'Can use Pendulum to clock in'),
# ('can_pause', 'Can pause and unpause log entries'),
# ('can_clock_out', 'Can use Pendulum to clock out'),
# )
. Output only the next line. | response = self.client.login(username=VALID_USER, password=VALID_PASSWORD) |
Predict the next line for this snippet: <|code_start|>
class PendulumDateTimeField(forms.SplitDateTimeField):
"""
This custom field is just a way to offer some more friendly ways to enter
a time, such as 1pm or 8:15 pm
"""
<|code_end|>
with the help of current file imports:
from django import forms
from pendulum.widgets import PendulumDateTimeWidget
from pendulum.utils import DEFAULT_TIME_FORMATS
and context from other files:
# Path: pendulum/widgets.py
# class PendulumDateTimeWidget(forms.MultiWidget):
# def __init__(self, attrs=None):
# widgets = [DateWidget, TimeWidget]
# super(PendulumDateTimeWidget, self).__init__(widgets, attrs)
#
# def decompress(self, value):
# if value:
# return [value.date(), value.time().replace(microsecond=0)]
# return [None, None]
#
# def format_output(self, rendered_widgets):
# # make things a little more accessible by adding labels to the output
# date_id = time_id = ''
# try:
# date_id = re_id.findall(rendered_widgets[0])[0]
# except:
# pass
# try:
# time_id = re_id.findall(rendered_widgets[1])[0]
# except:
# pass
#
# return mark_safe(u'''<div class="datetime">
# <label for="%s">Date:</label> %s<br />
# <label for="%s">Time:</label> %s
# </div>''' % (date_id, rendered_widgets[0], time_id, rendered_widgets[1]))
#
# Path: pendulum/utils.py
# DEFAULT_TIME_FORMATS = [
# '%H:%M', # 23:15 => 23:15:00
# '%H:%M:%S', # 05:50:21 => 05:50:21
# '%I:%M:%S %p', # 11:40:53 PM => 23:40:53
# '%I:%M %p', # 6:21 AM => 06:21:00
# '%I %p', # 1 pm => 13:00:00
# '%I:%M:%S%p', # 8:45:52pm => 23:45:52
# '%I:%M%p', # 12:03am => 00:03:00
# '%I%p', # 12pm => 12:00:00
# '%H', # 22 => 22:00:00
# ]
, which may contain function names, class names, or code. Output only the next line. | widget = PendulumDateTimeWidget |
Continue the code snippet: <|code_start|>
class PendulumDateTimeField(forms.SplitDateTimeField):
"""
This custom field is just a way to offer some more friendly ways to enter
a time, such as 1pm or 8:15 pm
"""
widget = PendulumDateTimeWidget
def __init__(self, date_formats=None, time_formats=None, help_text=None, *args, **kwargs):
<|code_end|>
. Use current file imports:
from django import forms
from pendulum.widgets import PendulumDateTimeWidget
from pendulum.utils import DEFAULT_TIME_FORMATS
and context (classes, functions, or code) from other files:
# Path: pendulum/widgets.py
# class PendulumDateTimeWidget(forms.MultiWidget):
# def __init__(self, attrs=None):
# widgets = [DateWidget, TimeWidget]
# super(PendulumDateTimeWidget, self).__init__(widgets, attrs)
#
# def decompress(self, value):
# if value:
# return [value.date(), value.time().replace(microsecond=0)]
# return [None, None]
#
# def format_output(self, rendered_widgets):
# # make things a little more accessible by adding labels to the output
# date_id = time_id = ''
# try:
# date_id = re_id.findall(rendered_widgets[0])[0]
# except:
# pass
# try:
# time_id = re_id.findall(rendered_widgets[1])[0]
# except:
# pass
#
# return mark_safe(u'''<div class="datetime">
# <label for="%s">Date:</label> %s<br />
# <label for="%s">Time:</label> %s
# </div>''' % (date_id, rendered_widgets[0], time_id, rendered_widgets[1]))
#
# Path: pendulum/utils.py
# DEFAULT_TIME_FORMATS = [
# '%H:%M', # 23:15 => 23:15:00
# '%H:%M:%S', # 05:50:21 => 05:50:21
# '%I:%M:%S %p', # 11:40:53 PM => 23:40:53
# '%I:%M %p', # 6:21 AM => 06:21:00
# '%I %p', # 1 pm => 13:00:00
# '%I:%M:%S%p', # 8:45:52pm => 23:45:52
# '%I:%M%p', # 12:03am => 00:03:00
# '%I%p', # 12pm => 12:00:00
# '%H', # 22 => 22:00:00
# ]
. Output only the next line. | time_formats = time_formats or DEFAULT_TIME_FORMATS |
Given snippet: <|code_start|>
class UpdateEntryTestCase(TestCase):
"""
Make sure that the code for updating a closed entry works as expected.
Rules for updating an entry:
- Owned by user
- Closed
- Cannot start in the future
- Cannot end in the future
- Start must be before end
"""
fixtures = ['activities', 'projects', 'users', 'entries']
first_run = True
def setUp(self):
self.client = Client()
if self.first_run:
# try updating an entry before being logged in
response = self.get_response(2)
self.assertEquals(response.status_code, 302)
# log in
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry
from datetime import datetime, timedelta
and context:
# Path: pendulum/tests/utils.py
# VALID_USER, VALID_PASSWORD = 'testuser', 'password'
# def ffd(date):
#
# Path: pendulum/models.py
# class Entry(models.Model):
# """
# This class is where all of the time logs are taken care of
# """
#
# user = models.ForeignKey(User, related_name='pendulum_entries')
# project = models.ForeignKey(Project,
# limit_choices_to={'is_active': True,
# 'sites': CURRENT_SITE},
# related_name='entries')
# activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
# start_time = models.DateTimeField()
# end_time = models.DateTimeField(blank=True, null=True)
# seconds_paused = models.PositiveIntegerField(default=0)
# pause_time = models.DateTimeField(blank=True, null=True)
# comments = models.TextField(blank=True, null=True)
# date_updated = models.DateTimeField(auto_now=True)
# site = models.ForeignKey(Site, related_name='pendulum_entries')
#
# objects = EntryManager()
#
# def get_seconds(self):
# """
# Determines the difference between the starting and ending time. The
# result is returned as an integer of seconds.
# """
# if self.start_time and self.end_time:
# # only calculate when the start and end are defined
# delta = self.end_time - self.start_time
# seconds = delta.seconds - self.seconds_paused
# else:
# seconds = 0
# delta = timedelta(days=0)
#
# return seconds + (delta.days * 86400)
#
# def __total_hours(self):
# """
# Determined the total number of hours worked in this entry
# """
# return self.get_seconds() / 3600.0
# total_hours = property(__total_hours)
#
# def __total_time(self):
# """
# Determines the amount of time spent and return it as a string formatted
# as HH:MM:SS
# """
# return utils.get_total_time(self.get_seconds())
# total_time = property(__total_time)
#
# def __paused_time(self):
# """
# Returns the total time paused for this entry in HH:MM:SS format
# """
# return utils.get_total_time(self.seconds_paused)
# paused_time = property(__paused_time)
#
# def __hours(self):
# """
# Print the hours in a nice, rounded format
# """
# return "%.02f" % self.total_hours
# hours = property(__hours)
#
# def __is_paused(self):
# """
# Determine whether or not this entry is paused
# """
# return self.pause_time != None
# is_paused = property(__is_paused)
#
# def pause(self):
# """
# If this entry is not paused, pause it.
# """
# if not self.is_paused:
# self.pause_time = datetime.now()
#
# def unpause(self):
# """
# If this entry is paused, unpause it
# """
# if self.is_paused:
# delta = datetime.now() - self.pause_time
# self.seconds_paused += delta.seconds
# self.pause_time = None
#
# def toggle_paused(self):
# """
# Toggle the paused state of this entry. If the entry is already paused,
# it will be unpaused; if it is not paused, it will be paused.
# """
# if self.is_paused:
# self.unpause()
# else:
# self.pause()
#
# def __is_closed(self):
# """
# Determine whether this entry has been closed or not
# """
# return self.end_time != None
# is_closed = property(__is_closed)
#
# def clock_in(self, user, project):
# """
# Set this entry up for saving the first time, as an open entry.
# """
# if not self.is_closed:
# self.user = user
# self.project = project
# self.site = CURRENT_SITE
# self.start_time = datetime.now()
#
# def clock_out(self, activity, comments):
# """
# Save some vital pieces of information about this entry upon closing
# """
# if self.is_paused:
# self.unpause()
#
# if not self.is_closed:
# self.end_time = datetime.now()
# self.activity = activity
# self.comments = comments
#
# def __delete_key(self):
# """
# Make it a little more interesting for deleting logs
# """
# salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
# try:
# import hashlib
# except ImportError:
# import sha
# key = sha.new(salt).hexdigest()
# else:
# key = hashlib.sha1(salt).hexdigest()
# return key
# delete_key = property(__delete_key)
#
# def __unicode__(self):
# """
# The string representation of an instance of this class
# """
# return '%s on %s' % (self.user, self.project)
#
# class Meta:
# ordering = ['-start_time']
# verbose_name_plural = 'entries'
# permissions = (
# ('can_clock_in', 'Can use Pendulum to clock in'),
# ('can_pause', 'Can pause and unpause log entries'),
# ('can_clock_out', 'Can use Pendulum to clock out'),
# )
which might include code, classes, or functions. Output only the next line. | response = self.client.login(username=VALID_USER, password=VALID_PASSWORD) |
Next line prediction: <|code_start|>
class UpdateEntryTestCase(TestCase):
"""
Make sure that the code for updating a closed entry works as expected.
Rules for updating an entry:
- Owned by user
- Closed
- Cannot start in the future
- Cannot end in the future
- Start must be before end
"""
fixtures = ['activities', 'projects', 'users', 'entries']
first_run = True
def setUp(self):
self.client = Client()
if self.first_run:
# try updating an entry before being logged in
response = self.get_response(2)
self.assertEquals(response.status_code, 302)
# log in
<|code_end|>
. Use current file imports:
(from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry
from datetime import datetime, timedelta)
and context including class names, function names, or small code snippets from other files:
# Path: pendulum/tests/utils.py
# VALID_USER, VALID_PASSWORD = 'testuser', 'password'
# def ffd(date):
#
# Path: pendulum/models.py
# class Entry(models.Model):
# """
# This class is where all of the time logs are taken care of
# """
#
# user = models.ForeignKey(User, related_name='pendulum_entries')
# project = models.ForeignKey(Project,
# limit_choices_to={'is_active': True,
# 'sites': CURRENT_SITE},
# related_name='entries')
# activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
# start_time = models.DateTimeField()
# end_time = models.DateTimeField(blank=True, null=True)
# seconds_paused = models.PositiveIntegerField(default=0)
# pause_time = models.DateTimeField(blank=True, null=True)
# comments = models.TextField(blank=True, null=True)
# date_updated = models.DateTimeField(auto_now=True)
# site = models.ForeignKey(Site, related_name='pendulum_entries')
#
# objects = EntryManager()
#
# def get_seconds(self):
# """
# Determines the difference between the starting and ending time. The
# result is returned as an integer of seconds.
# """
# if self.start_time and self.end_time:
# # only calculate when the start and end are defined
# delta = self.end_time - self.start_time
# seconds = delta.seconds - self.seconds_paused
# else:
# seconds = 0
# delta = timedelta(days=0)
#
# return seconds + (delta.days * 86400)
#
# def __total_hours(self):
# """
# Determined the total number of hours worked in this entry
# """
# return self.get_seconds() / 3600.0
# total_hours = property(__total_hours)
#
# def __total_time(self):
# """
# Determines the amount of time spent and return it as a string formatted
# as HH:MM:SS
# """
# return utils.get_total_time(self.get_seconds())
# total_time = property(__total_time)
#
# def __paused_time(self):
# """
# Returns the total time paused for this entry in HH:MM:SS format
# """
# return utils.get_total_time(self.seconds_paused)
# paused_time = property(__paused_time)
#
# def __hours(self):
# """
# Print the hours in a nice, rounded format
# """
# return "%.02f" % self.total_hours
# hours = property(__hours)
#
# def __is_paused(self):
# """
# Determine whether or not this entry is paused
# """
# return self.pause_time != None
# is_paused = property(__is_paused)
#
# def pause(self):
# """
# If this entry is not paused, pause it.
# """
# if not self.is_paused:
# self.pause_time = datetime.now()
#
# def unpause(self):
# """
# If this entry is paused, unpause it
# """
# if self.is_paused:
# delta = datetime.now() - self.pause_time
# self.seconds_paused += delta.seconds
# self.pause_time = None
#
# def toggle_paused(self):
# """
# Toggle the paused state of this entry. If the entry is already paused,
# it will be unpaused; if it is not paused, it will be paused.
# """
# if self.is_paused:
# self.unpause()
# else:
# self.pause()
#
# def __is_closed(self):
# """
# Determine whether this entry has been closed or not
# """
# return self.end_time != None
# is_closed = property(__is_closed)
#
# def clock_in(self, user, project):
# """
# Set this entry up for saving the first time, as an open entry.
# """
# if not self.is_closed:
# self.user = user
# self.project = project
# self.site = CURRENT_SITE
# self.start_time = datetime.now()
#
# def clock_out(self, activity, comments):
# """
# Save some vital pieces of information about this entry upon closing
# """
# if self.is_paused:
# self.unpause()
#
# if not self.is_closed:
# self.end_time = datetime.now()
# self.activity = activity
# self.comments = comments
#
# def __delete_key(self):
# """
# Make it a little more interesting for deleting logs
# """
# salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
# try:
# import hashlib
# except ImportError:
# import sha
# key = sha.new(salt).hexdigest()
# else:
# key = hashlib.sha1(salt).hexdigest()
# return key
# delete_key = property(__delete_key)
#
# def __unicode__(self):
# """
# The string representation of an instance of this class
# """
# return '%s on %s' % (self.user, self.project)
#
# class Meta:
# ordering = ['-start_time']
# verbose_name_plural = 'entries'
# permissions = (
# ('can_clock_in', 'Can use Pendulum to clock in'),
# ('can_pause', 'Can pause and unpause log entries'),
# ('can_clock_out', 'Can use Pendulum to clock out'),
# )
. Output only the next line. | response = self.client.login(username=VALID_USER, password=VALID_PASSWORD) |
Given the following code snippet before the placeholder: <|code_start|>
class ClockOutTestCase(TestCase):
"""
Make sure that entries can be closed properly.
Rules for clocking out:
- Entry must belong to user
- Entry must be open
- Entry may be paused, but must be unpaused after being closed
"""
fixtures = ['activities', 'projects', 'users', 'entries']
first_run = True
def setUp(self):
self.client = Client()
if self.first_run:
# try closing an entry before being logged in
response = self.get_response(2)
self.assertEquals(response.status_code, 302)
# log in
<|code_end|>
, predict the next line using imports from the current file:
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry
and context including class names, function names, and sometimes code from other files:
# Path: pendulum/tests/utils.py
# VALID_USER, VALID_PASSWORD = 'testuser', 'password'
# def ffd(date):
#
# Path: pendulum/models.py
# class Entry(models.Model):
# """
# This class is where all of the time logs are taken care of
# """
#
# user = models.ForeignKey(User, related_name='pendulum_entries')
# project = models.ForeignKey(Project,
# limit_choices_to={'is_active': True,
# 'sites': CURRENT_SITE},
# related_name='entries')
# activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
# start_time = models.DateTimeField()
# end_time = models.DateTimeField(blank=True, null=True)
# seconds_paused = models.PositiveIntegerField(default=0)
# pause_time = models.DateTimeField(blank=True, null=True)
# comments = models.TextField(blank=True, null=True)
# date_updated = models.DateTimeField(auto_now=True)
# site = models.ForeignKey(Site, related_name='pendulum_entries')
#
# objects = EntryManager()
#
# def get_seconds(self):
# """
# Determines the difference between the starting and ending time. The
# result is returned as an integer of seconds.
# """
# if self.start_time and self.end_time:
# # only calculate when the start and end are defined
# delta = self.end_time - self.start_time
# seconds = delta.seconds - self.seconds_paused
# else:
# seconds = 0
# delta = timedelta(days=0)
#
# return seconds + (delta.days * 86400)
#
# def __total_hours(self):
# """
# Determined the total number of hours worked in this entry
# """
# return self.get_seconds() / 3600.0
# total_hours = property(__total_hours)
#
# def __total_time(self):
# """
# Determines the amount of time spent and return it as a string formatted
# as HH:MM:SS
# """
# return utils.get_total_time(self.get_seconds())
# total_time = property(__total_time)
#
# def __paused_time(self):
# """
# Returns the total time paused for this entry in HH:MM:SS format
# """
# return utils.get_total_time(self.seconds_paused)
# paused_time = property(__paused_time)
#
# def __hours(self):
# """
# Print the hours in a nice, rounded format
# """
# return "%.02f" % self.total_hours
# hours = property(__hours)
#
# def __is_paused(self):
# """
# Determine whether or not this entry is paused
# """
# return self.pause_time != None
# is_paused = property(__is_paused)
#
# def pause(self):
# """
# If this entry is not paused, pause it.
# """
# if not self.is_paused:
# self.pause_time = datetime.now()
#
# def unpause(self):
# """
# If this entry is paused, unpause it
# """
# if self.is_paused:
# delta = datetime.now() - self.pause_time
# self.seconds_paused += delta.seconds
# self.pause_time = None
#
# def toggle_paused(self):
# """
# Toggle the paused state of this entry. If the entry is already paused,
# it will be unpaused; if it is not paused, it will be paused.
# """
# if self.is_paused:
# self.unpause()
# else:
# self.pause()
#
# def __is_closed(self):
# """
# Determine whether this entry has been closed or not
# """
# return self.end_time != None
# is_closed = property(__is_closed)
#
# def clock_in(self, user, project):
# """
# Set this entry up for saving the first time, as an open entry.
# """
# if not self.is_closed:
# self.user = user
# self.project = project
# self.site = CURRENT_SITE
# self.start_time = datetime.now()
#
# def clock_out(self, activity, comments):
# """
# Save some vital pieces of information about this entry upon closing
# """
# if self.is_paused:
# self.unpause()
#
# if not self.is_closed:
# self.end_time = datetime.now()
# self.activity = activity
# self.comments = comments
#
# def __delete_key(self):
# """
# Make it a little more interesting for deleting logs
# """
# salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
# try:
# import hashlib
# except ImportError:
# import sha
# key = sha.new(salt).hexdigest()
# else:
# key = hashlib.sha1(salt).hexdigest()
# return key
# delete_key = property(__delete_key)
#
# def __unicode__(self):
# """
# The string representation of an instance of this class
# """
# return '%s on %s' % (self.user, self.project)
#
# class Meta:
# ordering = ['-start_time']
# verbose_name_plural = 'entries'
# permissions = (
# ('can_clock_in', 'Can use Pendulum to clock in'),
# ('can_pause', 'Can pause and unpause log entries'),
# ('can_clock_out', 'Can use Pendulum to clock out'),
# )
. Output only the next line. | response = self.client.login(username=VALID_USER, password=VALID_PASSWORD) |
Given the code snippet: <|code_start|>
class ClockOutTestCase(TestCase):
"""
Make sure that entries can be closed properly.
Rules for clocking out:
- Entry must belong to user
- Entry must be open
- Entry may be paused, but must be unpaused after being closed
"""
fixtures = ['activities', 'projects', 'users', 'entries']
first_run = True
def setUp(self):
self.client = Client()
if self.first_run:
# try closing an entry before being logged in
response = self.get_response(2)
self.assertEquals(response.status_code, 302)
# log in
<|code_end|>
, generate the next line using the imports in this file:
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from pendulum.tests.utils import VALID_PASSWORD, VALID_USER, ffd
from pendulum.models import Entry
and context (functions, classes, or occasionally code) from other files:
# Path: pendulum/tests/utils.py
# VALID_USER, VALID_PASSWORD = 'testuser', 'password'
# def ffd(date):
#
# Path: pendulum/models.py
# class Entry(models.Model):
# """
# This class is where all of the time logs are taken care of
# """
#
# user = models.ForeignKey(User, related_name='pendulum_entries')
# project = models.ForeignKey(Project,
# limit_choices_to={'is_active': True,
# 'sites': CURRENT_SITE},
# related_name='entries')
# activity = models.ForeignKey(Activity, blank=True, null=True, related_name='entries')
# start_time = models.DateTimeField()
# end_time = models.DateTimeField(blank=True, null=True)
# seconds_paused = models.PositiveIntegerField(default=0)
# pause_time = models.DateTimeField(blank=True, null=True)
# comments = models.TextField(blank=True, null=True)
# date_updated = models.DateTimeField(auto_now=True)
# site = models.ForeignKey(Site, related_name='pendulum_entries')
#
# objects = EntryManager()
#
# def get_seconds(self):
# """
# Determines the difference between the starting and ending time. The
# result is returned as an integer of seconds.
# """
# if self.start_time and self.end_time:
# # only calculate when the start and end are defined
# delta = self.end_time - self.start_time
# seconds = delta.seconds - self.seconds_paused
# else:
# seconds = 0
# delta = timedelta(days=0)
#
# return seconds + (delta.days * 86400)
#
# def __total_hours(self):
# """
# Determined the total number of hours worked in this entry
# """
# return self.get_seconds() / 3600.0
# total_hours = property(__total_hours)
#
# def __total_time(self):
# """
# Determines the amount of time spent and return it as a string formatted
# as HH:MM:SS
# """
# return utils.get_total_time(self.get_seconds())
# total_time = property(__total_time)
#
# def __paused_time(self):
# """
# Returns the total time paused for this entry in HH:MM:SS format
# """
# return utils.get_total_time(self.seconds_paused)
# paused_time = property(__paused_time)
#
# def __hours(self):
# """
# Print the hours in a nice, rounded format
# """
# return "%.02f" % self.total_hours
# hours = property(__hours)
#
# def __is_paused(self):
# """
# Determine whether or not this entry is paused
# """
# return self.pause_time != None
# is_paused = property(__is_paused)
#
# def pause(self):
# """
# If this entry is not paused, pause it.
# """
# if not self.is_paused:
# self.pause_time = datetime.now()
#
# def unpause(self):
# """
# If this entry is paused, unpause it
# """
# if self.is_paused:
# delta = datetime.now() - self.pause_time
# self.seconds_paused += delta.seconds
# self.pause_time = None
#
# def toggle_paused(self):
# """
# Toggle the paused state of this entry. If the entry is already paused,
# it will be unpaused; if it is not paused, it will be paused.
# """
# if self.is_paused:
# self.unpause()
# else:
# self.pause()
#
# def __is_closed(self):
# """
# Determine whether this entry has been closed or not
# """
# return self.end_time != None
# is_closed = property(__is_closed)
#
# def clock_in(self, user, project):
# """
# Set this entry up for saving the first time, as an open entry.
# """
# if not self.is_closed:
# self.user = user
# self.project = project
# self.site = CURRENT_SITE
# self.start_time = datetime.now()
#
# def clock_out(self, activity, comments):
# """
# Save some vital pieces of information about this entry upon closing
# """
# if self.is_paused:
# self.unpause()
#
# if not self.is_closed:
# self.end_time = datetime.now()
# self.activity = activity
# self.comments = comments
#
# def __delete_key(self):
# """
# Make it a little more interesting for deleting logs
# """
# salt = '%i-%i-apple-%s-sauce' % (self.id, self.is_paused, self.is_closed)
# try:
# import hashlib
# except ImportError:
# import sha
# key = sha.new(salt).hexdigest()
# else:
# key = hashlib.sha1(salt).hexdigest()
# return key
# delete_key = property(__delete_key)
#
# def __unicode__(self):
# """
# The string representation of an instance of this class
# """
# return '%s on %s' % (self.user, self.project)
#
# class Meta:
# ordering = ['-start_time']
# verbose_name_plural = 'entries'
# permissions = (
# ('can_clock_in', 'Can use Pendulum to clock in'),
# ('can_pause', 'Can pause and unpause log entries'),
# ('can_clock_out', 'Can use Pendulum to clock out'),
# )
. Output only the next line. | response = self.client.login(username=VALID_USER, password=VALID_PASSWORD) |
Given snippet: <|code_start|> return ax
else:
plt.show(block=block)
def pit_filter(self, kernel_size):
"""
Filters pits in the raster. Intended for use with canopy height models (i.e. grid(0.5).interpolate("max", "z").
This function modifies the raster array **in place**.
:param kernel_size: The size of the kernel window to pass over the array. For example 3 -> 3x3 kernel window.
"""
self.array = medfilt2d(self.array, kernel_size=kernel_size)
def write(self, path):
"""
Writes the raster to a geotiff. Requires the Cloud.crs attribute to be filled by a projection string (ideally \
wkt or proj4).
:param path: The path to write to.
"""
if not self.grid.cloud.crs:
warn(
"No coordinate reference system defined. Please set the .crs attribute of the Cloud object.",
UserWarning,
)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pyfor.metrics
import rasterio
from pyfor import gisexport
from scipy.interpolate import griddata
from rasterio.transform import from_origin
from rasterio.transform import from_origin
from scipy.signal import medfilt2d
from warnings import warn
and context:
# Path: pyfor/gisexport.py
# def project_indices(indices, raster):
# def array_to_raster(array, affine, crs, path):
which might include code, classes, or functions. Output only the next line. | gisexport.array_to_raster(self.array, self._affine, self.grid.cloud.crs, path) |
Given snippet: <|code_start|> """
summary = {}
summary["Minimum (x y z)"] = [
float("{0:.2f}".format(elem)) for elem in self.data.min
]
summary["Maximum (x y z)"] = [
float("{0:.2f}".format(elem)) for elem in self.data.max
]
summary["Number of Points"] = len(self.data.points)
if hasattr(self, "extension"):
summary["File Size"] = getsize(self.filepath)
if self.extension.lower() == ".las" or self.extension.lower() == ".laz":
summary["LAS Specification"] = self.data.header.version
if self.crs is not None:
summary["CRS"] = self.crs
string_list = [key + ": " + str(val) + "\n" for key, val in summary.items()]
return "".join(str(x) for x in string_list)
def grid(self, cell_size):
"""
Generates a :class:`.Grid` object for the parent object given a cell size. \
See the documentation for :class:`.Grid` for more information.
:param cell_size: The resolution of the plot in the same units as the input file.
:return: A :class:`.Grid` object.
"""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import laspy
import plyfile
import os
import numpy as np
import pandas as pd
import matplotlib.cm as cm
import pathlib
import warnings
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from pyfor import rasterizer
from pyfor import clip
from os.path import getsize
from pyqtgraph.Qt import QtCore, QtGui
from pyfor.ground_filter import Zhang2003
from pyfor.metrics import standard_metrics_cloud
from scipy.spatial import ConvexHull
from shapely.geometry import Polygon
and context:
# Path: pyfor/rasterizer.py
# class Grid:
# class ImportedGrid(Grid):
# class Raster:
# def __init__(self, cloud, cell_size):
# def _update(self):
# def raster(self, func, dim, **kwargs):
# def empty_cells(self):
# def interpolate(self, func, dim, interp_method="nearest"):
# def metrics(self, func_dict, as_raster=False):
# def standard_metrics(self, heightbreak=0):
# def __init__(self, path, cloud):
# def _update(self):
# def __init__(self, array, grid):
# def from_rasterio(cls):
# def force_extent(self, bbox):
# def plot(self, cmap="viridis", block=False, return_plot=False):
# def pit_filter(self, kernel_size):
# def write(self, path):
# X, Y = np.mgrid[1 : self.n + 1, 1 : self.m + 1]
#
# Path: pyfor/clip.py
# def square_clip(points, bounds):
# def ray_trace(x, y, poly):
# def ray(x, y):
# def poly_clip(points, poly):
which might include code, classes, or functions. Output only the next line. | return rasterizer.Grid(self, cell_size) |
Continue the code snippet: <|code_start|> construct the intermediate bare earth model.
"""
filter = Zhang2003(cell_size)
if classified:
filter.bem(self, classified=classified)
filter.normalize(self)
else:
filter.normalize(self)
def subtract(self, path):
"""
Normalize using a pre-computed raster file, i.e. "subtract" the heights from the input raster **in place**. \
This assumes the raster and the point cloud are in the same coordinate system.
:param path: The path to the raster file, must be in a format supported by `rasterio`.
:return:
"""
imported_grid = rasterizer.ImportedGrid(path, self)
df = (
pd.DataFrame(np.flipud(imported_grid.in_raster.read(1)))
.stack()
.rename_axis(["bins_y", "bins_x"])
.reset_index(name="val")
)
df = self.data.points.reset_index().merge(df, how="left").set_index("index")
self.data.points["z"] = df["z"] - df["val"]
<|code_end|>
. Use current file imports:
import laspy
import plyfile
import os
import numpy as np
import pandas as pd
import matplotlib.cm as cm
import pathlib
import warnings
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from pyfor import rasterizer
from pyfor import clip
from os.path import getsize
from pyqtgraph.Qt import QtCore, QtGui
from pyfor.ground_filter import Zhang2003
from pyfor.metrics import standard_metrics_cloud
from scipy.spatial import ConvexHull
from shapely.geometry import Polygon
and context (classes, functions, or code) from other files:
# Path: pyfor/rasterizer.py
# class Grid:
# class ImportedGrid(Grid):
# class Raster:
# def __init__(self, cloud, cell_size):
# def _update(self):
# def raster(self, func, dim, **kwargs):
# def empty_cells(self):
# def interpolate(self, func, dim, interp_method="nearest"):
# def metrics(self, func_dict, as_raster=False):
# def standard_metrics(self, heightbreak=0):
# def __init__(self, path, cloud):
# def _update(self):
# def __init__(self, array, grid):
# def from_rasterio(cls):
# def force_extent(self, bbox):
# def plot(self, cmap="viridis", block=False, return_plot=False):
# def pit_filter(self, kernel_size):
# def write(self, path):
# X, Y = np.mgrid[1 : self.n + 1, 1 : self.m + 1]
#
# Path: pyfor/clip.py
# def square_clip(points, bounds):
# def ray_trace(x, y, poly):
# def ray(x, y):
# def poly_clip(points, poly):
. Output only the next line. | def clip(self, polygon): |
Given the following code snippet before the placeholder: <|code_start|>
def test_missing_variable():
template = '{x}'
x, y = DataItem('x', 1), DataItem('y', 2)
<|code_end|>
, predict the next line using imports from the current file:
from crosscompute.scripts.serve import parse_template_parts
from crosscompute.types import DataItem
and context including class names, function names, and sometimes code from other files:
# Path: crosscompute/scripts/serve.py
# def do(arguments=None):
# def configure_argument_parser_for_serving(a):
# def serve_with(automation, args):
# def serve(
# automation, host=HOST, port=PORT, with_browser=True,
# is_static=False, is_production=False,
# base_uri='', allowed_origins=None,
# disk_poll_in_milliseconds=DISK_POLL_IN_MILLISECONDS,
# disk_debounce_in_milliseconds=DISK_DEBOUNCE_IN_MILLISECONDS):
# L = getLogger(__name__)
. Output only the next line. | parts = parse_template_parts(template, [x, y]) |
Continue the code snippet: <|code_start|> get_absolute_path = mocker.patch(x + 'get_absolute_path')
exists = mocker.patch(x + 'exists')
posts_request.matchdict = {'path': 'x'}
tool_definition = {
'configuration_folder': TOOL_FOLDER,
'argument_names': ['a'],
'x.a_path': 'x'}
get_absolute_path.side_effect = BadPath()
with raises(HTTPNotFound):
f(posts_request, TOOL_FOLDER, tool_definition)
get_absolute_path.side_effect = None
exists.return_value = False
with raises(HTTPNotFound):
f(posts_request, TOOL_FOLDER, tool_definition)
exists.return_value = True
del tool_definition['x.a_path']
with raises(HTTPNotFound):
f(posts_request, TOOL_FOLDER, tool_definition)
def test_get_result_file_response(posts_request, result, mocker):
x = 'crosscompute.scripts.serve.'
get_absolute_path = mocker.patch(x + 'get_absolute_path')
exists = mocker.patch(x + 'exists')
posts_request.matchdict = {'folder_name': 'a', 'path': 'x'}
with raises(HTTPForbidden):
<|code_end|>
. Use current file imports:
from crosscompute.models import Result
from crosscompute.types import DataItem
from crosscompute.scripts.serve import (
get_result_file_response, get_tool_file_response,
parse_result_relative_path, parse_template_parts)
from invisibleroads_macros.disk import copy_path, make_folder
from invisibleroads_macros.exceptions import BadPath
from invisibleroads_uploads.models import Upload
from invisibleroads_uploads.tests import prepare_field_storage
from os.path import join
from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPNotFound
from pytest import raises
from webob.multidict import MultiDict
from conftest import TOOL_FOLDER, WheeType
and context (classes, functions, or code) from other files:
# Path: crosscompute/scripts/serve.py
# def do(arguments=None):
# def configure_argument_parser_for_serving(a):
# def serve_with(automation, args):
# def serve(
# automation, host=HOST, port=PORT, with_browser=True,
# is_static=False, is_production=False,
# base_uri='', allowed_origins=None,
# disk_poll_in_milliseconds=DISK_POLL_IN_MILLISECONDS,
# disk_debounce_in_milliseconds=DISK_DEBOUNCE_IN_MILLISECONDS):
# L = getLogger(__name__)
. Output only the next line. | get_result_file_response(posts_request, result) |
Here is a snippet: <|code_start|> tool_definition['argument_names'] = ('x_path',)
# Use bad upload_id
raw_arguments = MultiDict({'x': 'a'})
with raises(HTTPBadRequest) as e:
result_request.prepare_arguments(tool_definition, raw_arguments)
assert e.value.detail['x'] == 'invalid'
# Use upload_id that does not have expected data_type
upload = Upload.save(data_folder, 'anonymous', 32, 'x.txt', 'x')
raw_arguments = MultiDict({'x': upload.id})
with raises(HTTPBadRequest) as e:
result_request.prepare_arguments(tool_definition, raw_arguments)
assert e.value.detail['x'] == 'invalid'
# Use upload_id that has expected data_type
upload = Upload.save(data_folder, 'anonymous', 32, 'x.txt', 'whee')
copy_path(join(upload.folder, WheeType.get_file_name()), upload.path)
raw_arguments = MultiDict({'x': upload.id})
result = result_request.prepare_arguments(
tool_definition, raw_arguments)
assert open(result.arguments['x_path']).read() == 'whee'
def test_parse_result_relative_path():
f = parse_result_relative_path
for x in ('', '1', '1/x', '1/a/x'):
with raises(ValueError):
f(x)
f('1/x/a')
def test_get_tool_file_response(posts_request, mocker):
<|code_end|>
. Write the next line using the current file imports:
from crosscompute.models import Result
from crosscompute.types import DataItem
from crosscompute.scripts.serve import (
get_result_file_response, get_tool_file_response,
parse_result_relative_path, parse_template_parts)
from invisibleroads_macros.disk import copy_path, make_folder
from invisibleroads_macros.exceptions import BadPath
from invisibleroads_uploads.models import Upload
from invisibleroads_uploads.tests import prepare_field_storage
from os.path import join
from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPNotFound
from pytest import raises
from webob.multidict import MultiDict
from conftest import TOOL_FOLDER, WheeType
and context from other files:
# Path: crosscompute/scripts/serve.py
# def do(arguments=None):
# def configure_argument_parser_for_serving(a):
# def serve_with(automation, args):
# def serve(
# automation, host=HOST, port=PORT, with_browser=True,
# is_static=False, is_production=False,
# base_uri='', allowed_origins=None,
# disk_poll_in_milliseconds=DISK_POLL_IN_MILLISECONDS,
# disk_debounce_in_milliseconds=DISK_DEBOUNCE_IN_MILLISECONDS):
# L = getLogger(__name__)
, which may include functions, classes, or code. Output only the next line. | f = get_tool_file_response |
Given snippet: <|code_start|> # Use good path
raw_arguments = MultiDict({'x': 'xyz/x/x.txt'})
result = result_request.prepare_arguments(
tool_definition, raw_arguments)
assert open(result.arguments['x_path']).read() == 'whee'
def test_accept_upload_id(
self, result_request, tool_definition, data_folder):
tool_definition['argument_names'] = ('x_path',)
# Use bad upload_id
raw_arguments = MultiDict({'x': 'a'})
with raises(HTTPBadRequest) as e:
result_request.prepare_arguments(tool_definition, raw_arguments)
assert e.value.detail['x'] == 'invalid'
# Use upload_id that does not have expected data_type
upload = Upload.save(data_folder, 'anonymous', 32, 'x.txt', 'x')
raw_arguments = MultiDict({'x': upload.id})
with raises(HTTPBadRequest) as e:
result_request.prepare_arguments(tool_definition, raw_arguments)
assert e.value.detail['x'] == 'invalid'
# Use upload_id that has expected data_type
upload = Upload.save(data_folder, 'anonymous', 32, 'x.txt', 'whee')
copy_path(join(upload.folder, WheeType.get_file_name()), upload.path)
raw_arguments = MultiDict({'x': upload.id})
result = result_request.prepare_arguments(
tool_definition, raw_arguments)
assert open(result.arguments['x_path']).read() == 'whee'
def test_parse_result_relative_path():
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from crosscompute.models import Result
from crosscompute.types import DataItem
from crosscompute.scripts.serve import (
get_result_file_response, get_tool_file_response,
parse_result_relative_path, parse_template_parts)
from invisibleroads_macros.disk import copy_path, make_folder
from invisibleroads_macros.exceptions import BadPath
from invisibleroads_uploads.models import Upload
from invisibleroads_uploads.tests import prepare_field_storage
from os.path import join
from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPNotFound
from pytest import raises
from webob.multidict import MultiDict
from conftest import TOOL_FOLDER, WheeType
and context:
# Path: crosscompute/scripts/serve.py
# def do(arguments=None):
# def configure_argument_parser_for_serving(a):
# def serve_with(automation, args):
# def serve(
# automation, host=HOST, port=PORT, with_browser=True,
# is_static=False, is_production=False,
# base_uri='', allowed_origins=None,
# disk_poll_in_milliseconds=DISK_POLL_IN_MILLISECONDS,
# disk_debounce_in_milliseconds=DISK_DEBOUNCE_IN_MILLISECONDS):
# L = getLogger(__name__)
which might include code, classes, or functions. Output only the next line. | f = parse_result_relative_path |
Here is a snippet: <|code_start|>
class TestParseTemplate(object):
def test_accept_whitespace(self):
data_item = DataItem('x', '')
data_items = [data_item]
<|code_end|>
. Write the next line using the current file imports:
from crosscompute.models import Result
from crosscompute.types import DataItem
from crosscompute.scripts.serve import (
get_result_file_response, get_tool_file_response,
parse_result_relative_path, parse_template_parts)
from invisibleroads_macros.disk import copy_path, make_folder
from invisibleroads_macros.exceptions import BadPath
from invisibleroads_uploads.models import Upload
from invisibleroads_uploads.tests import prepare_field_storage
from os.path import join
from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPNotFound
from pytest import raises
from webob.multidict import MultiDict
from conftest import TOOL_FOLDER, WheeType
and context from other files:
# Path: crosscompute/scripts/serve.py
# def do(arguments=None):
# def configure_argument_parser_for_serving(a):
# def serve_with(automation, args):
# def serve(
# automation, host=HOST, port=PORT, with_browser=True,
# is_static=False, is_production=False,
# base_uri='', allowed_origins=None,
# disk_poll_in_milliseconds=DISK_POLL_IN_MILLISECONDS,
# disk_debounce_in_milliseconds=DISK_DEBOUNCE_IN_MILLISECONDS):
# L = getLogger(__name__)
, which may include functions, classes, or code. Output only the next line. | assert data_items == parse_template_parts('{x}', data_items) |
Continue the code snippet: <|code_start|> target_path = tmpdir.join('variables.json').strpath
value = 1
variable_id = 'a'
value_by_id_by_path = defaultdict(dict)
save_text_json(target_path, value, variable_id, value_by_id_by_path)
assert value_by_id_by_path[target_path][variable_id] == value
def test_save_text_txt(tmpdir):
target_path = tmpdir.join('book.txt').strpath
value = 'whee'
variable_id = 'a'
value_by_id_by_path = defaultdict(dict)
save_text_txt(target_path, value, variable_id, value_by_id_by_path)
assert load_text_txt(target_path, variable_id) == value
def test_save_image_png(tmpdir):
target_path = tmpdir.join('image.png').strpath
value = 'iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAAFUlEQVQI12P8//8/AzbAxIADDE4JAFbUAw1h62h+AAAAAElFTkSuQmCC'
variable_id = 'a'
value_by_id_by_path = defaultdict(dict)
save_image_png(target_path, value, variable_id, value_by_id_by_path)
assert load_image_png(target_path, variable_id) == value
def test_save_number_json(tmpdir):
target_path = tmpdir.join('variables.json').strpath
variable_id = 'a'
value_by_id_by_path = defaultdict(dict)
<|code_end|>
. Use current file imports:
import json
from collections import defaultdict
from pytest import raises
from crosscompute.exceptions import (
CrossComputeExecutionError)
from crosscompute.routines import (
load_image_png,
load_map_geojson,
load_markdown_md,
load_number_json,
load_table_csv,
load_text_json,
load_text_txt,
load_value_json,
render_object,
save_image_png,
save_map_geojson,
save_markdown_md,
save_number_json,
save_table_csv,
save_text_json,
save_text_txt)
and context (classes, functions, or code) from other files:
# Path: crosscompute/exceptions.py
# class CrossComputeExecutionError(CrossComputeError):
# pass
. Output only the next line. | with raises(CrossComputeExecutionError): |
Given snippet: <|code_start|> if 'path' in variable_configuration:
mode_name = variable_definition.mode_name
path = self.folder / mode_name / variable_configuration['path']
try:
variable_configuration.update(json.load(open(path, 'rt')))
except OSError:
L.error('path not found %s', format_path(path))
except json.JSONDecodeError:
L.error('must be json %s', format_path(path))
except TypeError:
L.error('must contain a dictionary %s', format_path(path))
return variable_configuration
def get_data(self, variable_definition):
variable_path = variable_definition.path
if variable_path == 'ENVIRONMENT':
return {}
mode_name = variable_definition.mode_name
variable_id = variable_definition.id
try:
variable_data = load_variable_data(
self.folder / mode_name / variable_path, variable_id)
except CrossComputeDataError as e:
return {'error': e}
return variable_data
def get_data_uri(self, variable_definition, element):
base_uri = element.base_uri
automation_uri = self.automation_definition.uri
batch_uri = self.batch_definition.uri
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import json
from invisibleroads_macros_log import format_path
from logging import getLogger
from ..constants import (
MODE_CODE_BY_NAME,
MODE_ROUTE,
VARIABLE_ROUTE)
from ..exceptions import (
CrossComputeDataError)
from .interface import Batch
from .variable import (
load_variable_data)
and context:
# Path: crosscompute/constants.py
# MODE_CODE_BY_NAME = {k: v for v, k in MODE_NAME_BY_CODE.items()}
#
# MODE_ROUTE = '/{mode_code}'
#
# VARIABLE_ROUTE = '/{variable_id}'
#
# Path: crosscompute/exceptions.py
# class CrossComputeDataError(CrossComputeError):
# pass
#
# Path: crosscompute/routines/interface.py
# class Batch(ABC):
#
# def get_data(self, variable_definition):
# '''
# Get the data of the variable in one of the following formats:
# {}
# {'value': 1}
# {'path': '/a/b/c.png'}
# {'uri': 'upload:xyz'}
# {'error': 'message'}
# '''
# return {}
#
# def get_data_uri(self, variable_definition):
# 'Get the resolved variable data uri'
# return ''
#
# def get_data_configuration(self, variable_definition):
# 'Get the resolved variable configuration'
# return {}
#
# Path: crosscompute/routines/variable.py
# def load_variable_data(path, variable_id):
# file_data = FILE_DATA_CACHE[path]
# if path.suffix == '.dictionary':
# file_value = file_data['value']
# try:
# variable_value = file_value[variable_id]
# except KeyError:
# raise CrossComputeDataError(
# f'variable {variable_id} not found in {format_path(path)}')
# variable_data = {'value': variable_value}
# else:
# variable_data = file_data
# return variable_data
which might include code, classes, or functions. Output only the next line. | mode_code = MODE_CODE_BY_NAME[variable_definition.mode_name] |
Predict the next line after this snippet: <|code_start|> mode_name = variable_definition.mode_name
path = self.folder / mode_name / variable_configuration['path']
try:
variable_configuration.update(json.load(open(path, 'rt')))
except OSError:
L.error('path not found %s', format_path(path))
except json.JSONDecodeError:
L.error('must be json %s', format_path(path))
except TypeError:
L.error('must contain a dictionary %s', format_path(path))
return variable_configuration
def get_data(self, variable_definition):
variable_path = variable_definition.path
if variable_path == 'ENVIRONMENT':
return {}
mode_name = variable_definition.mode_name
variable_id = variable_definition.id
try:
variable_data = load_variable_data(
self.folder / mode_name / variable_path, variable_id)
except CrossComputeDataError as e:
return {'error': e}
return variable_data
def get_data_uri(self, variable_definition, element):
base_uri = element.base_uri
automation_uri = self.automation_definition.uri
batch_uri = self.batch_definition.uri
mode_code = MODE_CODE_BY_NAME[variable_definition.mode_name]
<|code_end|>
using the current file's imports:
import json
from invisibleroads_macros_log import format_path
from logging import getLogger
from ..constants import (
MODE_CODE_BY_NAME,
MODE_ROUTE,
VARIABLE_ROUTE)
from ..exceptions import (
CrossComputeDataError)
from .interface import Batch
from .variable import (
load_variable_data)
and any relevant context from other files:
# Path: crosscompute/constants.py
# MODE_CODE_BY_NAME = {k: v for v, k in MODE_NAME_BY_CODE.items()}
#
# MODE_ROUTE = '/{mode_code}'
#
# VARIABLE_ROUTE = '/{variable_id}'
#
# Path: crosscompute/exceptions.py
# class CrossComputeDataError(CrossComputeError):
# pass
#
# Path: crosscompute/routines/interface.py
# class Batch(ABC):
#
# def get_data(self, variable_definition):
# '''
# Get the data of the variable in one of the following formats:
# {}
# {'value': 1}
# {'path': '/a/b/c.png'}
# {'uri': 'upload:xyz'}
# {'error': 'message'}
# '''
# return {}
#
# def get_data_uri(self, variable_definition):
# 'Get the resolved variable data uri'
# return ''
#
# def get_data_configuration(self, variable_definition):
# 'Get the resolved variable configuration'
# return {}
#
# Path: crosscompute/routines/variable.py
# def load_variable_data(path, variable_id):
# file_data = FILE_DATA_CACHE[path]
# if path.suffix == '.dictionary':
# file_value = file_data['value']
# try:
# variable_value = file_value[variable_id]
# except KeyError:
# raise CrossComputeDataError(
# f'variable {variable_id} not found in {format_path(path)}')
# variable_data = {'value': variable_value}
# else:
# variable_data = file_data
# return variable_data
. Output only the next line. | mode_uri = MODE_ROUTE.format(mode_code=mode_code) |
Given the following code snippet before the placeholder: <|code_start|> path = self.folder / mode_name / variable_configuration['path']
try:
variable_configuration.update(json.load(open(path, 'rt')))
except OSError:
L.error('path not found %s', format_path(path))
except json.JSONDecodeError:
L.error('must be json %s', format_path(path))
except TypeError:
L.error('must contain a dictionary %s', format_path(path))
return variable_configuration
def get_data(self, variable_definition):
variable_path = variable_definition.path
if variable_path == 'ENVIRONMENT':
return {}
mode_name = variable_definition.mode_name
variable_id = variable_definition.id
try:
variable_data = load_variable_data(
self.folder / mode_name / variable_path, variable_id)
except CrossComputeDataError as e:
return {'error': e}
return variable_data
def get_data_uri(self, variable_definition, element):
base_uri = element.base_uri
automation_uri = self.automation_definition.uri
batch_uri = self.batch_definition.uri
mode_code = MODE_CODE_BY_NAME[variable_definition.mode_name]
mode_uri = MODE_ROUTE.format(mode_code=mode_code)
<|code_end|>
, predict the next line using imports from the current file:
import json
from invisibleroads_macros_log import format_path
from logging import getLogger
from ..constants import (
MODE_CODE_BY_NAME,
MODE_ROUTE,
VARIABLE_ROUTE)
from ..exceptions import (
CrossComputeDataError)
from .interface import Batch
from .variable import (
load_variable_data)
and context including class names, function names, and sometimes code from other files:
# Path: crosscompute/constants.py
# MODE_CODE_BY_NAME = {k: v for v, k in MODE_NAME_BY_CODE.items()}
#
# MODE_ROUTE = '/{mode_code}'
#
# VARIABLE_ROUTE = '/{variable_id}'
#
# Path: crosscompute/exceptions.py
# class CrossComputeDataError(CrossComputeError):
# pass
#
# Path: crosscompute/routines/interface.py
# class Batch(ABC):
#
# def get_data(self, variable_definition):
# '''
# Get the data of the variable in one of the following formats:
# {}
# {'value': 1}
# {'path': '/a/b/c.png'}
# {'uri': 'upload:xyz'}
# {'error': 'message'}
# '''
# return {}
#
# def get_data_uri(self, variable_definition):
# 'Get the resolved variable data uri'
# return ''
#
# def get_data_configuration(self, variable_definition):
# 'Get the resolved variable configuration'
# return {}
#
# Path: crosscompute/routines/variable.py
# def load_variable_data(path, variable_id):
# file_data = FILE_DATA_CACHE[path]
# if path.suffix == '.dictionary':
# file_value = file_data['value']
# try:
# variable_value = file_value[variable_id]
# except KeyError:
# raise CrossComputeDataError(
# f'variable {variable_id} not found in {format_path(path)}')
# variable_data = {'value': variable_value}
# else:
# variable_data = file_data
# return variable_data
. Output only the next line. | variable_uri = VARIABLE_ROUTE.format( |
Given the code snippet: <|code_start|>
def __init__(self, automation_definition, batch_definition):
self.automation_definition = automation_definition
self.batch_definition = batch_definition
self.folder = automation_definition.folder / batch_definition.folder
def get_variable_configuration(self, variable_definition):
variable_configuration = variable_definition.configuration
if 'path' in variable_configuration:
mode_name = variable_definition.mode_name
path = self.folder / mode_name / variable_configuration['path']
try:
variable_configuration.update(json.load(open(path, 'rt')))
except OSError:
L.error('path not found %s', format_path(path))
except json.JSONDecodeError:
L.error('must be json %s', format_path(path))
except TypeError:
L.error('must contain a dictionary %s', format_path(path))
return variable_configuration
def get_data(self, variable_definition):
variable_path = variable_definition.path
if variable_path == 'ENVIRONMENT':
return {}
mode_name = variable_definition.mode_name
variable_id = variable_definition.id
try:
variable_data = load_variable_data(
self.folder / mode_name / variable_path, variable_id)
<|code_end|>
, generate the next line using the imports in this file:
import json
from invisibleroads_macros_log import format_path
from logging import getLogger
from ..constants import (
MODE_CODE_BY_NAME,
MODE_ROUTE,
VARIABLE_ROUTE)
from ..exceptions import (
CrossComputeDataError)
from .interface import Batch
from .variable import (
load_variable_data)
and context (functions, classes, or occasionally code) from other files:
# Path: crosscompute/constants.py
# MODE_CODE_BY_NAME = {k: v for v, k in MODE_NAME_BY_CODE.items()}
#
# MODE_ROUTE = '/{mode_code}'
#
# VARIABLE_ROUTE = '/{variable_id}'
#
# Path: crosscompute/exceptions.py
# class CrossComputeDataError(CrossComputeError):
# pass
#
# Path: crosscompute/routines/interface.py
# class Batch(ABC):
#
# def get_data(self, variable_definition):
# '''
# Get the data of the variable in one of the following formats:
# {}
# {'value': 1}
# {'path': '/a/b/c.png'}
# {'uri': 'upload:xyz'}
# {'error': 'message'}
# '''
# return {}
#
# def get_data_uri(self, variable_definition):
# 'Get the resolved variable data uri'
# return ''
#
# def get_data_configuration(self, variable_definition):
# 'Get the resolved variable configuration'
# return {}
#
# Path: crosscompute/routines/variable.py
# def load_variable_data(path, variable_id):
# file_data = FILE_DATA_CACHE[path]
# if path.suffix == '.dictionary':
# file_value = file_data['value']
# try:
# variable_value = file_value[variable_id]
# except KeyError:
# raise CrossComputeDataError(
# f'variable {variable_id} not found in {format_path(path)}')
# variable_data = {'value': variable_value}
# else:
# variable_data = file_data
# return variable_data
. Output only the next line. | except CrossComputeDataError as e: |
Here is a snippet: <|code_start|>
class DiskBatch(Batch):
def __init__(self, automation_definition, batch_definition):
self.automation_definition = automation_definition
self.batch_definition = batch_definition
self.folder = automation_definition.folder / batch_definition.folder
def get_variable_configuration(self, variable_definition):
variable_configuration = variable_definition.configuration
if 'path' in variable_configuration:
mode_name = variable_definition.mode_name
path = self.folder / mode_name / variable_configuration['path']
try:
variable_configuration.update(json.load(open(path, 'rt')))
except OSError:
L.error('path not found %s', format_path(path))
except json.JSONDecodeError:
L.error('must be json %s', format_path(path))
except TypeError:
L.error('must contain a dictionary %s', format_path(path))
return variable_configuration
def get_data(self, variable_definition):
variable_path = variable_definition.path
if variable_path == 'ENVIRONMENT':
return {}
mode_name = variable_definition.mode_name
variable_id = variable_definition.id
try:
<|code_end|>
. Write the next line using the current file imports:
import json
from invisibleroads_macros_log import format_path
from logging import getLogger
from ..constants import (
MODE_CODE_BY_NAME,
MODE_ROUTE,
VARIABLE_ROUTE)
from ..exceptions import (
CrossComputeDataError)
from .interface import Batch
from .variable import (
load_variable_data)
and context from other files:
# Path: crosscompute/constants.py
# MODE_CODE_BY_NAME = {k: v for v, k in MODE_NAME_BY_CODE.items()}
#
# MODE_ROUTE = '/{mode_code}'
#
# VARIABLE_ROUTE = '/{variable_id}'
#
# Path: crosscompute/exceptions.py
# class CrossComputeDataError(CrossComputeError):
# pass
#
# Path: crosscompute/routines/interface.py
# class Batch(ABC):
#
# def get_data(self, variable_definition):
# '''
# Get the data of the variable in one of the following formats:
# {}
# {'value': 1}
# {'path': '/a/b/c.png'}
# {'uri': 'upload:xyz'}
# {'error': 'message'}
# '''
# return {}
#
# def get_data_uri(self, variable_definition):
# 'Get the resolved variable data uri'
# return ''
#
# def get_data_configuration(self, variable_definition):
# 'Get the resolved variable configuration'
# return {}
#
# Path: crosscompute/routines/variable.py
# def load_variable_data(path, variable_id):
# file_data = FILE_DATA_CACHE[path]
# if path.suffix == '.dictionary':
# file_value = file_data['value']
# try:
# variable_value = file_value[variable_id]
# except KeyError:
# raise CrossComputeDataError(
# f'variable {variable_id} not found in {format_path(path)}')
# variable_data = {'value': variable_value}
# else:
# variable_data = file_data
# return variable_data
, which may include functions, classes, or code. Output only the next line. | variable_data = load_variable_data( |
Predict the next line for this snippet: <|code_start|> if '</p>\n<p>' not in html:
html = html.removeprefix('<p>')
html = html.removesuffix('</p>')
return html
def is_port_in_use(port):
# https://stackoverflow.com/a/52872579
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('127.0.0.1', int(port))) == 0
def open_browser(uri, check_interval_in_seconds=1):
def wait_then_run():
try:
while True:
try:
open_uri(uri)
except HTTPError as e:
L.error(e)
return
except URLError:
sleep(check_interval_in_seconds)
else:
break
webbrowser.open(uri)
except KeyboardInterrupt:
pass
<|code_end|>
with the help of current file imports:
import socket
import webbrowser
from invisibleroads_macros_text import normalize_key
from logging import getLogger
from markdown import markdown
from time import sleep
from urllib.error import HTTPError, URLError
from urllib.request import urlopen as open_uri
from .process import LoggableProcess
and context from other files:
# Path: crosscompute/macros/process.py
# class LoggableProcess(Process):
#
# def start(self, *args, **kwargs):
# super().start(*args, **kwargs)
# L.debug(
# 'started %s%s process %s', self.name,
# ' daemon' if self.daemon else '', self.ident)
, which may contain function names, class names, or code. Output only the next line. | process = LoggableProcess(name='browser', target=wait_then_run) |
Based on the snippet: <|code_start|>
def test_format_slug():
assert format_slug('a b,c') == 'a-b-c'
def test_get_html_from_markdown():
<|code_end|>
, predict the immediate next line with the help of imports:
from crosscompute.macros.web import (
format_slug,
get_html_from_markdown)
and context (classes, functions, sometimes code) from other files:
# Path: crosscompute/macros/web.py
# def format_slug(text):
# return normalize_key(text, word_separator='-')
#
# def get_html_from_markdown(text):
# html = markdown(text)
# if '</p>\n<p>' not in html:
# html = html.removeprefix('<p>')
# html = html.removesuffix('</p>')
# return html
. Output only the next line. | html = get_html_from_markdown('x') |
Here is a snippet: <|code_start|>
def test_run_command(tmp_path):
o_path = tmp_path / 'o.txt'
e_path = tmp_path / 'e.txt'
with open(o_path, 'wt') as o_file, open(e_path, 'w+t') as e_file:
<|code_end|>
. Write the next line using the current file imports:
from pytest import raises
from crosscompute.exceptions import CrossComputeExecutionError
from crosscompute.routines.automation import _run_command
and context from other files:
# Path: crosscompute/exceptions.py
# class CrossComputeExecutionError(CrossComputeError):
# pass
#
# Path: crosscompute/routines/automation.py
# def _run_command(
# command_string, command_folder, script_environment, o_file, e_file):
# try:
# process = subprocess.run(
# shlex.split(command_string),
# check=True,
# cwd=command_folder,
# env=script_environment,
# stdout=o_file,
# stderr=e_file)
# except (IndexError, OSError):
# e = CrossComputeExecutionError(
# f'could not run {shlex.quote(command_string)} in {command_folder}')
# e.return_code = Error.COMMAND_NOT_FOUND
# raise e
# except subprocess.CalledProcessError as e:
# e_file.seek(0)
# error_text = e_file.read().rstrip()
# error = CrossComputeExecutionError(error_text)
# error.return_code = e.returncode
# raise error
# return process.returncode
, which may include functions, classes, or code. Output only the next line. | with raises(CrossComputeExecutionError): |
Continue the code snippet: <|code_start|>
def test_run_command(tmp_path):
o_path = tmp_path / 'o.txt'
e_path = tmp_path / 'e.txt'
with open(o_path, 'wt') as o_file, open(e_path, 'w+t') as e_file:
with raises(CrossComputeExecutionError):
<|code_end|>
. Use current file imports:
from pytest import raises
from crosscompute.exceptions import CrossComputeExecutionError
from crosscompute.routines.automation import _run_command
and context (classes, functions, or code) from other files:
# Path: crosscompute/exceptions.py
# class CrossComputeExecutionError(CrossComputeError):
# pass
#
# Path: crosscompute/routines/automation.py
# def _run_command(
# command_string, command_folder, script_environment, o_file, e_file):
# try:
# process = subprocess.run(
# shlex.split(command_string),
# check=True,
# cwd=command_folder,
# env=script_environment,
# stdout=o_file,
# stderr=e_file)
# except (IndexError, OSError):
# e = CrossComputeExecutionError(
# f'could not run {shlex.quote(command_string)} in {command_folder}')
# e.return_code = Error.COMMAND_NOT_FOUND
# raise e
# except subprocess.CalledProcessError as e:
# e_file.seek(0)
# error_text = e_file.read().rstrip()
# error = CrossComputeExecutionError(error_text)
# error.return_code = e.returncode
# raise error
# return process.returncode
. Output only the next line. | _run_command('', tmp_path, {}, o_file, e_file) |
Predict the next line after this snippet: <|code_start|> }
class LinkView(VariableView):
view_name = 'link'
def render_output(self, b: Batch, x: Element):
variable_definition = self.variable_definition
data_uri = b.get_data_uri(variable_definition, x)
c = b.get_variable_configuration(variable_definition)
name = c.get('name', basename(self.variable_path))
text = c.get('text', name)
body_text = (
f'<a id="{x.id}" href="{data_uri}" '
f'class="{self.mode_name} {self.view_name} {self.variable_id}" '
f'download="{name}">'
f'{text}</a>')
return {
'css_uris': [],
'js_uris': [],
'body_text': body_text,
'js_texts': [],
}
class StringView(VariableView):
view_name = 'string'
input_type = 'text'
<|code_end|>
using the current file's imports:
import csv
import json
import shutil
from dataclasses import dataclass
from importlib_metadata import entry_points
from invisibleroads_macros_log import format_path
from logging import getLogger
from os.path import basename, exists
from string import Template
from ..constants import (
FUNCTION_BY_NAME,
MAXIMUM_FILE_CACHE_LENGTH,
VARIABLE_ID_PATTERN)
from ..exceptions import (
CrossComputeConfigurationError,
CrossComputeDataError)
from ..macros.disk import FileCache
from ..macros.package import import_attribute
from ..macros.web import get_html_from_markdown
from .interface import Batch
and any relevant context from other files:
# Path: crosscompute/constants.py
# FUNCTION_BY_NAME = {
# 'slug': format_slug,
# 'title': str.title,
# }
#
# MAXIMUM_FILE_CACHE_LENGTH = 256
#
# VARIABLE_ID_PATTERN = re.compile(r'{\s*([^}]+?)\s*}')
#
# Path: crosscompute/exceptions.py
# class CrossComputeConfigurationError(CrossComputeError):
# pass
#
# class CrossComputeDataError(CrossComputeError):
# pass
#
# Path: crosscompute/macros/disk.py
# class FileCache(LRUDict):
#
# def __init__(self, *args, load_file_data, maximum_length: int, **kwargs):
# super().__init__(*args, maximum_length=maximum_length, **kwargs)
# self._load_file_data = load_file_data
#
# def __getitem__(self, path):
# if path in self:
# file_time, file_data = super().__getitem__(path)
# if getmtime(path) == file_time:
# return file_data
# file_data = self._load_file_data(path)
# self.__setitem__(path, file_data)
# return file_data
#
# def __setitem__(self, path, data):
# value = (getmtime(path), data)
# super().__setitem__(path, value)
#
# Path: crosscompute/macros/package.py
# def import_attribute(attribute_string):
# module_string, attribute_name = attribute_string.rsplit('.', maxsplit=1)
# return getattr(import_module(module_string), attribute_name)
#
# Path: crosscompute/macros/web.py
# def get_html_from_markdown(text):
# html = markdown(text)
# if '</p>\n<p>' not in html:
# html = html.removeprefix('<p>')
# html = html.removesuffix('</p>')
# return html
#
# Path: crosscompute/routines/interface.py
# class Batch(ABC):
#
# def get_data(self, variable_definition):
# '''
# Get the data of the variable in one of the following formats:
# {}
# {'value': 1}
# {'path': '/a/b/c.png'}
# {'uri': 'upload:xyz'}
# {'error': 'message'}
# '''
# return {}
#
# def get_data_uri(self, variable_definition):
# 'Get the resolved variable data uri'
# return ''
#
# def get_data_configuration(self, variable_definition):
# 'Get the resolved variable configuration'
# return {}
. Output only the next line. | function_by_name = FUNCTION_BY_NAME |
Here is a snippet: <|code_start|> for (let i = 0; i < columnCount; i++) {
const column = columns[i];
const th = document.createElement('th');
th.innerText = column;
tr.append(th);
}
thead.append(tr);
for (let i = 0; i < rowCount; i++) {
const row = rows[i];
tr = document.createElement('tr');
for (let j = 0; j < columnCount; j++) {
const td = document.createElement('td');
td.innerText = row[j];
tr.append(td);
}
tbody.append(tr);
}
})();''')
VARIABLE_VIEW_BY_NAME = {_.name: import_attribute(
_.value) for _ in entry_points().select(group='crosscompute.views')}
YIELD_DATA_BY_ID_BY_EXTENSION = {
'.csv': yield_data_by_id_from_csv,
'.txt': yield_data_by_id_from_txt,
}
FILE_DATA_CACHE = FileCache(
load_file_data=load_file_data,
<|code_end|>
. Write the next line using the current file imports:
import csv
import json
import shutil
from dataclasses import dataclass
from importlib_metadata import entry_points
from invisibleroads_macros_log import format_path
from logging import getLogger
from os.path import basename, exists
from string import Template
from ..constants import (
FUNCTION_BY_NAME,
MAXIMUM_FILE_CACHE_LENGTH,
VARIABLE_ID_PATTERN)
from ..exceptions import (
CrossComputeConfigurationError,
CrossComputeDataError)
from ..macros.disk import FileCache
from ..macros.package import import_attribute
from ..macros.web import get_html_from_markdown
from .interface import Batch
and context from other files:
# Path: crosscompute/constants.py
# FUNCTION_BY_NAME = {
# 'slug': format_slug,
# 'title': str.title,
# }
#
# MAXIMUM_FILE_CACHE_LENGTH = 256
#
# VARIABLE_ID_PATTERN = re.compile(r'{\s*([^}]+?)\s*}')
#
# Path: crosscompute/exceptions.py
# class CrossComputeConfigurationError(CrossComputeError):
# pass
#
# class CrossComputeDataError(CrossComputeError):
# pass
#
# Path: crosscompute/macros/disk.py
# class FileCache(LRUDict):
#
# def __init__(self, *args, load_file_data, maximum_length: int, **kwargs):
# super().__init__(*args, maximum_length=maximum_length, **kwargs)
# self._load_file_data = load_file_data
#
# def __getitem__(self, path):
# if path in self:
# file_time, file_data = super().__getitem__(path)
# if getmtime(path) == file_time:
# return file_data
# file_data = self._load_file_data(path)
# self.__setitem__(path, file_data)
# return file_data
#
# def __setitem__(self, path, data):
# value = (getmtime(path), data)
# super().__setitem__(path, value)
#
# Path: crosscompute/macros/package.py
# def import_attribute(attribute_string):
# module_string, attribute_name = attribute_string.rsplit('.', maxsplit=1)
# return getattr(import_module(module_string), attribute_name)
#
# Path: crosscompute/macros/web.py
# def get_html_from_markdown(text):
# html = markdown(text)
# if '</p>\n<p>' not in html:
# html = html.removeprefix('<p>')
# html = html.removesuffix('</p>')
# return html
#
# Path: crosscompute/routines/interface.py
# class Batch(ABC):
#
# def get_data(self, variable_definition):
# '''
# Get the data of the variable in one of the following formats:
# {}
# {'value': 1}
# {'path': '/a/b/c.png'}
# {'uri': 'upload:xyz'}
# {'error': 'message'}
# '''
# return {}
#
# def get_data_uri(self, variable_definition):
# 'Get the resolved variable data uri'
# return ''
#
# def get_data_configuration(self, variable_definition):
# 'Get the resolved variable configuration'
# return {}
, which may include functions, classes, or code. Output only the next line. | maximum_length=MAXIMUM_FILE_CACHE_LENGTH) |
Predict the next line for this snippet: <|code_start|>
def get_variable_value_by_id(data_by_id):
return {
variable_id: data['value'] for variable_id, data in data_by_id.items()
}
def format_text(text, data_by_id):
if not data_by_id:
return text
def f(match):
expression_text = match.group(1)
expression_terms = expression_text.split('|')
variable_id = expression_terms[0].strip()
try:
variable_data = data_by_id[variable_id]
except KeyError:
raise CrossComputeConfigurationError(
f'variable {variable_id} missing in batch configuration')
text = variable_data.get('value', '')
try:
text = apply_functions(
text, expression_terms[1:], FUNCTION_BY_NAME)
except KeyError as e:
raise CrossComputeConfigurationError(
f'{e} function not supported in {text}')
return str(text)
<|code_end|>
with the help of current file imports:
import csv
import json
import shutil
from dataclasses import dataclass
from importlib_metadata import entry_points
from invisibleroads_macros_log import format_path
from logging import getLogger
from os.path import basename, exists
from string import Template
from ..constants import (
FUNCTION_BY_NAME,
MAXIMUM_FILE_CACHE_LENGTH,
VARIABLE_ID_PATTERN)
from ..exceptions import (
CrossComputeConfigurationError,
CrossComputeDataError)
from ..macros.disk import FileCache
from ..macros.package import import_attribute
from ..macros.web import get_html_from_markdown
from .interface import Batch
and context from other files:
# Path: crosscompute/constants.py
# FUNCTION_BY_NAME = {
# 'slug': format_slug,
# 'title': str.title,
# }
#
# MAXIMUM_FILE_CACHE_LENGTH = 256
#
# VARIABLE_ID_PATTERN = re.compile(r'{\s*([^}]+?)\s*}')
#
# Path: crosscompute/exceptions.py
# class CrossComputeConfigurationError(CrossComputeError):
# pass
#
# class CrossComputeDataError(CrossComputeError):
# pass
#
# Path: crosscompute/macros/disk.py
# class FileCache(LRUDict):
#
# def __init__(self, *args, load_file_data, maximum_length: int, **kwargs):
# super().__init__(*args, maximum_length=maximum_length, **kwargs)
# self._load_file_data = load_file_data
#
# def __getitem__(self, path):
# if path in self:
# file_time, file_data = super().__getitem__(path)
# if getmtime(path) == file_time:
# return file_data
# file_data = self._load_file_data(path)
# self.__setitem__(path, file_data)
# return file_data
#
# def __setitem__(self, path, data):
# value = (getmtime(path), data)
# super().__setitem__(path, value)
#
# Path: crosscompute/macros/package.py
# def import_attribute(attribute_string):
# module_string, attribute_name = attribute_string.rsplit('.', maxsplit=1)
# return getattr(import_module(module_string), attribute_name)
#
# Path: crosscompute/macros/web.py
# def get_html_from_markdown(text):
# html = markdown(text)
# if '</p>\n<p>' not in html:
# html = html.removeprefix('<p>')
# html = html.removesuffix('</p>')
# return html
#
# Path: crosscompute/routines/interface.py
# class Batch(ABC):
#
# def get_data(self, variable_definition):
# '''
# Get the data of the variable in one of the following formats:
# {}
# {'value': 1}
# {'path': '/a/b/c.png'}
# {'uri': 'upload:xyz'}
# {'error': 'message'}
# '''
# return {}
#
# def get_data_uri(self, variable_definition):
# 'Get the resolved variable data uri'
# return ''
#
# def get_data_configuration(self, variable_definition):
# 'Get the resolved variable configuration'
# return {}
, which may contain function names, class names, or code. Output only the next line. | return VARIABLE_ID_PATTERN.sub(f, text) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.