Instruction stringlengths 362 7.83k | output_code stringlengths 1 945 |
|---|---|
Based on the snippet: <|code_start|># coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for structure_utils functions."""
class ValidateStructureTest(tf.test.TestCase):
def testValidateSingleElement(self):
elem = "element" # String
<|code_end|>
, predict the immediate next line with the help of imports:
import tensorflow as tf
from tensorflow_model_remediation.min_diff.keras.utils import structure_utils
and context (classes, functions, sometimes code) from other files:
# Path: tensorflow_model_remediation/min_diff/keras/utils/structure_utils.py
# def _flatten_min_diff_structure(struct, run_validation=False):
# def _pack_min_diff_sequence_as(struct, flat_sequence):
# def _assert_same_min_diff_structure(struct1, struct2):
# def _err_msg(specifics_template, use_dict_keys=False):
# def _is_min_diff_element(element, element_type=None):
# def validate_min_diff_structure(struct,
# struct_name="struct",
# element_type=None):
. Output only the next line. | structure_utils.validate_min_diff_structure(elem) |
Predict the next line for this snippet: <|code_start|># coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_model_remediation.tools.tutorials_utils.uci.utils."""
class UCIDataTest(tf.test.TestCase):
def setUp(self):
super().setUp()
<|code_end|>
with the help of current file imports:
import unittest.mock as mock
import pandas as pd
import tensorflow as tf
from tensorflow_model_remediation.tools.tutorials_utils.uci import utils
and context from other files:
# Path: tensorflow_model_remediation/tools/tutorials_utils/uci/utils.py
# _UCI_DATA_URL_TEMPLATE = ('https://storage.googleapis.com/uci_dataset/adult.{}')
# _UCI_COLUMN_NAMES = [
# 'age', 'workclass', 'fnlwgt', 'education', 'education-num',
# 'marital-status', 'occupation', 'relationship', 'race', 'sex',
# 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income'
# ]
# def _get_full_uci_dataframe(url):
# def _get_uci_data_from_url(url, sample=None):
# def get_uci_data(split='train', sample=None):
# def df_to_dataset(dataframe, shuffle=False, batch_size=None):
# def get_uci_min_diff_datasets(split='train',
# sample=None,
# original_batch_size=128,
# min_diff_batch_size=32):
# def get_uci_with_min_diff_dataset(split='train', sample=None):
# def get_uci_model(model_class=tf.keras.Model):
# def _add_input_feature(input_layer, feature=None):
, which may contain function names, class names, or code. Output only the next line. | utils._uci_full_dataframes = {} # Clear any caches. |
Continue the code snippet: <|code_start|> tf.cast(num_min_diff_examples, dtype=tf.float32)))
if self.enable_summary_histogram:
# Plot histogram of the MinDiff predictions.
summary_histogram = (
tf.summary.histogram
)
summary_histogram('min_diff_prediction_histogram', predictions)
# Plot histogram of the MinDiff predictions for each membership class.
# Pick out only min_diff head training data
pos_mask = tf.dtypes.cast(weights, tf.float32) * tf.cast(
tf.equal(membership, 1.0), tf.float32)
neg_mask = tf.dtypes.cast(weights, tf.float32) * tf.cast(
tf.equal(membership, 0.0), tf.float32)
if predictions.shape.dims:
sensitive_group_predictions = tf.squeeze(
tf.gather(predictions, indices=tf.where(pos_mask[:, 0])))
non_sensitive_group_predictions = tf.squeeze(
tf.gather(predictions, indices=tf.where(neg_mask[:, 0])))
summary_histogram('min_diff_sensitive_group_prediction_histogram',
sensitive_group_predictions)
summary_histogram('min_diff_non-sensitive_group_prediction_histogram',
non_sensitive_group_predictions)
return loss
<|code_end|>
. Use current file imports:
import abc
import re
import dill
import tensorflow as tf
from typing import Optional, Tuple
from tensorflow_model_remediation.common import docs
from tensorflow_model_remediation.common import types
from tensorflow_model_remediation.min_diff.losses.kernels import kernel_utils
and context (classes, functions, or code) from other files:
# Path: tensorflow_model_remediation/common/docs.py
# def _no_op_decorator(obj):
# def _get_safe_decorator(decorator_name):
#
# Path: tensorflow_model_remediation/common/types.py
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/kernel_utils.py
# _STRING_TO_KERNEL_DICT = {}
# def _register_kernel_names(kernel_class, names):
# def _get_kernel(kernel: Union[base_kernel.MinDiffKernel, str],
# kernel_var_name: str = 'kernel'):
. Output only the next line. | @docs.doc_private |
Predict the next line for this snippet: <|code_start|>
def __init__(self,
membership_transform=None,
predictions_transform=None,
membership_kernel=None,
predictions_kernel=None,
name: Optional[str] = None,
enable_summary_histogram: Optional[bool] = True):
"""Initialize `MinDiffLoss` instance.
Raises:
ValueError: If a `*_transform` parameter is passed in but is not callable.
ValueError: If a `*_kernel` parameter has an unrecognized type or value.
"""
super(MinDiffLoss, self).__init__(
reduction=tf.keras.losses.Reduction.NONE, name=name)
self.name = name or _to_snake_case(self.__class__.__name__)
_validate_transform(membership_transform, 'membership_transform')
self.membership_transform = (membership_transform)
_validate_transform(predictions_transform, 'predictions_transform')
self.predictions_transform = predictions_transform
self.membership_kernel = kernel_utils._get_kernel(membership_kernel,
'membership_kernel')
self.predictions_kernel = kernel_utils._get_kernel(predictions_kernel,
'predictions_kernel')
self.enable_summary_histogram = enable_summary_histogram
def __call__(self,
<|code_end|>
with the help of current file imports:
import abc
import re
import dill
import tensorflow as tf
from typing import Optional, Tuple
from tensorflow_model_remediation.common import docs
from tensorflow_model_remediation.common import types
from tensorflow_model_remediation.min_diff.losses.kernels import kernel_utils
and context from other files:
# Path: tensorflow_model_remediation/common/docs.py
# def _no_op_decorator(obj):
# def _get_safe_decorator(decorator_name):
#
# Path: tensorflow_model_remediation/common/types.py
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/kernel_utils.py
# _STRING_TO_KERNEL_DICT = {}
# def _register_kernel_names(kernel_class, names):
# def _get_kernel(kernel: Union[base_kernel.MinDiffKernel, str],
# kernel_var_name: str = 'kernel'):
, which may contain function names, class names, or code. Output only the next line. | membership: types.TensorType, |
Next line prediction: <|code_start|> (typically score distributions) between two groups of examples identified by
the value in the `membership` column.
If the predictions between the two groups are indistinguishable, the loss
should be 0. The more different the two scores are, the higher the loss.
"""
# pyformat: enable
def __init__(self,
membership_transform=None,
predictions_transform=None,
membership_kernel=None,
predictions_kernel=None,
name: Optional[str] = None,
enable_summary_histogram: Optional[bool] = True):
"""Initialize `MinDiffLoss` instance.
Raises:
ValueError: If a `*_transform` parameter is passed in but is not callable.
ValueError: If a `*_kernel` parameter has an unrecognized type or value.
"""
super(MinDiffLoss, self).__init__(
reduction=tf.keras.losses.Reduction.NONE, name=name)
self.name = name or _to_snake_case(self.__class__.__name__)
_validate_transform(membership_transform, 'membership_transform')
self.membership_transform = (membership_transform)
_validate_transform(predictions_transform, 'predictions_transform')
self.predictions_transform = predictions_transform
<|code_end|>
. Use current file imports:
(import abc
import re
import dill
import tensorflow as tf
from typing import Optional, Tuple
from tensorflow_model_remediation.common import docs
from tensorflow_model_remediation.common import types
from tensorflow_model_remediation.min_diff.losses.kernels import kernel_utils)
and context including class names, function names, or small code snippets from other files:
# Path: tensorflow_model_remediation/common/docs.py
# def _no_op_decorator(obj):
# def _get_safe_decorator(decorator_name):
#
# Path: tensorflow_model_remediation/common/types.py
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/kernel_utils.py
# _STRING_TO_KERNEL_DICT = {}
# def _register_kernel_names(kernel_class, names):
# def _get_kernel(kernel: Union[base_kernel.MinDiffKernel, str],
# kernel_var_name: str = 'kernel'):
. Output only the next line. | self.membership_kernel = kernel_utils._get_kernel(membership_kernel, |
Using the snippet: <|code_start|> # pyformat: disable
"""Invokes the kernel instance.
Arguments:
x: `tf.Tensor` of shape `[N, D]` (if tiling input) or `[N, M, D]` (if not
tiling input).
y: Optional `tf.Tensor` of shape `[M, D]` (if tiling input) or `[N, M, D]`
(if not tiling input).
If `y` is `None`, it is set to be the same as `x`:
```
if y is None:
y = x
```
Inputs are tiled if `self.tile_input == True` and left as is otherwise.
Returns:
`tf.Tensor` of shape `[N, M]`.
"""
# pyformat: enable
if y is None:
y = x
if self.tile_input:
x = x[:, tf.newaxis, :]
y = y[tf.newaxis, :, :]
return self.call(x, y)
@abc.abstractmethod
<|code_end|>
, determine the next line of code. You have imports:
import abc
import tensorflow as tf
from typing import Optional
from tensorflow_model_remediation.common import docs
from tensorflow_model_remediation.common import types
and context (class names, function names, or code) available:
# Path: tensorflow_model_remediation/common/docs.py
# def _no_op_decorator(obj):
# def _get_safe_decorator(decorator_name):
#
# Path: tensorflow_model_remediation/common/types.py
. Output only the next line. | @docs.do_not_doc_in_subclasses |
Predict the next line for this snippet: <|code_start|> Arguments:
tile_input: Boolean indicating whether to tile inputs before computing the
kernel (see below for details).
To be implemented by subclasses:
- `call()`: contains the logic for the kernel tensor calculation.
Example subclass Implementation:
```
class GuassKernel(MinDiffKernel):
def call(x, y):
return tf.exp(-tf.reduce_sum(tf.square(x - y), axis=2) / 0.01)
```
"Tiling" is a way of expanding the rank of the input tensors so that their
dimensions work for the operations we need.
If `x` and `y` are of rank `[N, D]` and `[M, D]` respectively, tiling expands
them to be: `[N, ?, D]` and `[?, M, D]` where `tf` broadcasting will ensure
that the operations between them work.
"""
# pyformat: enable
def __init__(self, tile_input: bool = True):
self.tile_input = tile_input
def __call__(self,
<|code_end|>
with the help of current file imports:
import abc
import tensorflow as tf
from typing import Optional
from tensorflow_model_remediation.common import docs
from tensorflow_model_remediation.common import types
and context from other files:
# Path: tensorflow_model_remediation/common/docs.py
# def _no_op_decorator(obj):
# def _get_safe_decorator(decorator_name):
#
# Path: tensorflow_model_remediation/common/types.py
, which may contain function names, class names, or code. Output only the next line. | x: types.TensorType, |
Using the snippet: <|code_start|># coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loss_utils functions."""
class GetMinDiffLossTest(tf.test.TestCase):
def testAcceptsNone(self):
loss = utils._get_loss(None)
self.assertIsNone(loss)
def testForAbsoluteCorrelationLoss(self):
loss = utils._get_loss('abs_corr')
<|code_end|>
, determine the next line of code. You have imports:
import tensorflow as tf
from tensorflow_model_remediation.min_diff.losses import absolute_correlation_loss as abscorrloss
from tensorflow_model_remediation.min_diff.losses import base_loss
from tensorflow_model_remediation.min_diff.losses import loss_utils as utils
from tensorflow_model_remediation.min_diff.losses import mmd_loss
and context (class names, function names, or code) available:
# Path: tensorflow_model_remediation/min_diff/losses/absolute_correlation_loss.py
# _EPSILON = 1.0e-7
# class AbsoluteCorrelationLoss(base_loss.MinDiffLoss):
# def __init__(self, name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def call(
# self,
# sensitive_group_labels: types.TensorType,
# y_pred: types.TensorType,
# sample_weight: Optional[types.TensorType] = None) -> types.TensorType:
#
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
#
# Path: tensorflow_model_remediation/min_diff/losses/loss_utils.py
# _STRING_TO_LOSS_DICT = {}
# def _register_loss_names(loss_class, names):
# def _get_loss(loss: Union[base_loss.MinDiffLoss, str],
# loss_var_name: str = 'loss') -> base_loss.MinDiffLoss:
#
# Path: tensorflow_model_remediation/min_diff/losses/mmd_loss.py
# class MMDLoss(base_loss.MinDiffLoss):
# def __init__(self,
# kernel="gaussian",
# predictions_transform=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def _preprocess(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _calculate_mean(self, predictions_kernel: types.TensorType,
# normed_weights: types.TensorType,
# pos_mask: types.TensorType, neg_mask):
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def from_config(cls, config):
. Output only the next line. | self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss) |
Given the following code snippet before the placeholder: <|code_start|> def testForAbsoluteCorrelationLoss(self):
loss = utils._get_loss('abs_corr')
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
loss = utils._get_loss('abS_coRr') # Strangely capitalized.
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
loss = utils._get_loss('abs_corr_loss') # Other accepted name.
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
loss = utils._get_loss('absolute_correlation') # Other accepted name.
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
loss = utils._get_loss('absolute_correlation_loss') # Other accepted name.
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
loss_name = 'custom_name'
loss = utils._get_loss(abscorrloss.AbsoluteCorrelationLoss(loss_name))
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
self.assertEqual(loss.name, loss_name)
def testForMMDLoss(self):
loss = utils._get_loss('mmd')
self.assertIsInstance(loss, mmd_loss.MMDLoss)
loss = utils._get_loss('mmd_loss')
self.assertIsInstance(loss, mmd_loss.MMDLoss)
loss = utils._get_loss(mmd_loss.MMDLoss())
self.assertIsInstance(loss, mmd_loss.MMDLoss)
loss_name = 'custom_name'
loss = utils._get_loss(mmd_loss.MMDLoss(name=loss_name))
self.assertIsInstance(loss, mmd_loss.MMDLoss)
self.assertEqual(loss.name, loss_name)
def testForCustomLoss(self):
<|code_end|>
, predict the next line using imports from the current file:
import tensorflow as tf
from tensorflow_model_remediation.min_diff.losses import absolute_correlation_loss as abscorrloss
from tensorflow_model_remediation.min_diff.losses import base_loss
from tensorflow_model_remediation.min_diff.losses import loss_utils as utils
from tensorflow_model_remediation.min_diff.losses import mmd_loss
and context including class names, function names, and sometimes code from other files:
# Path: tensorflow_model_remediation/min_diff/losses/absolute_correlation_loss.py
# _EPSILON = 1.0e-7
# class AbsoluteCorrelationLoss(base_loss.MinDiffLoss):
# def __init__(self, name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def call(
# self,
# sensitive_group_labels: types.TensorType,
# y_pred: types.TensorType,
# sample_weight: Optional[types.TensorType] = None) -> types.TensorType:
#
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
#
# Path: tensorflow_model_remediation/min_diff/losses/loss_utils.py
# _STRING_TO_LOSS_DICT = {}
# def _register_loss_names(loss_class, names):
# def _get_loss(loss: Union[base_loss.MinDiffLoss, str],
# loss_var_name: str = 'loss') -> base_loss.MinDiffLoss:
#
# Path: tensorflow_model_remediation/min_diff/losses/mmd_loss.py
# class MMDLoss(base_loss.MinDiffLoss):
# def __init__(self,
# kernel="gaussian",
# predictions_transform=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def _preprocess(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _calculate_mean(self, predictions_kernel: types.TensorType,
# normed_weights: types.TensorType,
# pos_mask: types.TensorType, neg_mask):
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def from_config(cls, config):
. Output only the next line. | class CustomLoss(base_loss.MinDiffLoss): |
Using the snippet: <|code_start|>
"""Tests for loss_utils functions."""
class GetMinDiffLossTest(tf.test.TestCase):
def testAcceptsNone(self):
loss = utils._get_loss(None)
self.assertIsNone(loss)
def testForAbsoluteCorrelationLoss(self):
loss = utils._get_loss('abs_corr')
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
loss = utils._get_loss('abS_coRr') # Strangely capitalized.
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
loss = utils._get_loss('abs_corr_loss') # Other accepted name.
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
loss = utils._get_loss('absolute_correlation') # Other accepted name.
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
loss = utils._get_loss('absolute_correlation_loss') # Other accepted name.
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
loss_name = 'custom_name'
loss = utils._get_loss(abscorrloss.AbsoluteCorrelationLoss(loss_name))
self.assertIsInstance(loss, abscorrloss.AbsoluteCorrelationLoss)
self.assertEqual(loss.name, loss_name)
def testForMMDLoss(self):
loss = utils._get_loss('mmd')
<|code_end|>
, determine the next line of code. You have imports:
import tensorflow as tf
from tensorflow_model_remediation.min_diff.losses import absolute_correlation_loss as abscorrloss
from tensorflow_model_remediation.min_diff.losses import base_loss
from tensorflow_model_remediation.min_diff.losses import loss_utils as utils
from tensorflow_model_remediation.min_diff.losses import mmd_loss
and context (class names, function names, or code) available:
# Path: tensorflow_model_remediation/min_diff/losses/absolute_correlation_loss.py
# _EPSILON = 1.0e-7
# class AbsoluteCorrelationLoss(base_loss.MinDiffLoss):
# def __init__(self, name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def call(
# self,
# sensitive_group_labels: types.TensorType,
# y_pred: types.TensorType,
# sample_weight: Optional[types.TensorType] = None) -> types.TensorType:
#
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
#
# Path: tensorflow_model_remediation/min_diff/losses/loss_utils.py
# _STRING_TO_LOSS_DICT = {}
# def _register_loss_names(loss_class, names):
# def _get_loss(loss: Union[base_loss.MinDiffLoss, str],
# loss_var_name: str = 'loss') -> base_loss.MinDiffLoss:
#
# Path: tensorflow_model_remediation/min_diff/losses/mmd_loss.py
# class MMDLoss(base_loss.MinDiffLoss):
# def __init__(self,
# kernel="gaussian",
# predictions_transform=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def _preprocess(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _calculate_mean(self, predictions_kernel: types.TensorType,
# normed_weights: types.TensorType,
# pos_mask: types.TensorType, neg_mask):
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def from_config(cls, config):
. Output only the next line. | self.assertIsInstance(loss, mmd_loss.MMDLoss) |
Next line prediction: <|code_start|># Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test laplacian_kernel module."""
class LaplacianKernelTest(tf.test.TestCase):
def testLaplacianKernel(self):
# Under certain conditions, the kernel output resembles the identity
# (within the tolerance of the checks).
# Specifically, this is true when the minimum distance, d, between any two
# elements in the input is greater than D for D = -ln(tol) * kernel_length
# where:
# - tol: assertion tolerance (these tests ues 1e-6)
# - d: minimum{norm(x_i - y_i)
# for x_i = x[i, :], y_j = y[j, :] for all i neq j}
# - kernel_length: argument to kernel initialization
#
# In the test below, we use max(kernel_length) == 0.1 which gives us
# max(D)~=1.38. Given this bound, we sometines use d=2 (meaning that each
# element is at least 2 greater or smaller than every other element in the
# input). When we do this, we expect the output to be close to the identity.
for kernel_length in [0.05, 0.075, 0.1]:
<|code_end|>
. Use current file imports:
(import tensorflow as tf
from tensorflow_model_remediation.min_diff.losses.kernels import laplacian_kernel)
and context including class names, function names, or small code snippets from other files:
# Path: tensorflow_model_remediation/min_diff/losses/kernels/laplacian_kernel.py
# _EPSILON = 1.0e-8
# class LaplacianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
. Output only the next line. | laplacian_kernel_fn = laplacian_kernel.LaplacianKernel(kernel_length) |
Predict the next line for this snippet: <|code_start|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test gaussian_kernel module."""
class GaussianKernelTest(tf.test.TestCase):
def testGaussianKernel(self):
# Under certain conditions, the kernel output resembles the identity
# (within the tolerance of the checks).
# Specifically, this is true when the minimum distance, d, between any two
# elements in the input is greater than D = -ln(tol) * kernel_length**2
# where:
# - tol: assertion tolerance (these tests ues 1e-6)
# - d: minimum{norm(x_i - y_i)
# for x_i = x[i, :], y_j = y[j, :] for all i neq j}
# - kernel_length: argument to kernel initialization
#
# In the test below, we use max(kernel_length) == 0.1 which gives us
# max(D)~=0.138. Given this bound, we sometines use d=0.2 (meaning that each
# element is at least 0.2 greater or smaller than every other element in the
# input). When we do this, we expect the output to be close to the identity.
for kernel_length in [0.05, 0.075, 0.1]:
<|code_end|>
with the help of current file imports:
import tensorflow as tf
from tensorflow_model_remediation.min_diff.losses.kernels import gaussian_kernel
and context from other files:
# Path: tensorflow_model_remediation/min_diff/losses/kernels/gaussian_kernel.py
# class GaussianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
, which may contain function names, class names, or code. Output only the next line. | gaussian_kernel_fn = gaussian_kernel.GaussianKernel(kernel_length) |
Given the following code snippet before the placeholder: <|code_start|>
def _get_batch(tensors, batch_size, batch_num):
if isinstance(tensors, dict):
return {k: _get_batch(v, batch_size, batch_num) for k, v in tensors.items()}
if isinstance(tensors, tf.SparseTensor):
total_examples = tensors.dense_shape[0]
else:
total_examples = len(tensors)
start_ind = (batch_size * batch_num) % total_examples
end_ind = start_ind + batch_size
# Double tensor to enable repeating inputs.
if isinstance(tensors, tf.SparseTensor):
return tf.sparse.slice(tensors, [start_ind, 0], [batch_size, 2])
else:
return tensors[start_ind:end_ind]
def _get_min_diff_batch(sensitive_tensors, nonsensitive_tensors,
sensitive_batch_size, nonsensitive_batch_size,
batch_num):
sensitive_batch = _get_batch(sensitive_tensors, sensitive_batch_size,
batch_num)
nonsensitive_batch = _get_batch(nonsensitive_tensors, nonsensitive_batch_size,
batch_num)
if isinstance(sensitive_batch, dict):
return {
<|code_end|>
, predict the next line using imports from the current file:
import copy
import tensorflow as tf
from tensorflow_model_remediation.min_diff.keras.utils import input_utils
and context including class names, function names, and sometimes code from other files:
# Path: tensorflow_model_remediation/min_diff/keras/utils/input_utils.py
# class MinDiffPackedInputs(
# collections.namedtuple("MinDiffPackedInputs",
# ["original_inputs", "min_diff_data"])):
# def pack_min_diff_data(original_dataset: tf.data.Dataset,
# sensitive_group_dataset=None,
# nonsensitive_group_dataset=None,
# min_diff_dataset=None) -> tf.data.Dataset:
# def _map_fn(original_batch, min_diff_batch):
# def _pack_as_original(original_batch, x, y, w):
# def _tensor_concat(t1, t2):
# def build_min_diff_dataset(sensitive_group_dataset,
# nonsensitive_group_dataset) -> tf.data.Dataset:
# def _build_single_batch(single_sensitive_batch, single_nonsensitive_batch):
# def _map_fn(sensitive_batch, nonsensitive_batch):
# def unpack_original_inputs(inputs):
# def unpack_min_diff_data(inputs):
. Output only the next line. | key: input_utils._tensor_concat(sensitive_batch[key], |
Next line prediction: <|code_start|> enable_summary_histogram: Optional bool indicating if `tf.summary.histogram`
should be included within the loss. Defaults to True.
The Maximum Mean Discrepancy (MMD) is a measure of the distance between the
distributions of prediction scores on two groups of examples. The metric
guarantees that the result is 0 if and only if the two distributions it is
comparing are exactly the same.
The `membership` input indicates with a numerical value whether
each example is part of the sensitive group with a numerical value. This
currently only supports hard membership of `0.0` or `1.0`.
For more details, see the
[paper](http://papers.nips.cc/paper/3110-a-kernel-method-for-the-two-sample-problem.pdf).
"""
# pyformat: enable
def __init__(self,
kernel="gaussian",
predictions_transform=None,
name: Optional[str] = None,
enable_summary_histogram: Optional[bool] = True):
"""Initialize an instance of MMDLoss."""
super(MMDLoss, self).__init__(
predictions_transform=predictions_transform,
predictions_kernel=kernel,
name=name or "mmd_loss",
enable_summary_histogram=enable_summary_histogram)
def _preprocess(self,
<|code_end|>
. Use current file imports:
(from typing import Optional
from tensorflow_model_remediation.common import types
from tensorflow_model_remediation.min_diff.losses import base_loss
import tensorflow as tf)
and context including class names, function names, or small code snippets from other files:
# Path: tensorflow_model_remediation/common/types.py
#
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
. Output only the next line. | membership: types.TensorType, |
Here is a snippet: <|code_start|> The Gaussian kernel is a mathematical tool that approximates a given
distribution as a sum of gaussian distributions. This is particularly useful
when we are trying to determine a distribution from a set of points.
Arguments:
kernel_length: Length (sometimes also called 'width') of the kernel.
Defaults to `0.1`. This parameter essentially describes how far apart
points can be and still affect each other.
The choice for kernel length should be influenced by the average distance
of inputs. The smaller the distance, the smaller the kernel length likely
needs to be for best performance. In general, a good first guess is the
standard deviation of your predictions.
Note: A kernel length that is too large will result in losing most of the
kernel's non-linearity making it much less effective. A kernel length
that is too small will make the kernel highly sensitive to input noise
potentially leading to unstable results.
**kwargs: Named parameters that will be passed directly to the base
class' `__init__` function.
See [paper](https://arxiv.org/abs/1910.11779) for reference on how it can be
used in MinDiff.
"""
# pyformat: enable
def __init__(self, kernel_length: complex = 0.1, **kwargs):
super(GaussianKernel, self).__init__(**kwargs)
self.kernel_length = kernel_length
<|code_end|>
. Write the next line using the current file imports:
import tensorflow as tf
from tensorflow_model_remediation.common import types
from tensorflow_model_remediation.min_diff.losses.kernels import base_kernel
and context from other files:
# Path: tensorflow_model_remediation/common/types.py
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/base_kernel.py
# class MinDiffKernel(abc.ABC):
# def __init__(self, tile_input: bool = True):
# def __call__(self,
# x: types.TensorType,
# y: Optional[types.TensorType] = None) -> types.TensorType:
# def call(self, x: types.TensorType, y: types.TensorType):
# def get_config(self):
# def from_config(cls, config):
, which may include functions, classes, or code. Output only the next line. | def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType: |
Using the snippet: <|code_start|># distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for MinDiffLoss class."""
@tf.keras.utils.register_keras_serializable()
class CustomLoss(base_loss.MinDiffLoss):
def __init__(self,
membership_transform=None,
predictions_transform=None,
membership_kernel=None,
predictions_kernel=None,
name=None):
super(CustomLoss, self).__init__(
membership_transform=membership_transform,
predictions_transform=predictions_transform,
membership_kernel=membership_kernel,
predictions_kernel=predictions_kernel,
name=name)
def call(self):
pass # Dummy Placeholder. Will not be called unless subclassed.
<|code_end|>
, determine the next line of code. You have imports:
import tensorflow as tf
from tensorflow_model_remediation.min_diff.losses import base_loss
from tensorflow_model_remediation.min_diff.losses.kernels import base_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import gaussian_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import laplacian_kernel
and context (class names, function names, or code) available:
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/base_kernel.py
# class MinDiffKernel(abc.ABC):
# def __init__(self, tile_input: bool = True):
# def __call__(self,
# x: types.TensorType,
# y: Optional[types.TensorType] = None) -> types.TensorType:
# def call(self, x: types.TensorType, y: types.TensorType):
# def get_config(self):
# def from_config(cls, config):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/gaussian_kernel.py
# class GaussianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/laplacian_kernel.py
# _EPSILON = 1.0e-8
# class LaplacianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
. Output only the next line. | class CustomKernel(base_kernel.MinDiffKernel): |
Predict the next line after this snippet: <|code_start|> transform_2 = lambda x: x + 3 # Different arbitrary transform.
val = 7 # Arbitrary value.
loss = CustomLoss(
membership_transform=transform_1, predictions_transform=transform_2)
self.assertEqual(loss.membership_transform(val), transform_1(val))
self.assertEqual(loss.predictions_transform(val), transform_2(val))
loss = CustomLoss(predictions_transform=transform_1)
self.assertEqual(loss.predictions_transform(val), transform_1(val))
self.assertIsNone(loss.membership_transform)
loss = CustomLoss(membership_transform=transform_2)
self.assertEqual(loss.membership_transform(val), transform_2(val))
self.assertIsNone(loss.predictions_transform)
def testTransformInputRaisesErrors(self):
with self.assertRaisesRegex(ValueError, 'should be a callable instance'):
_ = CustomLoss(membership_transform='not callable')
with self.assertRaisesRegex(ValueError, 'should be a callable instance'):
_ = CustomLoss(predictions_transform='not callable')
def testKernelAttributesDefaultsToNone(self):
loss = CustomLoss()
self.assertIsNone(loss.membership_kernel)
self.assertIsNone(loss.predictions_kernel)
def testKernelAttributes(self):
loss = CustomLoss(membership_kernel='gauss', predictions_kernel='laplace')
self.assertIsInstance(loss.membership_kernel,
<|code_end|>
using the current file's imports:
import tensorflow as tf
from tensorflow_model_remediation.min_diff.losses import base_loss
from tensorflow_model_remediation.min_diff.losses.kernels import base_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import gaussian_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import laplacian_kernel
and any relevant context from other files:
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/base_kernel.py
# class MinDiffKernel(abc.ABC):
# def __init__(self, tile_input: bool = True):
# def __call__(self,
# x: types.TensorType,
# y: Optional[types.TensorType] = None) -> types.TensorType:
# def call(self, x: types.TensorType, y: types.TensorType):
# def get_config(self):
# def from_config(cls, config):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/gaussian_kernel.py
# class GaussianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/laplacian_kernel.py
# _EPSILON = 1.0e-8
# class LaplacianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
. Output only the next line. | gaussian_kernel.GaussianKernel) |
Continue the code snippet: <|code_start|>
loss = CustomLoss(
membership_transform=transform_1, predictions_transform=transform_2)
self.assertEqual(loss.membership_transform(val), transform_1(val))
self.assertEqual(loss.predictions_transform(val), transform_2(val))
loss = CustomLoss(predictions_transform=transform_1)
self.assertEqual(loss.predictions_transform(val), transform_1(val))
self.assertIsNone(loss.membership_transform)
loss = CustomLoss(membership_transform=transform_2)
self.assertEqual(loss.membership_transform(val), transform_2(val))
self.assertIsNone(loss.predictions_transform)
def testTransformInputRaisesErrors(self):
with self.assertRaisesRegex(ValueError, 'should be a callable instance'):
_ = CustomLoss(membership_transform='not callable')
with self.assertRaisesRegex(ValueError, 'should be a callable instance'):
_ = CustomLoss(predictions_transform='not callable')
def testKernelAttributesDefaultsToNone(self):
loss = CustomLoss()
self.assertIsNone(loss.membership_kernel)
self.assertIsNone(loss.predictions_kernel)
def testKernelAttributes(self):
loss = CustomLoss(membership_kernel='gauss', predictions_kernel='laplace')
self.assertIsInstance(loss.membership_kernel,
gaussian_kernel.GaussianKernel)
self.assertIsInstance(loss.predictions_kernel,
<|code_end|>
. Use current file imports:
import tensorflow as tf
from tensorflow_model_remediation.min_diff.losses import base_loss
from tensorflow_model_remediation.min_diff.losses.kernels import base_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import gaussian_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import laplacian_kernel
and context (classes, functions, or code) from other files:
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/base_kernel.py
# class MinDiffKernel(abc.ABC):
# def __init__(self, tile_input: bool = True):
# def __call__(self,
# x: types.TensorType,
# y: Optional[types.TensorType] = None) -> types.TensorType:
# def call(self, x: types.TensorType, y: types.TensorType):
# def get_config(self):
# def from_config(cls, config):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/gaussian_kernel.py
# class GaussianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/laplacian_kernel.py
# _EPSILON = 1.0e-8
# class LaplacianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
. Output only the next line. | laplacian_kernel.LaplacianKernel) |
Given snippet: <|code_start|> 0.4,
0.5,
0.6,
0.7,
0.8,
1.0,
]
return [header, example, empty_comment_example]
def _write_csv(self, examples):
filename = os.path.join(tempfile.mkdtemp(), 'input.csv')
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for example in examples:
csvwriter.writerow(example)
return filename
@mock.patch(
'tensorflow_model_remediation.tools.tutorials_utils.min_diff_keras_utils._create_embedding_layer',
autospec=True)
@mock.patch('tensorflow.keras.utils.get_file', autospec=True)
def test_download_and_process_civil_comments_data_and_create_model(
self, mock_get_file, mock__create_embedding_layer):
# First test download_and_process_civil_comments_data. Mock out the
# download.
filename = self._write_csv(
self._create_example_csv(use_fake_embedding=True))
mock_get_file.return_value = filename
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import csv
import os
import tempfile
import unittest.mock as mock
import tensorflow as tf
from tensorflow_model_remediation.tools.tutorials_utils import min_diff_keras_utils
and context:
# Path: tensorflow_model_remediation/tools/tutorials_utils/min_diff_keras_utils.py
# TEXT_FEATURE = 'comment_text'
# LABEL = 'toxicity'
# def download_and_process_civil_comments_data():
# def _create_embedding_layer(hub_url):
# def create_keras_sequential_model(
# hub_url='https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1',
# cnn_filter_sizes=[128, 128, 128],
# cnn_kernel_sizes=[5, 5, 5],
# cnn_pooling_sizes=[5, 5, 40]):
which might include code, classes, or functions. Output only the next line. | data_train, _, _, labels_train, _ = min_diff_keras_utils.download_and_process_civil_comments_data( |
Predict the next line for this snippet: <|code_start|> # pyformat: disable
"""Absolute correlation between predictions on two groups of examples.
Arguments:
name: Name used for logging or tracking. Defaults to
`'absolute_correlation_loss'`.
enable_summary_histogram: Optional bool indicating if `tf.summary.histogram`
should be included within the loss. Defaults to True.
Absolute correlation measures how correlated predictions are with membership
(regardless of direction). The metric guarantees that the result is 0 if and
only if the two distributions it is comparing are indistinguishable.
The `sensitive_group_labels` input is used to determine whether each example
is part of the sensitive group. This currently only supports hard membership
of `0.0` or `1.0`.
For more details, see the [paper](https://arxiv.org/abs/1901.04562).
"""
# pyformat: enable
def __init__(self, name: Optional[str] = None,
enable_summary_histogram: Optional[bool] = True):
"""Initialize Loss."""
super(AbsoluteCorrelationLoss,
self).__init__(name=name or 'absolute_correlation_loss',
enable_summary_histogram=enable_summary_histogram)
def call(
self,
<|code_end|>
with the help of current file imports:
from typing import Optional
from tensorflow_model_remediation.common import types
from tensorflow_model_remediation.min_diff.losses import base_loss
import tensorflow as tf
and context from other files:
# Path: tensorflow_model_remediation/common/types.py
#
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
, which may contain function names, class names, or code. Output only the next line. | sensitive_group_labels: types.TensorType, |
Given snippet: <|code_start|># coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for MinDiff losses."""
_STRING_TO_LOSS_DICT = {}
def _register_loss_names(loss_class, names):
for name in names:
_STRING_TO_LOSS_DICT[name] = loss_class
if not name.endswith('_loss'):
_STRING_TO_LOSS_DICT[name + '_loss'] = loss_class
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from typing import Union
from tensorflow_model_remediation.min_diff.losses import absolute_correlation_loss as abs_corr_loss
from tensorflow_model_remediation.min_diff.losses import base_loss
from tensorflow_model_remediation.min_diff.losses import mmd_loss
and context:
# Path: tensorflow_model_remediation/min_diff/losses/absolute_correlation_loss.py
# _EPSILON = 1.0e-7
# class AbsoluteCorrelationLoss(base_loss.MinDiffLoss):
# def __init__(self, name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def call(
# self,
# sensitive_group_labels: types.TensorType,
# y_pred: types.TensorType,
# sample_weight: Optional[types.TensorType] = None) -> types.TensorType:
#
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
#
# Path: tensorflow_model_remediation/min_diff/losses/mmd_loss.py
# class MMDLoss(base_loss.MinDiffLoss):
# def __init__(self,
# kernel="gaussian",
# predictions_transform=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def _preprocess(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _calculate_mean(self, predictions_kernel: types.TensorType,
# normed_weights: types.TensorType,
# pos_mask: types.TensorType, neg_mask):
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def from_config(cls, config):
which might include code, classes, or functions. Output only the next line. | _register_loss_names(abs_corr_loss.AbsoluteCorrelationLoss, |
Given the code snippet: <|code_start|># You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for MinDiff losses."""
_STRING_TO_LOSS_DICT = {}
def _register_loss_names(loss_class, names):
for name in names:
_STRING_TO_LOSS_DICT[name] = loss_class
if not name.endswith('_loss'):
_STRING_TO_LOSS_DICT[name + '_loss'] = loss_class
_register_loss_names(abs_corr_loss.AbsoluteCorrelationLoss,
['abs_corr', 'absolute_correlation'])
_register_loss_names(mmd_loss.MMDLoss, ['mmd', 'maximum_mean_discrepancy'])
<|code_end|>
, generate the next line using the imports in this file:
from typing import Union
from tensorflow_model_remediation.min_diff.losses import absolute_correlation_loss as abs_corr_loss
from tensorflow_model_remediation.min_diff.losses import base_loss
from tensorflow_model_remediation.min_diff.losses import mmd_loss
and context (functions, classes, or occasionally code) from other files:
# Path: tensorflow_model_remediation/min_diff/losses/absolute_correlation_loss.py
# _EPSILON = 1.0e-7
# class AbsoluteCorrelationLoss(base_loss.MinDiffLoss):
# def __init__(self, name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def call(
# self,
# sensitive_group_labels: types.TensorType,
# y_pred: types.TensorType,
# sample_weight: Optional[types.TensorType] = None) -> types.TensorType:
#
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
#
# Path: tensorflow_model_remediation/min_diff/losses/mmd_loss.py
# class MMDLoss(base_loss.MinDiffLoss):
# def __init__(self,
# kernel="gaussian",
# predictions_transform=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def _preprocess(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _calculate_mean(self, predictions_kernel: types.TensorType,
# normed_weights: types.TensorType,
# pos_mask: types.TensorType, neg_mask):
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def from_config(cls, config):
. Output only the next line. | def _get_loss(loss: Union[base_loss.MinDiffLoss, str], |
Given snippet: <|code_start|>#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for MinDiff losses."""
_STRING_TO_LOSS_DICT = {}
def _register_loss_names(loss_class, names):
for name in names:
_STRING_TO_LOSS_DICT[name] = loss_class
if not name.endswith('_loss'):
_STRING_TO_LOSS_DICT[name + '_loss'] = loss_class
_register_loss_names(abs_corr_loss.AbsoluteCorrelationLoss,
['abs_corr', 'absolute_correlation'])
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from typing import Union
from tensorflow_model_remediation.min_diff.losses import absolute_correlation_loss as abs_corr_loss
from tensorflow_model_remediation.min_diff.losses import base_loss
from tensorflow_model_remediation.min_diff.losses import mmd_loss
and context:
# Path: tensorflow_model_remediation/min_diff/losses/absolute_correlation_loss.py
# _EPSILON = 1.0e-7
# class AbsoluteCorrelationLoss(base_loss.MinDiffLoss):
# def __init__(self, name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def call(
# self,
# sensitive_group_labels: types.TensorType,
# y_pred: types.TensorType,
# sample_weight: Optional[types.TensorType] = None) -> types.TensorType:
#
# Path: tensorflow_model_remediation/min_diff/losses/base_loss.py
# class MinDiffLoss(tf.keras.losses.Loss, abc.ABC):
# def __init__(self,
# membership_transform=None,
# predictions_transform=None,
# membership_kernel=None,
# predictions_kernel=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def __call__(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _preprocess_inputs(
# self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None
# ) -> Tuple[types.TensorType, types.TensorType, types.TensorType]:
# def _apply_kernels(
# self, membership: types.TensorType, predictions: types.TensorType
# ) -> Tuple[types.TensorType, types.TensorType]:
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _serialize_config(self, config):
# def _serialize_value(key, value):
# def get_config(self):
# def _deserialize_config(cls, config):
# def _deserialize_value(key, value):
# def from_config(cls, config):
# def _validate_transform(transform: types.TensorTransformType,
# var_name: str) -> None:
# def _to_snake_case(name):
#
# Path: tensorflow_model_remediation/min_diff/losses/mmd_loss.py
# class MMDLoss(base_loss.MinDiffLoss):
# def __init__(self,
# kernel="gaussian",
# predictions_transform=None,
# name: Optional[str] = None,
# enable_summary_histogram: Optional[bool] = True):
# def _preprocess(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def _calculate_mean(self, predictions_kernel: types.TensorType,
# normed_weights: types.TensorType,
# pos_mask: types.TensorType, neg_mask):
# def call(self,
# membership: types.TensorType,
# predictions: types.TensorType,
# sample_weight: Optional[types.TensorType] = None):
# def from_config(cls, config):
which might include code, classes, or functions. Output only the next line. | _register_loss_names(mmd_loss.MMDLoss, ['mmd', 'maximum_mean_discrepancy']) |
Continue the code snippet: <|code_start|># you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for MinDiff kernels."""
_STRING_TO_KERNEL_DICT = {}
def _register_kernel_names(kernel_class, names):
for name in names:
_STRING_TO_KERNEL_DICT[name] = kernel_class
if not name.endswith('_kernel'):
_STRING_TO_KERNEL_DICT[name + '_kernel'] = kernel_class
_register_kernel_names(gaussian_kernel.GaussianKernel, ['gauss', 'gaussian'])
_register_kernel_names(laplacian_kernel.LaplacianKernel,
['laplace', 'laplacian'])
<|code_end|>
. Use current file imports:
from typing import Union
from tensorflow_model_remediation.min_diff.losses.kernels import base_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import gaussian_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import laplacian_kernel
and context (classes, functions, or code) from other files:
# Path: tensorflow_model_remediation/min_diff/losses/kernels/base_kernel.py
# class MinDiffKernel(abc.ABC):
# def __init__(self, tile_input: bool = True):
# def __call__(self,
# x: types.TensorType,
# y: Optional[types.TensorType] = None) -> types.TensorType:
# def call(self, x: types.TensorType, y: types.TensorType):
# def get_config(self):
# def from_config(cls, config):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/gaussian_kernel.py
# class GaussianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/laplacian_kernel.py
# _EPSILON = 1.0e-8
# class LaplacianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
. Output only the next line. | def _get_kernel(kernel: Union[base_kernel.MinDiffKernel, str], |
Continue the code snippet: <|code_start|># coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for MinDiff kernels."""
_STRING_TO_KERNEL_DICT = {}
def _register_kernel_names(kernel_class, names):
for name in names:
_STRING_TO_KERNEL_DICT[name] = kernel_class
if not name.endswith('_kernel'):
_STRING_TO_KERNEL_DICT[name + '_kernel'] = kernel_class
<|code_end|>
. Use current file imports:
from typing import Union
from tensorflow_model_remediation.min_diff.losses.kernels import base_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import gaussian_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import laplacian_kernel
and context (classes, functions, or code) from other files:
# Path: tensorflow_model_remediation/min_diff/losses/kernels/base_kernel.py
# class MinDiffKernel(abc.ABC):
# def __init__(self, tile_input: bool = True):
# def __call__(self,
# x: types.TensorType,
# y: Optional[types.TensorType] = None) -> types.TensorType:
# def call(self, x: types.TensorType, y: types.TensorType):
# def get_config(self):
# def from_config(cls, config):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/gaussian_kernel.py
# class GaussianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/laplacian_kernel.py
# _EPSILON = 1.0e-8
# class LaplacianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
. Output only the next line. | _register_kernel_names(gaussian_kernel.GaussianKernel, ['gauss', 'gaussian']) |
Predict the next line for this snippet: <|code_start|># coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for MinDiff kernels."""
_STRING_TO_KERNEL_DICT = {}
def _register_kernel_names(kernel_class, names):
for name in names:
_STRING_TO_KERNEL_DICT[name] = kernel_class
if not name.endswith('_kernel'):
_STRING_TO_KERNEL_DICT[name + '_kernel'] = kernel_class
_register_kernel_names(gaussian_kernel.GaussianKernel, ['gauss', 'gaussian'])
<|code_end|>
with the help of current file imports:
from typing import Union
from tensorflow_model_remediation.min_diff.losses.kernels import base_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import gaussian_kernel
from tensorflow_model_remediation.min_diff.losses.kernels import laplacian_kernel
and context from other files:
# Path: tensorflow_model_remediation/min_diff/losses/kernels/base_kernel.py
# class MinDiffKernel(abc.ABC):
# def __init__(self, tile_input: bool = True):
# def __call__(self,
# x: types.TensorType,
# y: Optional[types.TensorType] = None) -> types.TensorType:
# def call(self, x: types.TensorType, y: types.TensorType):
# def get_config(self):
# def from_config(cls, config):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/gaussian_kernel.py
# class GaussianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
#
# Path: tensorflow_model_remediation/min_diff/losses/kernels/laplacian_kernel.py
# _EPSILON = 1.0e-8
# class LaplacianKernel(base_kernel.MinDiffKernel):
# def __init__(self, kernel_length: complex = 0.1, **kwargs):
# def call(self, x: types.TensorType, y: types.TensorType) -> types.TensorType:
# def get_config(self):
, which may contain function names, class names, or code. Output only the next line. | _register_kernel_names(laplacian_kernel.LaplacianKernel, |
Given the code snippet: <|code_start|>
class SetupTestCase(unittest.TestCase):
def setUp(self):
self.api = openaq.OpenAQ()
def tearDown(self):
pass
def test_mass_to_mix(self):
<|code_end|>
, generate the next line using the imports in this file:
import unittest
import openaq
import pandas as pd
from openaq.utils import mass_to_mix
and context (functions, classes, or occasionally code) from other files:
# Path: openaq/utils.py
# def mass_to_mix(value, param, unit='ppb', **kwargs):
# """Convert units from ug/m3 to ppb or ppm. The conversion assumes an ambient
# pressure of 1 atmosphere and ambient temperature of 25 degC.
#
# :param value: the concentration in ug/m3
# :param param: the parameter to convert {'co', 'no', 'no2', 'so2', 'o3'}
# :param unit: the desired output unit {'ppb', 'ppm'}
#
# :type value: float
# :type param: string
# :type unit: string
#
# :returns: value
#
# :Example:
#
# >>> import openaq
# >>> conc_ugm3 = 100
# >>> conc_ppb = openaq.utils.mass_to_mix(conc_ugm3, param='co', unit='ppb')
# >>> conc_ppb
# 114.5
#
# """
# lookup = {
# 'co': 1.145,
# 'no': 1.25,
# 'no2': 1.88,
# 'so2': 2.62,
# 'o3': 2.0
# }
#
# param = param.lower()
#
# if param not in lookup.keys():
# return value
#
# value = value / lookup[param]
#
# if unit.lower() == 'ppm':
# value *= 1e-3
#
# return value
. Output only the next line. | mix = mass_to_mix(value=1.145, param='co', unit='ppb') |
Using the snippet: <|code_start|> def decorated_function(*args, **kwargs):
df = kwargs.pop('df', False)
index = kwargs.pop('index', 'local')
if df == True and _no_pandas == False:
status, resp = f(*args, **kwargs)
if status == 200:
resp = resp['results']
if f.__name__ == 'latest':
d = []
for i in resp:
for m in i['measurements']:
tmp = m
tmp['country'] = i['country']
tmp['city'] = i['city']
tmp['location'] = i['location']
tmp['lastUpdated'] = pd.to_datetime(tmp['lastUpdated'])
d.append(tmp)
resp = d
data = pd.io.json.json_normalize(resp)
# If there are any datetimes, make them datetimes!
for each in [i for i in data.columns if 'date' in i]:
if 'local' in each:
<|code_end|>
, determine the next line of code. You have imports:
from functools import wraps
from unittest import SkipTest
from .utils import to_naive_timestamp, clean_encodings
import warnings
import pandas as pd
and context (class names, function names, or code) available:
# Path: openaq/utils.py
# def to_naive_timestamp(timestamp):
# """
# Convert a timezone aware timestamp (as a string) and return the
# python datetime in the local timezone, but without the tzinfo attribute
# """
# return dateutil.parser.parse(timestamp).replace(tzinfo=None).isoformat()
#
# def clean_encodings(unit):
# if unit not in ('ppm', 'ppb', 'ppt', 'ugm3'):
# unit = unit.encode('utf-8')
#
# if unit == '\xc2\xb5g/m\xc2\xb3':
# unit = 'ugm3'
#
# return unit
. Output only the next line. | data[each] = pd.to_datetime(data[each].apply(lambda x: to_naive_timestamp(x))) |
Given snippet: <|code_start|> d.append(tmp)
resp = d
data = pd.io.json.json_normalize(resp)
# If there are any datetimes, make them datetimes!
for each in [i for i in data.columns if 'date' in i]:
if 'local' in each:
data[each] = pd.to_datetime(data[each].apply(lambda x: to_naive_timestamp(x)))
else:
data[each] = pd.to_datetime(data[each])
if f.__name__ in ('latest'):
data.index = data['lastUpdated']
del data['lastUpdated']
elif f.__name__ in ('measurements'):
if index == 'utc':
data.index = data['date.utc']
del data['date.utc']
elif index == 'local':
data.index = data['date.local']
del data['date.local']
else:
pass
# Clean up encodings
if 'unit' in data.columns:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from functools import wraps
from unittest import SkipTest
from .utils import to_naive_timestamp, clean_encodings
import warnings
import pandas as pd
and context:
# Path: openaq/utils.py
# def to_naive_timestamp(timestamp):
# """
# Convert a timezone aware timestamp (as a string) and return the
# python datetime in the local timezone, but without the tzinfo attribute
# """
# return dateutil.parser.parse(timestamp).replace(tzinfo=None).isoformat()
#
# def clean_encodings(unit):
# if unit not in ('ppm', 'ppb', 'ppt', 'ugm3'):
# unit = unit.encode('utf-8')
#
# if unit == '\xc2\xb5g/m\xc2\xb3':
# unit = 'ugm3'
#
# return unit
which might include code, classes, or functions. Output only the next line. | data['unit'] = data['unit'].apply(clean_encodings) |
Given snippet: <|code_start|> ax.xaxis.set_major_formatter( dates.DateFormatter("\n%b\n%Y") )
elif dt <= 28.: # less than four weeks
ax.xaxis.set_minor_locator( dates.DayLocator())
ax.xaxis.set_minor_formatter( dates.DateFormatter("%d"))
ax.xaxis.set_major_locator( dates.MonthLocator() )
ax.xaxis.set_major_formatter( dates.DateFormatter("\n%b\n%Y") )
elif dt <= 4 * 30.: # less than four months
ax.xaxis.set_minor_locator( dates.DayLocator( bymonthday=[1, 7, 14, 21] ))
ax.xaxis.set_minor_formatter( dates.DateFormatter("%d"))
ax.xaxis.set_major_locator( dates.MonthLocator())
ax.xaxis.set_major_formatter( dates.DateFormatter("\n%b\n%Y") )
else:
ax.xaxis.set_minor_locator( dates.MonthLocator(interval=2) )
ax.xaxis.set_minor_formatter( dates.DateFormatter("%b"))
ax.xaxis.set_major_locator( dates.MonthLocator(bymonth=[1]) )
ax.xaxis.set_major_formatter( dates.DateFormatter("\n%Y"))
return ax
rc_tsplot = {
'xtick.major.size': 8.,
'xtick.minor.size': 4.,
'xtick.direction': 'out',
'ytick.major.size': 10.,
}
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from .decorators import skipif
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as dates
and context:
# Path: openaq/decorators.py
# def skipif(skipcondition, msg = ""):
# """
# """
# def decorator(f):
# @wraps(f)
# def decorated_function(*args, **kwargs):
# if skipcondition == True:
# raise SkipTest(msg)
#
# return f(*args, **kwargs)
# return decorated_function
# return decorator
which might include code, classes, or functions. Output only the next line. | @skipif(_no_sns) |
Given snippet: <|code_start|># coding: utf-8
from __future__ import absolute_import, unicode_literals
class PostfinanceProvider(BasePaymentProvider):
"""
The Payment Provider Postfinance.
"""
def __unicode__(self):
return 'Postfinance'
def payment_url(self):
return reverse('zipfelchappe_postfinance_payment')
def collect_pledge(self, pledge):
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from datetime import timedelta
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from ..payment_provider import BasePaymentProvider
from .tasks import process_pledge
from .app_settings import MAX_BLOCKING_DURATION_DAYS
and context:
# Path: zipfelchappe/payment_provider.py
# class BasePaymentProvider(object):
# """
# The abstract base class for all payment providers.
# """
# def __init__(self, name):
# self.name = name
#
# def __unicode__(self):
# return 'Base payment provider'
#
# def payment_url(self):
# """
# :return: The payment URL
# """
# raise NotImplementedError()
#
# # TODO: use this instead of global urls.
# def get_provider_url_patterns(self):
# """
# Return the local url patterns
# :return: A list of url instances
# """
# raise NotImplementedError
#
# def validate_project(self, project, db_instance=None):
# """
# A provider can validate a project.
# :param project: The project instance to validate
# :param db_instance: The project instance in the database
# :raise: ValidationError
# """
# pass
#
# def collect_pledge(self, pledge):
# """
# Collects payment for the given pledge.
# :param pledge: An authorized pledge.
# """
# raise NotImplementedError()
#
# def refund_pledge(self, pledge):
# """
# Frees reserved funds for the given pledge.
# :param pledge: An authorized pledge.
# """
# raise NotImplementedError()
#
# def collect_billable_payments(self, project):
# """
# Collects billable payments for the given project.
# :param project: The project to collect payments for.
# :return: The amount of processed pledges.
# """
# pledges = project.authorized_pledges.filter(provider=self.name)
# for pledge in pledges:
# try:
# self.collect_pledge(pledge)
# except PaymentProviderException as e:
# logger.info(e.message)
#
# def refund_payments(self, project):
# """
# Refunds reserved payments for the given project.
# :param project: The project to collect payments for.
# :return: The amount of processed pledges.
# """
# raise NotImplementedError()
#
# Path: zipfelchappe/postfinance/tasks.py
# def process_pledge(pledge):
# """ Collect postfinance payment for exactly one pledge """
# try:
# payment = pledge.postfinance_payment
# except Payment.DoesNotExist:
# raise PostfinanceException('Payment for pledge %s not found' % pledge.pk)
#
# if payment.STATUS == '91':
# # payment is in processing state, check status
# result = update_payment(payment.PAYID)
# if result['STATUS'] == '9':
# payment.STATUS = result['STATUS']
# payment.save()
#
# payment.pledge.status = Pledge.PAID
# payment.pledge.save()
# logger.info('Pledge {0} has been paid.'.format(pledge.pk))
# return result
# logger.debug('New status for pledge {0}: {1}:{2}'.format(
# pledge.pk, payment.STATUS, STATUS_DICT[payment.STATUS]
# ))
#
# elif payment.STATUS == '5':
# # Payment is authorized, request transaction
# try:
# result = request_payment(payment.PAYID)
# except Exception as e:
# payment.pledge.mark_failed(e.message)
# raise PostfinanceException(e.message)
#
# if 'STATUS' not in result or result['STATUS'] == '0':
# raise PostfinanceException('Incomplete or invalid status')
# else:
# payment.STATUS = result['STATUS']
# payment.save()
# logger.info('Pledge {0} has been paid. Status:{1}'.format(pledge.pk, result['STATUS']))
#
# return result
# else:
# raise PostfinanceException('Payment is not authorized')
#
# Path: zipfelchappe/postfinance/app_settings.py
# MAX_BLOCKING_DURATION_DAYS = 29 # maximum duration for funds to be blocked.
which might include code, classes, or functions. Output only the next line. | return process_pledge(pledge) |
Continue the code snippet: <|code_start|># coding: utf-8
from __future__ import absolute_import, unicode_literals
class PostfinanceProvider(BasePaymentProvider):
"""
The Payment Provider Postfinance.
"""
def __unicode__(self):
return 'Postfinance'
def payment_url(self):
return reverse('zipfelchappe_postfinance_payment')
def collect_pledge(self, pledge):
return process_pledge(pledge)
def validate_project(self, project, db_instance=None):
"""
A provider can validate a project.
:param project: The project instance to validate
:param db_instance: The project instance in the database
:raise: ValidationError
"""
if project.start and project.end and \
<|code_end|>
. Use current file imports:
from datetime import timedelta
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from ..payment_provider import BasePaymentProvider
from .tasks import process_pledge
from .app_settings import MAX_BLOCKING_DURATION_DAYS
and context (classes, functions, or code) from other files:
# Path: zipfelchappe/payment_provider.py
# class BasePaymentProvider(object):
# """
# The abstract base class for all payment providers.
# """
# def __init__(self, name):
# self.name = name
#
# def __unicode__(self):
# return 'Base payment provider'
#
# def payment_url(self):
# """
# :return: The payment URL
# """
# raise NotImplementedError()
#
# # TODO: use this instead of global urls.
# def get_provider_url_patterns(self):
# """
# Return the local url patterns
# :return: A list of url instances
# """
# raise NotImplementedError
#
# def validate_project(self, project, db_instance=None):
# """
# A provider can validate a project.
# :param project: The project instance to validate
# :param db_instance: The project instance in the database
# :raise: ValidationError
# """
# pass
#
# def collect_pledge(self, pledge):
# """
# Collects payment for the given pledge.
# :param pledge: An authorized pledge.
# """
# raise NotImplementedError()
#
# def refund_pledge(self, pledge):
# """
# Frees reserved funds for the given pledge.
# :param pledge: An authorized pledge.
# """
# raise NotImplementedError()
#
# def collect_billable_payments(self, project):
# """
# Collects billable payments for the given project.
# :param project: The project to collect payments for.
# :return: The amount of processed pledges.
# """
# pledges = project.authorized_pledges.filter(provider=self.name)
# for pledge in pledges:
# try:
# self.collect_pledge(pledge)
# except PaymentProviderException as e:
# logger.info(e.message)
#
# def refund_payments(self, project):
# """
# Refunds reserved payments for the given project.
# :param project: The project to collect payments for.
# :return: The amount of processed pledges.
# """
# raise NotImplementedError()
#
# Path: zipfelchappe/postfinance/tasks.py
# def process_pledge(pledge):
# """ Collect postfinance payment for exactly one pledge """
# try:
# payment = pledge.postfinance_payment
# except Payment.DoesNotExist:
# raise PostfinanceException('Payment for pledge %s not found' % pledge.pk)
#
# if payment.STATUS == '91':
# # payment is in processing state, check status
# result = update_payment(payment.PAYID)
# if result['STATUS'] == '9':
# payment.STATUS = result['STATUS']
# payment.save()
#
# payment.pledge.status = Pledge.PAID
# payment.pledge.save()
# logger.info('Pledge {0} has been paid.'.format(pledge.pk))
# return result
# logger.debug('New status for pledge {0}: {1}:{2}'.format(
# pledge.pk, payment.STATUS, STATUS_DICT[payment.STATUS]
# ))
#
# elif payment.STATUS == '5':
# # Payment is authorized, request transaction
# try:
# result = request_payment(payment.PAYID)
# except Exception as e:
# payment.pledge.mark_failed(e.message)
# raise PostfinanceException(e.message)
#
# if 'STATUS' not in result or result['STATUS'] == '0':
# raise PostfinanceException('Incomplete or invalid status')
# else:
# payment.STATUS = result['STATUS']
# payment.save()
# logger.info('Pledge {0} has been paid. Status:{1}'.format(pledge.pk, result['STATUS']))
#
# return result
# else:
# raise PostfinanceException('Payment is not authorized')
#
# Path: zipfelchappe/postfinance/app_settings.py
# MAX_BLOCKING_DURATION_DAYS = 29 # maximum duration for funds to be blocked.
. Output only the next line. | project.end - project.start > timedelta(days=MAX_BLOCKING_DURATION_DAYS): |
Predict the next line for this snippet: <|code_start|>from __future__ import absolute_import, unicode_literals
@skipIfCustomUser
class PledgeWorkflowTest(TestCase):
login_url = '/accounts/login/'
def setUp(self):
# feincms page containing zipfelchappe app content
self.page = Page.objects.create(title='Projects', slug='projects')
ct = self.page.content_type_for(ApplicationContent)
ct.objects.create(parent=self.page, urlconf_path=app_settings.ROOT_URLS)
# Fixture Data for following tests
<|code_end|>
with the help of current file imports:
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.tests.utils import skipIfCustomUser
from feincms.module.page.models import Page
from feincms.content.application.models import ApplicationContent, app_reverse
from tests.factories import ProjectFactory, RewardFactory, UserFactory
from zipfelchappe import app_settings
and context from other files:
# Path: tests/factories.py
# class ProjectFactory(factory.DjangoModelFactory):
# title = factory.Sequence(lambda n: 'Testproject %s' % n)
# slug = factory.Sequence(lambda n: 'test%s' % n)
# goal = 200.00
# currency = 'CHF'
# start = timezone.now()
# end = timezone.now() + timedelta(days=1)
#
# class Meta:
# model = Project
#
# class RewardFactory(factory.DjangoModelFactory):
# description = factory.Sequence(lambda n: 'testreward%s' % n)
#
# class Meta:
# model = Reward
#
# class UserFactory(factory.DjangoModelFactory):
# username = factory.Sequence(lambda n: 'user%s' % n)
# email = factory.Sequence(lambda n: 'user%s@example.org' % n)
# is_active = True
# is_superuser = False
# is_staff = False
# # password 'test'
# password = 'pbkdf2_sha256$10000$s9Ed0KfEQgTY$CsbbUpXaWk+8eAB+Oga2hBqD82kU4vl+QQaqr/wCZXY='
# first_name = 'Hans'
# last_name = 'Muster'
#
# class Meta:
# model = get_user_model()
#
# Path: zipfelchappe/app_settings.py
# CURRENCIES = getattr(settings, 'ZIPFELCHAPPE_CURRENCIES', ('CHF', 'EUR', 'USD'))
# DISQUS_SHORTNAME = getattr(settings, 'ZIPFELCHAPPE_DISQUS_SHORTNAME', None)
# PAGINATE_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BY', 10)
# PAGINATE_BACKERS_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BACKERS', 25)
# ALLOW_ANONYMOUS_PLEDGES = getattr(settings, 'ZIPFELCHAPPE_ALLOW_ANONYMOUS_PLEDGES', True)
# BACKER_PROFILE = getattr(settings, 'ZIPFELCHAPPE_BACKER_PROFILE', None)
# ROOT_URLS = getattr(settings, 'ZIPFELCHAPPE_URLS', 'zipfelchappe.urls')
# DEFAULT_IMAGE_URL = getattr(settings, 'ZIPFELCHAPPE_PLACEHOLDER_IMAGE_URL', '')
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
# MAX_PROJECT_DURATION_DAYS = getattr(settings, 'ZIPFELCHAPPE_MAX_PROJECT_DURATION', 120)
# TERMS_URL = settings.ZIPFELCHAPPE_TERMS_URL
# MANAGERS = getattr(settings, 'ZIPFELCHAPPE_MANAGERS', settings.MANAGERS)
, which may contain function names, class names, or code. Output only the next line. | self.project1 = ProjectFactory.create() |
Predict the next line for this snippet: <|code_start|>from __future__ import absolute_import, unicode_literals
@skipIfCustomUser
class PledgeWorkflowTest(TestCase):
login_url = '/accounts/login/'
def setUp(self):
# feincms page containing zipfelchappe app content
self.page = Page.objects.create(title='Projects', slug='projects')
ct = self.page.content_type_for(ApplicationContent)
ct.objects.create(parent=self.page, urlconf_path=app_settings.ROOT_URLS)
# Fixture Data for following tests
self.project1 = ProjectFactory.create()
self.project2 = ProjectFactory.create()
self.user = UserFactory.create()
<|code_end|>
with the help of current file imports:
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.tests.utils import skipIfCustomUser
from feincms.module.page.models import Page
from feincms.content.application.models import ApplicationContent, app_reverse
from tests.factories import ProjectFactory, RewardFactory, UserFactory
from zipfelchappe import app_settings
and context from other files:
# Path: tests/factories.py
# class ProjectFactory(factory.DjangoModelFactory):
# title = factory.Sequence(lambda n: 'Testproject %s' % n)
# slug = factory.Sequence(lambda n: 'test%s' % n)
# goal = 200.00
# currency = 'CHF'
# start = timezone.now()
# end = timezone.now() + timedelta(days=1)
#
# class Meta:
# model = Project
#
# class RewardFactory(factory.DjangoModelFactory):
# description = factory.Sequence(lambda n: 'testreward%s' % n)
#
# class Meta:
# model = Reward
#
# class UserFactory(factory.DjangoModelFactory):
# username = factory.Sequence(lambda n: 'user%s' % n)
# email = factory.Sequence(lambda n: 'user%s@example.org' % n)
# is_active = True
# is_superuser = False
# is_staff = False
# # password 'test'
# password = 'pbkdf2_sha256$10000$s9Ed0KfEQgTY$CsbbUpXaWk+8eAB+Oga2hBqD82kU4vl+QQaqr/wCZXY='
# first_name = 'Hans'
# last_name = 'Muster'
#
# class Meta:
# model = get_user_model()
#
# Path: zipfelchappe/app_settings.py
# CURRENCIES = getattr(settings, 'ZIPFELCHAPPE_CURRENCIES', ('CHF', 'EUR', 'USD'))
# DISQUS_SHORTNAME = getattr(settings, 'ZIPFELCHAPPE_DISQUS_SHORTNAME', None)
# PAGINATE_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BY', 10)
# PAGINATE_BACKERS_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BACKERS', 25)
# ALLOW_ANONYMOUS_PLEDGES = getattr(settings, 'ZIPFELCHAPPE_ALLOW_ANONYMOUS_PLEDGES', True)
# BACKER_PROFILE = getattr(settings, 'ZIPFELCHAPPE_BACKER_PROFILE', None)
# ROOT_URLS = getattr(settings, 'ZIPFELCHAPPE_URLS', 'zipfelchappe.urls')
# DEFAULT_IMAGE_URL = getattr(settings, 'ZIPFELCHAPPE_PLACEHOLDER_IMAGE_URL', '')
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
# MAX_PROJECT_DURATION_DAYS = getattr(settings, 'ZIPFELCHAPPE_MAX_PROJECT_DURATION', 120)
# TERMS_URL = settings.ZIPFELCHAPPE_TERMS_URL
# MANAGERS = getattr(settings, 'ZIPFELCHAPPE_MANAGERS', settings.MANAGERS)
, which may contain function names, class names, or code. Output only the next line. | self.reward = RewardFactory.create( |
Continue the code snippet: <|code_start|>from __future__ import absolute_import, unicode_literals
@skipIfCustomUser
class PledgeWorkflowTest(TestCase):
login_url = '/accounts/login/'
def setUp(self):
# feincms page containing zipfelchappe app content
self.page = Page.objects.create(title='Projects', slug='projects')
ct = self.page.content_type_for(ApplicationContent)
ct.objects.create(parent=self.page, urlconf_path=app_settings.ROOT_URLS)
# Fixture Data for following tests
self.project1 = ProjectFactory.create()
self.project2 = ProjectFactory.create()
<|code_end|>
. Use current file imports:
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.tests.utils import skipIfCustomUser
from feincms.module.page.models import Page
from feincms.content.application.models import ApplicationContent, app_reverse
from tests.factories import ProjectFactory, RewardFactory, UserFactory
from zipfelchappe import app_settings
and context (classes, functions, or code) from other files:
# Path: tests/factories.py
# class ProjectFactory(factory.DjangoModelFactory):
# title = factory.Sequence(lambda n: 'Testproject %s' % n)
# slug = factory.Sequence(lambda n: 'test%s' % n)
# goal = 200.00
# currency = 'CHF'
# start = timezone.now()
# end = timezone.now() + timedelta(days=1)
#
# class Meta:
# model = Project
#
# class RewardFactory(factory.DjangoModelFactory):
# description = factory.Sequence(lambda n: 'testreward%s' % n)
#
# class Meta:
# model = Reward
#
# class UserFactory(factory.DjangoModelFactory):
# username = factory.Sequence(lambda n: 'user%s' % n)
# email = factory.Sequence(lambda n: 'user%s@example.org' % n)
# is_active = True
# is_superuser = False
# is_staff = False
# # password 'test'
# password = 'pbkdf2_sha256$10000$s9Ed0KfEQgTY$CsbbUpXaWk+8eAB+Oga2hBqD82kU4vl+QQaqr/wCZXY='
# first_name = 'Hans'
# last_name = 'Muster'
#
# class Meta:
# model = get_user_model()
#
# Path: zipfelchappe/app_settings.py
# CURRENCIES = getattr(settings, 'ZIPFELCHAPPE_CURRENCIES', ('CHF', 'EUR', 'USD'))
# DISQUS_SHORTNAME = getattr(settings, 'ZIPFELCHAPPE_DISQUS_SHORTNAME', None)
# PAGINATE_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BY', 10)
# PAGINATE_BACKERS_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BACKERS', 25)
# ALLOW_ANONYMOUS_PLEDGES = getattr(settings, 'ZIPFELCHAPPE_ALLOW_ANONYMOUS_PLEDGES', True)
# BACKER_PROFILE = getattr(settings, 'ZIPFELCHAPPE_BACKER_PROFILE', None)
# ROOT_URLS = getattr(settings, 'ZIPFELCHAPPE_URLS', 'zipfelchappe.urls')
# DEFAULT_IMAGE_URL = getattr(settings, 'ZIPFELCHAPPE_PLACEHOLDER_IMAGE_URL', '')
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
# MAX_PROJECT_DURATION_DAYS = getattr(settings, 'ZIPFELCHAPPE_MAX_PROJECT_DURATION', 120)
# TERMS_URL = settings.ZIPFELCHAPPE_TERMS_URL
# MANAGERS = getattr(settings, 'ZIPFELCHAPPE_MANAGERS', settings.MANAGERS)
. Output only the next line. | self.user = UserFactory.create() |
Next line prediction: <|code_start|>from __future__ import absolute_import, unicode_literals
@skipIfCustomUser
class PledgeWorkflowTest(TestCase):
login_url = '/accounts/login/'
def setUp(self):
# feincms page containing zipfelchappe app content
self.page = Page.objects.create(title='Projects', slug='projects')
ct = self.page.content_type_for(ApplicationContent)
<|code_end|>
. Use current file imports:
(from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.tests.utils import skipIfCustomUser
from feincms.module.page.models import Page
from feincms.content.application.models import ApplicationContent, app_reverse
from tests.factories import ProjectFactory, RewardFactory, UserFactory
from zipfelchappe import app_settings)
and context including class names, function names, or small code snippets from other files:
# Path: tests/factories.py
# class ProjectFactory(factory.DjangoModelFactory):
# title = factory.Sequence(lambda n: 'Testproject %s' % n)
# slug = factory.Sequence(lambda n: 'test%s' % n)
# goal = 200.00
# currency = 'CHF'
# start = timezone.now()
# end = timezone.now() + timedelta(days=1)
#
# class Meta:
# model = Project
#
# class RewardFactory(factory.DjangoModelFactory):
# description = factory.Sequence(lambda n: 'testreward%s' % n)
#
# class Meta:
# model = Reward
#
# class UserFactory(factory.DjangoModelFactory):
# username = factory.Sequence(lambda n: 'user%s' % n)
# email = factory.Sequence(lambda n: 'user%s@example.org' % n)
# is_active = True
# is_superuser = False
# is_staff = False
# # password 'test'
# password = 'pbkdf2_sha256$10000$s9Ed0KfEQgTY$CsbbUpXaWk+8eAB+Oga2hBqD82kU4vl+QQaqr/wCZXY='
# first_name = 'Hans'
# last_name = 'Muster'
#
# class Meta:
# model = get_user_model()
#
# Path: zipfelchappe/app_settings.py
# CURRENCIES = getattr(settings, 'ZIPFELCHAPPE_CURRENCIES', ('CHF', 'EUR', 'USD'))
# DISQUS_SHORTNAME = getattr(settings, 'ZIPFELCHAPPE_DISQUS_SHORTNAME', None)
# PAGINATE_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BY', 10)
# PAGINATE_BACKERS_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BACKERS', 25)
# ALLOW_ANONYMOUS_PLEDGES = getattr(settings, 'ZIPFELCHAPPE_ALLOW_ANONYMOUS_PLEDGES', True)
# BACKER_PROFILE = getattr(settings, 'ZIPFELCHAPPE_BACKER_PROFILE', None)
# ROOT_URLS = getattr(settings, 'ZIPFELCHAPPE_URLS', 'zipfelchappe.urls')
# DEFAULT_IMAGE_URL = getattr(settings, 'ZIPFELCHAPPE_PLACEHOLDER_IMAGE_URL', '')
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
# MAX_PROJECT_DURATION_DAYS = getattr(settings, 'ZIPFELCHAPPE_MAX_PROJECT_DURATION', 120)
# TERMS_URL = settings.ZIPFELCHAPPE_TERMS_URL
# MANAGERS = getattr(settings, 'ZIPFELCHAPPE_MANAGERS', settings.MANAGERS)
. Output only the next line. | ct.objects.create(parent=self.page, urlconf_path=app_settings.ROOT_URLS) |
Given the following code snippet before the placeholder: <|code_start|># coding: utf-8
from __future__ import absolute_import, unicode_literals
class PaypalProvider(BasePaymentProvider):
"""
    The Payment Provider Paypal.
"""
def __unicode__(self):
return 'Paypal'
def payment_url(self):
return reverse('zipfelchappe_paypal_payment')
def collect_pledge(self, pledge):
<|code_end|>
, predict the next line using imports from the current file:
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from ..payment_provider import BasePaymentProvider
from .tasks import process_pledge
from .app_settings import MAXIMUM_ALLOWED_REWARD
and context including class names, function names, and sometimes code from other files:
# Path: zipfelchappe/payment_provider.py
# class BasePaymentProvider(object):
# """
# The abstract base class for all payment providers.
# """
# def __init__(self, name):
# self.name = name
#
# def __unicode__(self):
# return 'Base payment provider'
#
# def payment_url(self):
# """
# :return: The payment URL
# """
# raise NotImplementedError()
#
# # TODO: use this instead of global urls.
# def get_provider_url_patterns(self):
# """
# Return the local url patterns
# :return: A list of url instances
# """
# raise NotImplementedError
#
# def validate_project(self, project, db_instance=None):
# """
# A provider can validate a project.
# :param project: The project instance to validate
# :param db_instance: The project instance in the database
# :raise: ValidationError
# """
# pass
#
# def collect_pledge(self, pledge):
# """
# Collects payment for the given pledge.
# :param pledge: An authorized pledge.
# """
# raise NotImplementedError()
#
# def refund_pledge(self, pledge):
# """
# Frees reserved funds for the given pledge.
# :param pledge: An authorized pledge.
# """
# raise NotImplementedError()
#
# def collect_billable_payments(self, project):
# """
# Collects billable payments for the given project.
# :param project: The project to collect payments for.
# :return: The amount of processed pledges.
# """
# pledges = project.authorized_pledges.filter(provider=self.name)
# for pledge in pledges:
# try:
# self.collect_pledge(pledge)
# except PaymentProviderException as e:
# logger.info(e.message)
#
# def refund_payments(self, project):
# """
# Refunds reserved payments for the given project.
# :param project: The project to collect payments for.
# :return: The amount of processed pledges.
# """
# raise NotImplementedError()
#
# Path: zipfelchappe/paypal/tasks.py
# def process_pledge(pledge):
# """
# Collect the paypal payment of one pledge
#
# Payments are asynchronous and we'll get notified about payment status by
# paypal via IPN Messages.
# """
#
# try:
# preapproval = pledge.paypal_preapproval
# except Preapproval.DoesNotExist:
# raise PaypalException('No preapproval for this pledge found')
#
# # All seems ok, try to execute paypal payment
# pp_payment = create_payment(preapproval)
# pp_data = pp_payment.json()
#
# Payment.objects.create(
# key=pp_data.get('payKey', 'ERROR_%s' % preapproval.key[:14]),
# preapproval=preapproval,
# status=pp_data.get('paymentExecStatus', 'ERROR'),
# data=json.dumps(pp_data, indent=2),
# )
#
# if pp_data and 'error' in pp_data:
# # Mark pledge as FAILED after 3 retries
# if preapproval.payments.count() >= 3:
# pledge.status = Pledge.FAILED
# pledge.save()
# for error in pp_data['error']:
# raise PaypalException(error['message'])
#
# return pp_data
#
# Path: zipfelchappe/paypal/app_settings.py
# MAXIMUM_ALLOWED_REWARD = 100 # US$
. Output only the next line. | return process_pledge(pledge) |
Predict the next line after this snippet: <|code_start|># coding: utf-8
from __future__ import absolute_import, unicode_literals
class PaypalProvider(BasePaymentProvider):
"""
    The Payment Provider Paypal.
"""
def __unicode__(self):
return 'Paypal'
def payment_url(self):
return reverse('zipfelchappe_paypal_payment')
def collect_pledge(self, pledge):
return process_pledge(pledge)
def validate_project(self, project, db_instance=None):
"""
A provider can validate a project.
:param project: The project instance to validate
:param db_instance: The project instance in the database
:raise: ValidationError
"""
rewards = project.rewards.all()
if len(rewards) > 0:
for reward in rewards:
<|code_end|>
using the current file's imports:
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from ..payment_provider import BasePaymentProvider
from .tasks import process_pledge
from .app_settings import MAXIMUM_ALLOWED_REWARD
and any relevant context from other files:
# Path: zipfelchappe/payment_provider.py
# class BasePaymentProvider(object):
# """
# The abstract base class for all payment providers.
# """
# def __init__(self, name):
# self.name = name
#
# def __unicode__(self):
# return 'Base payment provider'
#
# def payment_url(self):
# """
# :return: The payment URL
# """
# raise NotImplementedError()
#
# # TODO: use this instead of global urls.
# def get_provider_url_patterns(self):
# """
# Return the local url patterns
# :return: A list of url instances
# """
# raise NotImplementedError
#
# def validate_project(self, project, db_instance=None):
# """
# A provider can validate a project.
# :param project: The project instance to validate
# :param db_instance: The project instance in the database
# :raise: ValidationError
# """
# pass
#
# def collect_pledge(self, pledge):
# """
# Collects payment for the given pledge.
# :param pledge: An authorized pledge.
# """
# raise NotImplementedError()
#
# def refund_pledge(self, pledge):
# """
# Frees reserved funds for the given pledge.
# :param pledge: An authorized pledge.
# """
# raise NotImplementedError()
#
# def collect_billable_payments(self, project):
# """
# Collects billable payments for the given project.
# :param project: The project to collect payments for.
# :return: The amount of processed pledges.
# """
# pledges = project.authorized_pledges.filter(provider=self.name)
# for pledge in pledges:
# try:
# self.collect_pledge(pledge)
# except PaymentProviderException as e:
# logger.info(e.message)
#
# def refund_payments(self, project):
# """
# Refunds reserved payments for the given project.
# :param project: The project to collect payments for.
# :return: The amount of processed pledges.
# """
# raise NotImplementedError()
#
# Path: zipfelchappe/paypal/tasks.py
# def process_pledge(pledge):
# """
# Collect the paypal payment of one pledge
#
# Payments are asynchronous and we'll get notified about payment status by
# paypal via IPN Messages.
# """
#
# try:
# preapproval = pledge.paypal_preapproval
# except Preapproval.DoesNotExist:
# raise PaypalException('No preapproval for this pledge found')
#
# # All seems ok, try to execute paypal payment
# pp_payment = create_payment(preapproval)
# pp_data = pp_payment.json()
#
# Payment.objects.create(
# key=pp_data.get('payKey', 'ERROR_%s' % preapproval.key[:14]),
# preapproval=preapproval,
# status=pp_data.get('paymentExecStatus', 'ERROR'),
# data=json.dumps(pp_data, indent=2),
# )
#
# if pp_data and 'error' in pp_data:
# # Mark pledge as FAILED after 3 retries
# if preapproval.payments.count() >= 3:
# pledge.status = Pledge.FAILED
# pledge.save()
# for error in pp_data['error']:
# raise PaypalException(error['message'])
#
# return pp_data
#
# Path: zipfelchappe/paypal/app_settings.py
# MAXIMUM_ALLOWED_REWARD = 100 # US$
. Output only the next line. | if reward.minimum > MAXIMUM_ALLOWED_REWARD: |
Here is a snippet: <|code_start|># coding: utf-8
from __future__ import absolute_import, unicode_literals
class CodPaymentFactory(factory.DjangoModelFactory):
class Meta:
<|code_end|>
. Write the next line using the current file imports:
import factory
from zipfelchappe.cod.models import CodPayment
and context from other files:
# Path: zipfelchappe/cod/models.py
# class CodPayment(CreateUpdateModel):
# pledge = models.OneToOneField(
# 'zipfelchappe.Pledge', related_name='cod_payment')
#
# payment_received = models.DateField(_('payment received'), null=True)
#
# payment_slip_requested = models.BooleanField(
# _('payment slip requested'), default=False, blank=True)
# payment_slip_sent = models.BooleanField(
# _('payment slip sent'), default=False, blank=True)
#
# payment_slip_first_name = models.CharField(
# _('first name'), max_length=50, blank=True)
# payment_slip_last_name = models.CharField(
# _('last name'), max_length=50, blank=True)
# payment_slip_address = models.CharField(
# _('address'), max_length=200, blank=True)
# payment_slip_zip_code = models.CharField(
# _('zip code'), max_length=20, blank=True)
# payment_slip_city = models.CharField(
# _('city'), max_length=50, blank=True)
#
# class Meta:
# verbose_name = _('cod payment')
# verbose_name_plural = _('cod payments')
# app_label = 'zipfelchappe'
# ordering = ['modified']
#
# def __unicode__(self):
# return '{0} {1}'.format(
# self.pledge.backer.first_name, self.pledge.backer.last_name)
, which may include functions, classes, or code. Output only the next line. | model = CodPayment |
Predict the next line after this snippet: <|code_start|>
class PaymentAdmin(admin.ModelAdmin):
def status_text(self, obj):
return obj.status_text()
list_display = ('order_id', 'amount', 'currency', 'status_text', 'updated')
list_filter = ('pledge__project',)
readonly_fields = (
'amount',
'currency',
'PAYID',
'STATUS',
'status_text',
'PM',
'ACCEPTANCE',
'CARDNO',
'BRAND',
)
<|code_end|>
using the current file's imports:
from django.contrib import admin
from .models import Payment
and any relevant context from other files:
# Path: zipfelchappe/postfinance/models.py
# class Payment(models.Model):
#
# order_id = models.CharField(_('order id'), max_length=100)
# pledge = models.OneToOneField('zipfelchappe.Pledge',
# related_name='postfinance_payment')
#
# created = models.DateTimeField(auto_now_add=True)
# updated = models.DateTimeField(auto_now=True)
#
# amount = models.CharField(_('amount'), max_length=20, blank=True)
# currency = models.CharField(_('currency'), max_length=10, blank=True)
#
# PAYID = models.CharField(max_length=100, blank=True)
# STATUS = models.CharField(max_length=100, blank=True)
#
# PM = models.CharField(max_length=100, blank=True)
# ACCEPTANCE = models.CharField(max_length=100, blank=True)
# CARDNO = models.CharField(max_length=100, blank=True)
# BRAND = models.CharField(max_length=100, blank=True)
#
# class Meta:
# verbose_name = _('payment')
# verbose_name_plural = _('payments')
#
# def __unicode__(self):
# return self.order_id
#
# def status_text(self):
# return STATUS_DICT.get(self.STATUS, _('unknown'))
#
# def get_amount_cents(self):
# return int(self.amount)
. Output only the next line. | admin.site.register(Payment, PaymentAdmin) |
Predict the next line for this snippet: <|code_start|>
class Command(BaseCommand):
help = 'Collect all paypal payments for finished projects (cronjob)'
def handle(self, *args, **options):
<|code_end|>
with the help of current file imports:
from django.core.management.base import BaseCommand
from zipfelchappe.paypal.tasks import process_payments
and context from other files:
# Path: zipfelchappe/paypal/tasks.py
# def process_payments():
# """
# Collects the paypal payments for all successfully financed projects
# that end within the next 24 hours.
# """
#
# billable_projects = Project.objects.billable()
#
# # Pledges that are ready to be payed
# processing_pledges = Pledge.objects.filter(
# project__in=billable_projects,
# provider='paypal',
# status=Pledge.AUTHORIZED,
# paypal_preapproval__status='ACTIVE',
# paypal_preapproval__approved=True,
# )
#
# for pledge in processing_pledges:
# try:
# process_pledge(pledge)
# except PaypalException as e:
# pledge.status = pledge.FAILED
# pledge.save()
# # TODO: use logger
# print e.message
#
# return processing_pledges.count()
, which may contain function names, class names, or code. Output only the next line. | pledges_processed = process_payments() |
Given the code snippet: <|code_start|>
def render_mail(template, context):
""" helper to load subject and content from template """
subject = render_to_string('zipfelchappe/emails/%s_subject.txt' % template,
context).strip()
message = render_to_string('zipfelchappe/emails/%s_message.txt' % template,
context)
return subject, message
def send_pledge_completed_message(request, pledge, mail_template=None):
""" Send message after backer successfully pledged to a project """
    # Try to get template from project if not explicitly passed
if mail_template is None:
try:
<|code_end|>
, generate the next line using the imports in this file:
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.template import Context, Template
from django.template.loader import render_to_string
from .models import MailTemplate
and context (functions, classes, or occasionally code) from other files:
# Path: zipfelchappe/models.py
# class MailTemplate(CreateUpdateModel, TranslatedMixin):
# """ Content override of mails that are sent on specific actions """
#
# ACTION_THANKYOU = 'thankyou'
#
# ACTION_CHOICES = (
# (ACTION_THANKYOU, _('Thank you')),
# )
#
# project = models.ForeignKey('Project', related_name='mail_templates')
#
# action = models.CharField(_('action'), max_length=30,
# choices=ACTION_CHOICES, default=ACTION_THANKYOU)
#
# subject = models.CharField(_('subject'), max_length=200)
#
# template = models.TextField(_('template')) # no richtext here
#
# class Meta:
# verbose_name = _('email')
# verbose_name_plural = _('emails')
# unique_together = (('project', 'action'),)
#
# def __unicode__(self):
# return '%s mail for %s' % (self.action, self.project)
. Output only the next line. | mail_template = MailTemplate.objects.get( |
Predict the next line after this snippet: <|code_start|>from __future__ import unicode_literals, absolute_import
class PreapprovalAdmin(admin.ModelAdmin):
list_display = ('pledge', 'key', 'amount', 'status', 'approved', 'sender')
list_filter = ('pledge__project', 'approved')
readonly_fields = ('created', 'modified')
search_fields = ('key', 'sender')
def has_add_permission(self, request):
return False
class PaymentAdmin(admin.ModelAdmin):
list_display = ('key', 'status')
list_filter = ('preapproval__pledge__project', 'status')
search_fields = ('key', 'preapproval_key')
readonly_fields = ('key', 'preapproval', 'status', 'data')
def has_add_permission(self, request):
return False
<|code_end|>
using the current file's imports:
from django.contrib import admin
from .models import Preapproval, Payment
and any relevant context from other files:
# Path: zipfelchappe/paypal/models.py
# class Preapproval(CreateUpdateModel):
#
# pledge = models.OneToOneField('zipfelchappe.Pledge',
# related_name='paypal_preapproval')
#
# key = models.CharField(_('key'), unique=True, db_index=True, max_length=20)
#
# amount = CurrencyField(_('amount'), max_digits=10, decimal_places=2)
#
# status = models.CharField(_('status'), max_length=20, blank=True, null=True)
#
# approved = models.BooleanField(_('approved'), default=False)
#
# sender = models.EmailField(_('sender'), blank=True, null=True)
#
# data = models.TextField(_('data'), blank=True)
#
# class Meta:
# verbose_name = _('preapproval')
# verbose_name_plural = _('preapprovals')
#
# def __unicode__(self):
# return self.key
#
# class Payment(CreateUpdateModel):
#
#     # These are some known statuses for payments
# CREATED = 'CREATED'
# COMPLETED = 'COMPLETED'
# INCOMPLETE = 'INCOMPLETE'
# ERROR = 'ERROR'
# REVERSALERROR = 'REVERSALERROR'
# PROCESSING = 'PROCESSING'
# PENDING = 'PENDING'
#
# key = models.CharField(_('key'), max_length=20, blank=True)
#
# preapproval = models.ForeignKey('Preapproval', related_name='payments')
#
# status = models.CharField(_('status'), max_length=20, blank=True, null=True)
#
# data = models.TextField(_('data'), blank=True)
#
# class Meta:
# verbose_name = _('payment')
# verbose_name_plural = _('payments')
#
# def __unicode__(self):
# return self.key
. Output only the next line. | admin.site.register(Preapproval, PreapprovalAdmin) |
Based on the snippet: <|code_start|>from __future__ import unicode_literals, absolute_import
class PreapprovalAdmin(admin.ModelAdmin):
list_display = ('pledge', 'key', 'amount', 'status', 'approved', 'sender')
list_filter = ('pledge__project', 'approved')
readonly_fields = ('created', 'modified')
search_fields = ('key', 'sender')
def has_add_permission(self, request):
return False
class PaymentAdmin(admin.ModelAdmin):
list_display = ('key', 'status')
list_filter = ('preapproval__pledge__project', 'status')
search_fields = ('key', 'preapproval_key')
readonly_fields = ('key', 'preapproval', 'status', 'data')
def has_add_permission(self, request):
return False
admin.site.register(Preapproval, PreapprovalAdmin)
<|code_end|>
, predict the immediate next line with the help of imports:
from django.contrib import admin
from .models import Preapproval, Payment
and context (classes, functions, sometimes code) from other files:
# Path: zipfelchappe/paypal/models.py
# class Preapproval(CreateUpdateModel):
#
# pledge = models.OneToOneField('zipfelchappe.Pledge',
# related_name='paypal_preapproval')
#
# key = models.CharField(_('key'), unique=True, db_index=True, max_length=20)
#
# amount = CurrencyField(_('amount'), max_digits=10, decimal_places=2)
#
# status = models.CharField(_('status'), max_length=20, blank=True, null=True)
#
# approved = models.BooleanField(_('approved'), default=False)
#
# sender = models.EmailField(_('sender'), blank=True, null=True)
#
# data = models.TextField(_('data'), blank=True)
#
# class Meta:
# verbose_name = _('preapproval')
# verbose_name_plural = _('preapprovals')
#
# def __unicode__(self):
# return self.key
#
# class Payment(CreateUpdateModel):
#
#     # These are some known statuses for payments
# CREATED = 'CREATED'
# COMPLETED = 'COMPLETED'
# INCOMPLETE = 'INCOMPLETE'
# ERROR = 'ERROR'
# REVERSALERROR = 'REVERSALERROR'
# PROCESSING = 'PROCESSING'
# PENDING = 'PENDING'
#
# key = models.CharField(_('key'), max_length=20, blank=True)
#
# preapproval = models.ForeignKey('Preapproval', related_name='payments')
#
# status = models.CharField(_('status'), max_length=20, blank=True, null=True)
#
# data = models.TextField(_('data'), blank=True)
#
# class Meta:
# verbose_name = _('payment')
# verbose_name_plural = _('payments')
#
# def __unicode__(self):
# return self.key
. Output only the next line. | admin.site.register(Payment, PaymentAdmin) |
Continue the code snippet: <|code_start|> return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def get_object_or_none(klass, *args, **kwargs):
"""
Modelled after get_object_or_404
"""
if isinstance(klass, models.query.QuerySet):
queryset = klass
elif isinstance(klass, models.manager.Manager):
queryset = klass.all()
else:
queryset = klass._default_manager.all()
try:
return queryset.get(*args, **kwargs)
except (queryset.model.DoesNotExist, ValueError):
return None
def get_user_search_fields():
''' Get names of searchable fields on user model
Due to custom user models the search_fields values for fields on the
    user model can't be set statically. This function returns the available
fields on the model which can be used to dynamically generate
search_fields values
'''
# get field names for user fields
<|code_end|>
. Use current file imports:
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from .app_settings import (
USER_EMAIL_FIELD, USER_FIRST_NAME_FIELD, USER_LAST_NAME_FIELD
)
and context (classes, functions, or code) from other files:
# Path: zipfelchappe/app_settings.py
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
#
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
#
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
. Output only the next line. | fields = [USER_EMAIL_FIELD, USER_FIRST_NAME_FIELD, USER_LAST_NAME_FIELD] |
Based on the snippet: <|code_start|> return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def get_object_or_none(klass, *args, **kwargs):
"""
Modelled after get_object_or_404
"""
if isinstance(klass, models.query.QuerySet):
queryset = klass
elif isinstance(klass, models.manager.Manager):
queryset = klass.all()
else:
queryset = klass._default_manager.all()
try:
return queryset.get(*args, **kwargs)
except (queryset.model.DoesNotExist, ValueError):
return None
def get_user_search_fields():
''' Get names of searchable fields on user model
Due to custom user models the search_fields values for fields on the
    user model can't be set statically. This function returns the available
fields on the model which can be used to dynamically generate
search_fields values
'''
# get field names for user fields
<|code_end|>
, predict the immediate next line with the help of imports:
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from .app_settings import (
USER_EMAIL_FIELD, USER_FIRST_NAME_FIELD, USER_LAST_NAME_FIELD
)
and context (classes, functions, sometimes code) from other files:
# Path: zipfelchappe/app_settings.py
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
#
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
#
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
. Output only the next line. | fields = [USER_EMAIL_FIELD, USER_FIRST_NAME_FIELD, USER_LAST_NAME_FIELD] |
Here is a snippet: <|code_start|> return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def get_object_or_none(klass, *args, **kwargs):
"""
Modelled after get_object_or_404
"""
if isinstance(klass, models.query.QuerySet):
queryset = klass
elif isinstance(klass, models.manager.Manager):
queryset = klass.all()
else:
queryset = klass._default_manager.all()
try:
return queryset.get(*args, **kwargs)
except (queryset.model.DoesNotExist, ValueError):
return None
def get_user_search_fields():
''' Get names of searchable fields on user model
Due to custom user models the search_fields values for fields on the
    user model can't be set statically. This function returns the available
fields on the model which can be used to dynamically generate
search_fields values
'''
# get field names for user fields
<|code_end|>
. Write the next line using the current file imports:
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from .app_settings import (
USER_EMAIL_FIELD, USER_FIRST_NAME_FIELD, USER_LAST_NAME_FIELD
)
and context from other files:
# Path: zipfelchappe/app_settings.py
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
#
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
#
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
, which may include functions, classes, or code. Output only the next line. | fields = [USER_EMAIL_FIELD, USER_FIRST_NAME_FIELD, USER_LAST_NAME_FIELD] |
Predict the next line after this snippet: <|code_start|># coding: utf-8
from __future__ import absolute_import, unicode_literals
@skipIfCustomUser
class AdminViewsTest(TestCase):
def setUp(self):
# feincms page containing zipfelchappe app content
self.page = Page.objects.create(title='Projects', slug='projects')
ct = self.page.content_type_for(ApplicationContent)
ct.objects.create(parent=self.page, urlconf_path=app_settings.ROOT_URLS)
# Fixture Data for following tests
<|code_end|>
using the current file's imports:
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.urlresolvers import reverse
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
from feincms.module.page.models import Page
from feincms.content.application.models import ApplicationContent
from django.utils.translation import ugettext as _
from tests.factories import ProjectFactory, RewardFactory, PledgeFactory, UserFactory
from zipfelchappe import app_settings
and any relevant context from other files:
# Path: tests/factories.py
# class ProjectFactory(factory.DjangoModelFactory):
# title = factory.Sequence(lambda n: 'Testproject %s' % n)
# slug = factory.Sequence(lambda n: 'test%s' % n)
# goal = 200.00
# currency = 'CHF'
# start = timezone.now()
# end = timezone.now() + timedelta(days=1)
#
# class Meta:
# model = Project
#
# class RewardFactory(factory.DjangoModelFactory):
# description = factory.Sequence(lambda n: 'testreward%s' % n)
#
# class Meta:
# model = Reward
#
# class PledgeFactory(factory.DjangoModelFactory):
# status = Pledge.AUTHORIZED
# backer = factory.SubFactory(BackerFactory)
#
# class Meta:
# model = Pledge
#
# class UserFactory(factory.DjangoModelFactory):
# username = factory.Sequence(lambda n: 'user%s' % n)
# email = factory.Sequence(lambda n: 'user%s@example.org' % n)
# is_active = True
# is_superuser = False
# is_staff = False
# # password 'test'
# password = 'pbkdf2_sha256$10000$s9Ed0KfEQgTY$CsbbUpXaWk+8eAB+Oga2hBqD82kU4vl+QQaqr/wCZXY='
# first_name = 'Hans'
# last_name = 'Muster'
#
# class Meta:
# model = get_user_model()
#
# Path: zipfelchappe/app_settings.py
# CURRENCIES = getattr(settings, 'ZIPFELCHAPPE_CURRENCIES', ('CHF', 'EUR', 'USD'))
# DISQUS_SHORTNAME = getattr(settings, 'ZIPFELCHAPPE_DISQUS_SHORTNAME', None)
# PAGINATE_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BY', 10)
# PAGINATE_BACKERS_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BACKERS', 25)
# ALLOW_ANONYMOUS_PLEDGES = getattr(settings, 'ZIPFELCHAPPE_ALLOW_ANONYMOUS_PLEDGES', True)
# BACKER_PROFILE = getattr(settings, 'ZIPFELCHAPPE_BACKER_PROFILE', None)
# ROOT_URLS = getattr(settings, 'ZIPFELCHAPPE_URLS', 'zipfelchappe.urls')
# DEFAULT_IMAGE_URL = getattr(settings, 'ZIPFELCHAPPE_PLACEHOLDER_IMAGE_URL', '')
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
# MAX_PROJECT_DURATION_DAYS = getattr(settings, 'ZIPFELCHAPPE_MAX_PROJECT_DURATION', 120)
# TERMS_URL = settings.ZIPFELCHAPPE_TERMS_URL
# MANAGERS = getattr(settings, 'ZIPFELCHAPPE_MANAGERS', settings.MANAGERS)
. Output only the next line. | self.project1 = ProjectFactory.create() |
Given snippet: <|code_start|># coding: utf-8
from __future__ import absolute_import, unicode_literals
@skipIfCustomUser
class AdminViewsTest(TestCase):
def setUp(self):
# feincms page containing zipfelchappe app content
self.page = Page.objects.create(title='Projects', slug='projects')
ct = self.page.content_type_for(ApplicationContent)
ct.objects.create(parent=self.page, urlconf_path=app_settings.ROOT_URLS)
# Fixture Data for following tests
self.project1 = ProjectFactory.create()
self.project2 = ProjectFactory.create()
self.user = UserFactory.create()
self.admin = UserFactory.create(is_superuser=True, is_staff=True)
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.urlresolvers import reverse
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
from feincms.module.page.models import Page
from feincms.content.application.models import ApplicationContent
from django.utils.translation import ugettext as _
from tests.factories import ProjectFactory, RewardFactory, PledgeFactory, UserFactory
from zipfelchappe import app_settings
and context:
# Path: tests/factories.py
# class ProjectFactory(factory.DjangoModelFactory):
# title = factory.Sequence(lambda n: 'Testproject %s' % n)
# slug = factory.Sequence(lambda n: 'test%s' % n)
# goal = 200.00
# currency = 'CHF'
# start = timezone.now()
# end = timezone.now() + timedelta(days=1)
#
# class Meta:
# model = Project
#
# class RewardFactory(factory.DjangoModelFactory):
# description = factory.Sequence(lambda n: 'testreward%s' % n)
#
# class Meta:
# model = Reward
#
# class PledgeFactory(factory.DjangoModelFactory):
# status = Pledge.AUTHORIZED
# backer = factory.SubFactory(BackerFactory)
#
# class Meta:
# model = Pledge
#
# class UserFactory(factory.DjangoModelFactory):
# username = factory.Sequence(lambda n: 'user%s' % n)
# email = factory.Sequence(lambda n: 'user%s@example.org' % n)
# is_active = True
# is_superuser = False
# is_staff = False
# # password 'test'
# password = 'pbkdf2_sha256$10000$s9Ed0KfEQgTY$CsbbUpXaWk+8eAB+Oga2hBqD82kU4vl+QQaqr/wCZXY='
# first_name = 'Hans'
# last_name = 'Muster'
#
# class Meta:
# model = get_user_model()
#
# Path: zipfelchappe/app_settings.py
# CURRENCIES = getattr(settings, 'ZIPFELCHAPPE_CURRENCIES', ('CHF', 'EUR', 'USD'))
# DISQUS_SHORTNAME = getattr(settings, 'ZIPFELCHAPPE_DISQUS_SHORTNAME', None)
# PAGINATE_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BY', 10)
# PAGINATE_BACKERS_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BACKERS', 25)
# ALLOW_ANONYMOUS_PLEDGES = getattr(settings, 'ZIPFELCHAPPE_ALLOW_ANONYMOUS_PLEDGES', True)
# BACKER_PROFILE = getattr(settings, 'ZIPFELCHAPPE_BACKER_PROFILE', None)
# ROOT_URLS = getattr(settings, 'ZIPFELCHAPPE_URLS', 'zipfelchappe.urls')
# DEFAULT_IMAGE_URL = getattr(settings, 'ZIPFELCHAPPE_PLACEHOLDER_IMAGE_URL', '')
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
# MAX_PROJECT_DURATION_DAYS = getattr(settings, 'ZIPFELCHAPPE_MAX_PROJECT_DURATION', 120)
# TERMS_URL = settings.ZIPFELCHAPPE_TERMS_URL
# MANAGERS = getattr(settings, 'ZIPFELCHAPPE_MANAGERS', settings.MANAGERS)
which might include code, classes, or functions. Output only the next line. | self.reward = RewardFactory.create( |
Using the snippet: <|code_start|># coding: utf-8
from __future__ import absolute_import, unicode_literals
@skipIfCustomUser
class AdminViewsTest(TestCase):
def setUp(self):
# feincms page containing zipfelchappe app content
self.page = Page.objects.create(title='Projects', slug='projects')
ct = self.page.content_type_for(ApplicationContent)
ct.objects.create(parent=self.page, urlconf_path=app_settings.ROOT_URLS)
# Fixture Data for following tests
self.project1 = ProjectFactory.create()
self.project2 = ProjectFactory.create()
self.user = UserFactory.create()
self.admin = UserFactory.create(is_superuser=True, is_staff=True)
self.reward = RewardFactory.create(
project=self.project1,
minimum=20.00,
quantity=1
)
# Fresh Client for every test
self.client = Client()
def test_collect_pledges(self):
<|code_end|>
, determine the next line of code. You have imports:
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.urlresolvers import reverse
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
from feincms.module.page.models import Page
from feincms.content.application.models import ApplicationContent
from django.utils.translation import ugettext as _
from tests.factories import ProjectFactory, RewardFactory, PledgeFactory, UserFactory
from zipfelchappe import app_settings
and context (class names, function names, or code) available:
# Path: tests/factories.py
# class ProjectFactory(factory.DjangoModelFactory):
# title = factory.Sequence(lambda n: 'Testproject %s' % n)
# slug = factory.Sequence(lambda n: 'test%s' % n)
# goal = 200.00
# currency = 'CHF'
# start = timezone.now()
# end = timezone.now() + timedelta(days=1)
#
# class Meta:
# model = Project
#
# class RewardFactory(factory.DjangoModelFactory):
# description = factory.Sequence(lambda n: 'testreward%s' % n)
#
# class Meta:
# model = Reward
#
# class PledgeFactory(factory.DjangoModelFactory):
# status = Pledge.AUTHORIZED
# backer = factory.SubFactory(BackerFactory)
#
# class Meta:
# model = Pledge
#
# class UserFactory(factory.DjangoModelFactory):
# username = factory.Sequence(lambda n: 'user%s' % n)
# email = factory.Sequence(lambda n: 'user%s@example.org' % n)
# is_active = True
# is_superuser = False
# is_staff = False
# # password 'test'
# password = 'pbkdf2_sha256$10000$s9Ed0KfEQgTY$CsbbUpXaWk+8eAB+Oga2hBqD82kU4vl+QQaqr/wCZXY='
# first_name = 'Hans'
# last_name = 'Muster'
#
# class Meta:
# model = get_user_model()
#
# Path: zipfelchappe/app_settings.py
# CURRENCIES = getattr(settings, 'ZIPFELCHAPPE_CURRENCIES', ('CHF', 'EUR', 'USD'))
# DISQUS_SHORTNAME = getattr(settings, 'ZIPFELCHAPPE_DISQUS_SHORTNAME', None)
# PAGINATE_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BY', 10)
# PAGINATE_BACKERS_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BACKERS', 25)
# ALLOW_ANONYMOUS_PLEDGES = getattr(settings, 'ZIPFELCHAPPE_ALLOW_ANONYMOUS_PLEDGES', True)
# BACKER_PROFILE = getattr(settings, 'ZIPFELCHAPPE_BACKER_PROFILE', None)
# ROOT_URLS = getattr(settings, 'ZIPFELCHAPPE_URLS', 'zipfelchappe.urls')
# DEFAULT_IMAGE_URL = getattr(settings, 'ZIPFELCHAPPE_PLACEHOLDER_IMAGE_URL', '')
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
# MAX_PROJECT_DURATION_DAYS = getattr(settings, 'ZIPFELCHAPPE_MAX_PROJECT_DURATION', 120)
# TERMS_URL = settings.ZIPFELCHAPPE_TERMS_URL
# MANAGERS = getattr(settings, 'ZIPFELCHAPPE_MANAGERS', settings.MANAGERS)
. Output only the next line. | pledge1 = PledgeFactory.create( |
Predict the next line after this snippet: <|code_start|># coding: utf-8
from __future__ import absolute_import, unicode_literals
@skipIfCustomUser
class AdminViewsTest(TestCase):
def setUp(self):
# feincms page containing zipfelchappe app content
self.page = Page.objects.create(title='Projects', slug='projects')
ct = self.page.content_type_for(ApplicationContent)
ct.objects.create(parent=self.page, urlconf_path=app_settings.ROOT_URLS)
# Fixture Data for following tests
self.project1 = ProjectFactory.create()
self.project2 = ProjectFactory.create()
<|code_end|>
using the current file's imports:
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.urlresolvers import reverse
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
from feincms.module.page.models import Page
from feincms.content.application.models import ApplicationContent
from django.utils.translation import ugettext as _
from tests.factories import ProjectFactory, RewardFactory, PledgeFactory, UserFactory
from zipfelchappe import app_settings
and any relevant context from other files:
# Path: tests/factories.py
# class ProjectFactory(factory.DjangoModelFactory):
# title = factory.Sequence(lambda n: 'Testproject %s' % n)
# slug = factory.Sequence(lambda n: 'test%s' % n)
# goal = 200.00
# currency = 'CHF'
# start = timezone.now()
# end = timezone.now() + timedelta(days=1)
#
# class Meta:
# model = Project
#
# class RewardFactory(factory.DjangoModelFactory):
# description = factory.Sequence(lambda n: 'testreward%s' % n)
#
# class Meta:
# model = Reward
#
# class PledgeFactory(factory.DjangoModelFactory):
# status = Pledge.AUTHORIZED
# backer = factory.SubFactory(BackerFactory)
#
# class Meta:
# model = Pledge
#
# class UserFactory(factory.DjangoModelFactory):
# username = factory.Sequence(lambda n: 'user%s' % n)
# email = factory.Sequence(lambda n: 'user%s@example.org' % n)
# is_active = True
# is_superuser = False
# is_staff = False
# # password 'test'
# password = 'pbkdf2_sha256$10000$s9Ed0KfEQgTY$CsbbUpXaWk+8eAB+Oga2hBqD82kU4vl+QQaqr/wCZXY='
# first_name = 'Hans'
# last_name = 'Muster'
#
# class Meta:
# model = get_user_model()
#
# Path: zipfelchappe/app_settings.py
# CURRENCIES = getattr(settings, 'ZIPFELCHAPPE_CURRENCIES', ('CHF', 'EUR', 'USD'))
# DISQUS_SHORTNAME = getattr(settings, 'ZIPFELCHAPPE_DISQUS_SHORTNAME', None)
# PAGINATE_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BY', 10)
# PAGINATE_BACKERS_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BACKERS', 25)
# ALLOW_ANONYMOUS_PLEDGES = getattr(settings, 'ZIPFELCHAPPE_ALLOW_ANONYMOUS_PLEDGES', True)
# BACKER_PROFILE = getattr(settings, 'ZIPFELCHAPPE_BACKER_PROFILE', None)
# ROOT_URLS = getattr(settings, 'ZIPFELCHAPPE_URLS', 'zipfelchappe.urls')
# DEFAULT_IMAGE_URL = getattr(settings, 'ZIPFELCHAPPE_PLACEHOLDER_IMAGE_URL', '')
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
# MAX_PROJECT_DURATION_DAYS = getattr(settings, 'ZIPFELCHAPPE_MAX_PROJECT_DURATION', 120)
# TERMS_URL = settings.ZIPFELCHAPPE_TERMS_URL
# MANAGERS = getattr(settings, 'ZIPFELCHAPPE_MANAGERS', settings.MANAGERS)
. Output only the next line. | self.user = UserFactory.create() |
Here is a snippet: <|code_start|># coding: utf-8
from __future__ import absolute_import, unicode_literals
@skipIfCustomUser
class AdminViewsTest(TestCase):
def setUp(self):
# feincms page containing zipfelchappe app content
self.page = Page.objects.create(title='Projects', slug='projects')
ct = self.page.content_type_for(ApplicationContent)
<|code_end|>
. Write the next line using the current file imports:
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.urlresolvers import reverse
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
from feincms.module.page.models import Page
from feincms.content.application.models import ApplicationContent
from django.utils.translation import ugettext as _
from tests.factories import ProjectFactory, RewardFactory, PledgeFactory, UserFactory
from zipfelchappe import app_settings
and context from other files:
# Path: tests/factories.py
# class ProjectFactory(factory.DjangoModelFactory):
# title = factory.Sequence(lambda n: 'Testproject %s' % n)
# slug = factory.Sequence(lambda n: 'test%s' % n)
# goal = 200.00
# currency = 'CHF'
# start = timezone.now()
# end = timezone.now() + timedelta(days=1)
#
# class Meta:
# model = Project
#
# class RewardFactory(factory.DjangoModelFactory):
# description = factory.Sequence(lambda n: 'testreward%s' % n)
#
# class Meta:
# model = Reward
#
# class PledgeFactory(factory.DjangoModelFactory):
# status = Pledge.AUTHORIZED
# backer = factory.SubFactory(BackerFactory)
#
# class Meta:
# model = Pledge
#
# class UserFactory(factory.DjangoModelFactory):
# username = factory.Sequence(lambda n: 'user%s' % n)
# email = factory.Sequence(lambda n: 'user%s@example.org' % n)
# is_active = True
# is_superuser = False
# is_staff = False
# # password 'test'
# password = 'pbkdf2_sha256$10000$s9Ed0KfEQgTY$CsbbUpXaWk+8eAB+Oga2hBqD82kU4vl+QQaqr/wCZXY='
# first_name = 'Hans'
# last_name = 'Muster'
#
# class Meta:
# model = get_user_model()
#
# Path: zipfelchappe/app_settings.py
# CURRENCIES = getattr(settings, 'ZIPFELCHAPPE_CURRENCIES', ('CHF', 'EUR', 'USD'))
# DISQUS_SHORTNAME = getattr(settings, 'ZIPFELCHAPPE_DISQUS_SHORTNAME', None)
# PAGINATE_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BY', 10)
# PAGINATE_BACKERS_BY = getattr(settings, 'ZIPFELCHAPPE_PAGINATE_BACKERS', 25)
# ALLOW_ANONYMOUS_PLEDGES = getattr(settings, 'ZIPFELCHAPPE_ALLOW_ANONYMOUS_PLEDGES', True)
# BACKER_PROFILE = getattr(settings, 'ZIPFELCHAPPE_BACKER_PROFILE', None)
# ROOT_URLS = getattr(settings, 'ZIPFELCHAPPE_URLS', 'zipfelchappe.urls')
# DEFAULT_IMAGE_URL = getattr(settings, 'ZIPFELCHAPPE_PLACEHOLDER_IMAGE_URL', '')
# USER_FIRST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_FIRST_NAME_FIELD', 'first_name')
# USER_LAST_NAME_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_LAST_NAME_FIELD', 'last_name')
# USER_EMAIL_FIELD = getattr(
# settings, 'ZIPFELCHAPPE_USER_EMAIL_FIELD', 'email')
# MAX_PROJECT_DURATION_DAYS = getattr(settings, 'ZIPFELCHAPPE_MAX_PROJECT_DURATION', 120)
# TERMS_URL = settings.ZIPFELCHAPPE_TERMS_URL
# MANAGERS = getattr(settings, 'ZIPFELCHAPPE_MANAGERS', settings.MANAGERS)
, which may include functions, classes, or code. Output only the next line. | ct.objects.create(parent=self.page, urlconf_path=app_settings.ROOT_URLS) |
Given the code snippet: <|code_start|>
urlpatterns = [
url(r'^$', PaymentView.as_view(), name='zipfelchappe_cod_payment'),
url(r'^request-payment-slip/$', RequestPaymentSlipView.as_view(),
name='zipfelchappe_cod_payment_slip'),
<|code_end|>
, generate the next line using the imports in this file:
from django.conf.urls import url
from .views import PaymentView, RequestPaymentSlipView, PaymentSlipRequestRecievedView
and context (functions, classes, or occasionally code) from other files:
# Path: zipfelchappe/cod/views.py
# class PaymentView(PledgeRequiredMixin, View):
# def get(self, request, *args, **kwargs):
# # update pledge status and create payment object
# self.pledge.status = Pledge.AUTHORIZED
# self.pledge.save()
# payment = CodPayment.objects.create(
# pledge=self.pledge,
# payment_slip_first_name=self.pledge.backer.first_name,
# payment_slip_last_name=self.pledge.backer.last_name
# )
#
# # send mail with payment info
# self.send_info_mail(request)
#
# # remove pledge id from session
# del self.request.session['pledge_id']
#
# initial_data = {'first_name': self.pledge.backer.first_name,
# 'last_name': self.pledge.backer.last_name,
# 'address': getattr(request.user, 'address', ''),
# 'zip_code': getattr(request.user, 'zip_code', ''),
# 'city': getattr(request.user, 'city', ''),
# 'payment': payment.pk} # TODO: use UUID
#
# form = RequestPaymentSlipForm(initial=initial_data)
#
# # return confirmation
# return render(request, 'zipfelchappe/cod/confirmation.html', {
# 'request_payment_slip_form': form,
# 'pledge': self.pledge
# })
#
# def send_info_mail(self, request):
# # send mail with payment info
# context = Context({'request': request, 'pledge': self.pledge})
# subject, message = render_mail('cod_wiretransfer', context)
#
# send_mail(
# subject, message, settings.DEFAULT_FROM_EMAIL,
# [self.pledge.backer.email], fail_silently=False)
#
# class RequestPaymentSlipView(View):
# """
# This view stores the customers address information and sends a mail to the project owner.
# """
# def post(self, request, *args, **kwargs):
# form = RequestPaymentSlipForm(request.POST)
#
# if form.is_valid():
# # update payment object
# payment = form.cleaned_data['payment']
# payment.payment_slip_requested = True
# payment.payment_slip_first_name = form.cleaned_data.get(
# 'first_name')
# payment.payment_slip_last_name = form.cleaned_data.get('last_name')
# payment.payment_slip_address = form.cleaned_data.get('address')
# payment.payment_slip_zip_code = form.cleaned_data.get('zip_code')
# payment.payment_slip_city = form.cleaned_data.get('city')
# payment.save()
# # store the address in the user profile if the profile is empty.
# if request.user.is_authenticated():
# if hasattr(request.user, 'address') and hasattr(request.user, 'zip_code') \
# and hasattr(request.user, 'city') and request.user.address == '':
# request.user.address = payment.payment_slip_address
# request.user.zip_code = payment.payment_slip_zip_code
# request.user.city = payment.payment_slip_city
# request.user.save()
#
# try:
# self.send_info_mail(payment)
# except IOError:
# logger.exception('Failed sending email to MANAGERS.')
#
# # show confirmation
# return redirect('zipfelchappe_cod_request_received')
# else:
# return render(
# request, 'zipfelchappe/cod/request_payment_slip_form.html',
# {'form': form}
# )
#
# def send_info_mail(self, payment):
# context = Context({'payment': payment})
# subject, message = render_mail('cod_payment_slip', context)
# receivers = [r[1] for r in MANAGERS]
# send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, receivers, fail_silently=False)
#
# class PaymentSlipRequestRecievedView(TemplateView):
# template_name = 'zipfelchappe/cod/payment_slip_confirmation.html'
. Output only the next line. | url(r'^request-payment-slip/ok/$', PaymentSlipRequestRecievedView.as_view(), |
Given snippet: <|code_start|>
def test_camel_case_transform() -> None:
tx = CamelCaseTransform()
assert tx.transform("one") == "one"
assert tx.transform("one_two_three") == "oneTwoThree"
def test_camel_case_to_underscore_transform() -> None:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from rest_framework_json_schema.transforms import (
CamelCaseTransform,
CamelCaseToUnderscoreTransform,
)
and context:
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# class CamelCaseToUnderscoreTransform(Transform):
# """Transform camelCase to snake_underscore_case."""
#
# def transform(self, name: str) -> str:
# """Transform camelCase to snake_underscore_case."""
# words: List[str] = []
# last = 0
#
# for i, c in enumerate(name):
# if c[0].isupper():
# # Start of a new word
# words.append(name[last].lower() + name[last + 1 : i])
# last = i
# # Add the last word
# words.append(name[last].lower() + name[last + 1 :])
#
# return "_".join(words)
which might include code, classes, or functions. Output only the next line. | tx = CamelCaseToUnderscoreTransform() |
Using the snippet: <|code_start|>"""Pagination serializers determine the structure for paginated responses."""
class JSONAPILimitOffsetPagination(LimitOffsetPagination):
"""Implement JSON API limit/offset pagination."""
def get_paginated_response(self, data: ReturnList) -> Response:
"""
Return the paginated response.
Place the links under the correct location to be used by the
JSONAPIRenderer to include in the output payload.
https://jsonapi.org/format/#fetching-pagination
"""
return Response(
<|code_end|>
, determine the next line of code. You have imports:
from collections import OrderedDict
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.utils.serializer_helpers import ReturnList
from .helpers import JSONReturnList
and context (class names, function names, or code) available:
# Path: rest_framework_json_schema/helpers.py
# class JSONReturnList(ReturnList):
# """Extend a DRF ReturnList to include meta and links."""
#
# def __init__(self, *args: Any, **kwargs: Any) -> None:
# """Create a return list."""
# self.meta = kwargs.pop("meta", None)
# self.links = kwargs.pop("links", None)
# super().__init__(*args, **kwargs)
. Output only the next line. | JSONReturnList( |
Given snippet: <|code_start|>
def test_filter_no_transform() -> None:
"""The transform works with no filter."""
result = get_query_filters(
{"filter[name]": "John", "filter[lastName]": "Coltrane", "limit": 50}
)
assert result == {"name": "John", "lastName": "Coltrane"}
def test_filter_transform() -> None:
"""Given a transform, the filter applies correctly."""
result = get_query_filters(
{"filter[name]": "John", "filter[lastName]": "Coltrane", "limit": 50},
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from rest_framework_json_schema.filters import get_query_filters
from rest_framework_json_schema.transforms import CamelCaseToUnderscoreTransform
and context:
# Path: rest_framework_json_schema/filters.py
# def get_query_filters(
# params: Dict[str, Any], transformer: Optional[Transform] = None
# ) -> Dict[str, Any]:
# """
# Parse JSON API filter query parameters and apply an optional transformation.
#
# https://jsonapi.org/format/#fetching-filtering
# """
#
# result = {}
# transformer = transformer or NullTransform()
#
# for key, value in params.items():
# m = FILTER.match(key)
# if m:
# result[transformer.transform(m.group(1))] = value
#
# return result
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseToUnderscoreTransform(Transform):
# """Transform camelCase to snake_underscore_case."""
#
# def transform(self, name: str) -> str:
# """Transform camelCase to snake_underscore_case."""
# words: List[str] = []
# last = 0
#
# for i, c in enumerate(name):
# if c[0].isupper():
# # Start of a new word
# words.append(name[last].lower() + name[last + 1 : i])
# last = i
# # Add the last word
# words.append(name[last].lower() + name[last + 1 :])
#
# return "_".join(words)
which might include code, classes, or functions. Output only the next line. | CamelCaseToUnderscoreTransform(), |
Given the code snippet: <|code_start|>
@mark_urls
def test_accept_list(factory: APIRequestFactory) -> None:
"""The negotiator filters application/vnd.api+json with params, but not any non-vnd.api+json"""
negotiator = JSONAPIContentNegotiation()
request = factory.get(
reverse("artist-list"), HTTP_ACCEPT="application/vnd.api+json"
)
accept_list = negotiator.get_accept_list(request)
assert accept_list == ["application/vnd.api+json"]
accept = (
"text/html,application/vnd.api+json;indent=4,application/xml;q=0.9,*/*;q=0.8"
)
request = factory.get(reverse("artist-list"), HTTP_ACCEPT=accept)
accept_list = negotiator.get_accept_list(request)
assert accept_list == ["text/html", "application/xml;q=0.9", "*/*;q=0.8"]
@mark_urls
def test_media_params(factory: APIRequestFactory) -> None:
"""
Support correct media params.
Servers MUST respond with a 406 Not Acceptable status code if a request's Accept header
contains the JSON API media type and all instances of that media type are
modified with media type parameters.
"""
<|code_end|>
, generate the next line using the imports in this file:
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.negotiation import JSONAPIContentNegotiation
from tests.support.decorators import mark_urls
from tests.support.views import ArtistViewSet
and context (functions, classes, or occasionally code) from other files:
# Path: rest_framework_json_schema/negotiation.py
# class JSONAPIContentNegotiation(DefaultContentNegotiation):
# """
# Implement the following two MUSTs in the JSON API.
#
# Servers MUST respond with a 415 Unsupported Media Type status code if a
# request specifies the header Content-Type: application/vnd.api+json
# with any media type parameters.
#
# Servers MUST respond with a 406 Not Acceptable status code if a request's
# Accept header contains the JSON API media type and all instances of that
# media type are modified with media type parameters.
#
# https://jsonapi.org/format/#content-negotiation
# """
#
# def get_accept_list(self, request: Request) -> List[str]:
# """Filter any JSON API specification that includes media parameters."""
# accept_list = super().get_accept_list(request)
#
# def jsonapi_params(media_type_str: str) -> bool:
# media_type = _MediaType(media_type_str)
# # We don't use _MediaType.match() because we want an *exact* match, without matching */*
# return (
# media_type.full_type == "application/vnd.api+json" and media_type.params
# )
#
# return [accept for accept in accept_list if not jsonapi_params(accept)]
#
# Path: tests/support/decorators.py
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
. Output only the next line. | view_list = ArtistViewSet.as_view({"get": "list"}) |
Here is a snippet: <|code_start|> include: Dict,
) -> Tuple[Union[ObjDataType, List[ObjDataType], None], List[ObjDataType]]:
"""Render primary data and included resources."""
schema = self.get_schema(data, renderer_context)
assert schema, "Unable to get schema class"
fields = self.get_fields(renderer_context)
context = Context(renderer_context.get("request", None), include, fields)
if isinstance(data, dict):
return self.render_obj(data, schema(), renderer_context, context)
elif isinstance(data, list):
return self.render_list(data, schema(), renderer_context, context)
return None, []
def render_exception(self, data: Any, renderer_context: Mapping[str, Any]) -> Any:
"""Render an exception result."""
return [data]
def is_exception(self, data: Any, renderer_context: Mapping[str, Any]) -> bool:
"""Return whether we're trying to render an exception."""
return renderer_context["response"].exception
def get_schema(
self, data: Any, renderer_context: Mapping[str, Any]
) -> Type[ResourceObject]:
"""Override this if you have a different way to get the schema."""
try:
serializer = data.serializer
except AttributeError:
<|code_end|>
. Write the next line using the current file imports:
import re
from collections import OrderedDict
from typing import Any, Dict, Optional, Mapping, List, Union, Tuple, Type
from rest_framework.renderers import JSONRenderer
from .exceptions import NoSchema
from .schema import Context, ResourceObject, ObjDataType, RenderResultType
from .utils import parse_include
and context from other files:
# Path: rest_framework_json_schema/exceptions.py
# class NoSchema(Exception):
# """
# Schema not found on the data to render.
#
# This is a programmer error.
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
, which may include functions, classes, or code. Output only the next line. | raise NoSchema() |
Given snippet: <|code_start|>"""Renderers are used to serialize a response into specific media types."""
RX_FIELDS = re.compile(r"^fields\[([a-zA-Z0-9\-_]+)\]$")
class JSONAPIRenderer(JSONRenderer):
"""Renderer which serializes to JSON API."""
media_type: str = "application/vnd.api+json"
format: str = "vnd.api+json"
# You can specify top-level items here.
meta: Optional[Dict[str, Any]] = None
jsonapi: Optional[Any] = None
def render_obj(
self,
obj: ObjDataType,
schema: ResourceObject,
renderer_context: Mapping[str, Any],
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from collections import OrderedDict
from typing import Any, Dict, Optional, Mapping, List, Union, Tuple, Type
from rest_framework.renderers import JSONRenderer
from .exceptions import NoSchema
from .schema import Context, ResourceObject, ObjDataType, RenderResultType
from .utils import parse_include
and context:
# Path: rest_framework_json_schema/exceptions.py
# class NoSchema(Exception):
# """
# Schema not found on the data to render.
#
# This is a programmer error.
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
which might include code, classes, or functions. Output only the next line. | context: Context, |
Using the snippet: <|code_start|>"""Renderers are used to serialize a response into specific media types."""
RX_FIELDS = re.compile(r"^fields\[([a-zA-Z0-9\-_]+)\]$")
class JSONAPIRenderer(JSONRenderer):
"""Renderer which serializes to JSON API."""
media_type: str = "application/vnd.api+json"
format: str = "vnd.api+json"
# You can specify top-level items here.
meta: Optional[Dict[str, Any]] = None
jsonapi: Optional[Any] = None
def render_obj(
self,
obj: ObjDataType,
<|code_end|>
, determine the next line of code. You have imports:
import re
from collections import OrderedDict
from typing import Any, Dict, Optional, Mapping, List, Union, Tuple, Type
from rest_framework.renderers import JSONRenderer
from .exceptions import NoSchema
from .schema import Context, ResourceObject, ObjDataType, RenderResultType
from .utils import parse_include
and context (class names, function names, or code) available:
# Path: rest_framework_json_schema/exceptions.py
# class NoSchema(Exception):
# """
# Schema not found on the data to render.
#
# This is a programmer error.
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
. Output only the next line. | schema: ResourceObject, |
Next line prediction: <|code_start|>"""Renderers are used to serialize a response into specific media types."""
RX_FIELDS = re.compile(r"^fields\[([a-zA-Z0-9\-_]+)\]$")
class JSONAPIRenderer(JSONRenderer):
"""Renderer which serializes to JSON API."""
media_type: str = "application/vnd.api+json"
format: str = "vnd.api+json"
# You can specify top-level items here.
meta: Optional[Dict[str, Any]] = None
jsonapi: Optional[Any] = None
def render_obj(
self,
<|code_end|>
. Use current file imports:
(import re
from collections import OrderedDict
from typing import Any, Dict, Optional, Mapping, List, Union, Tuple, Type
from rest_framework.renderers import JSONRenderer
from .exceptions import NoSchema
from .schema import Context, ResourceObject, ObjDataType, RenderResultType
from .utils import parse_include)
and context including class names, function names, or small code snippets from other files:
# Path: rest_framework_json_schema/exceptions.py
# class NoSchema(Exception):
# """
# Schema not found on the data to render.
#
# This is a programmer error.
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
. Output only the next line. | obj: ObjDataType, |
Given snippet: <|code_start|>"""Renderers are used to serialize a response into specific media types."""
RX_FIELDS = re.compile(r"^fields\[([a-zA-Z0-9\-_]+)\]$")
class JSONAPIRenderer(JSONRenderer):
"""Renderer which serializes to JSON API."""
media_type: str = "application/vnd.api+json"
format: str = "vnd.api+json"
# You can specify top-level items here.
meta: Optional[Dict[str, Any]] = None
jsonapi: Optional[Any] = None
def render_obj(
self,
obj: ObjDataType,
schema: ResourceObject,
renderer_context: Mapping[str, Any],
context: Context,
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import re
from collections import OrderedDict
from typing import Any, Dict, Optional, Mapping, List, Union, Tuple, Type
from rest_framework.renderers import JSONRenderer
from .exceptions import NoSchema
from .schema import Context, ResourceObject, ObjDataType, RenderResultType
from .utils import parse_include
and context:
# Path: rest_framework_json_schema/exceptions.py
# class NoSchema(Exception):
# """
# Schema not found on the data to render.
#
# This is a programmer error.
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
which might include code, classes, or functions. Output only the next line. | ) -> RenderResultType: |
Here is a snippet: <|code_start|> elif isinstance(data, list):
return self.render_list(data, schema(), renderer_context, context)
return None, []
def render_exception(self, data: Any, renderer_context: Mapping[str, Any]) -> Any:
"""Render an exception result."""
return [data]
def is_exception(self, data: Any, renderer_context: Mapping[str, Any]) -> bool:
"""Return whether we're trying to render an exception."""
return renderer_context["response"].exception
def get_schema(
self, data: Any, renderer_context: Mapping[str, Any]
) -> Type[ResourceObject]:
"""Override this if you have a different way to get the schema."""
try:
serializer = data.serializer
except AttributeError:
raise NoSchema()
if not getattr(serializer, "many", False):
return serializer.schema
else:
return serializer.child.schema
def get_include(self, renderer_context: Mapping[str, Any]) -> Dict[str, Dict]:
"""Return the parsed include parameter, if it exists."""
request = renderer_context.get("request", None)
if request:
<|code_end|>
. Write the next line using the current file imports:
import re
from collections import OrderedDict
from typing import Any, Dict, Optional, Mapping, List, Union, Tuple, Type
from rest_framework.renderers import JSONRenderer
from .exceptions import NoSchema
from .schema import Context, ResourceObject, ObjDataType, RenderResultType
from .utils import parse_include
and context from other files:
# Path: rest_framework_json_schema/exceptions.py
# class NoSchema(Exception):
# """
# Schema not found on the data to render.
#
# This is a programmer error.
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
, which may include functions, classes, or code. Output only the next line. | return parse_include(request.query_params.get("include", "")) |
Given the code snippet: <|code_start|>"""Utilities and helpers for filtering."""
FILTER = re.compile(r"^filter\[(\w+)\]$")
def get_query_filters(
params: Dict[str, Any], transformer: Optional[Transform] = None
) -> Dict[str, Any]:
"""
Parse JSON API filter query parameters and apply an optional transformation.
https://jsonapi.org/format/#fetching-filtering
"""
result = {}
<|code_end|>
, generate the next line using the imports in this file:
import re
from typing import Any, Dict, Optional
from .transforms import NullTransform, Transform
and context (functions, classes, or occasionally code) from other files:
# Path: rest_framework_json_schema/transforms.py
# class NullTransform(Transform):
# """A transform that doesn't do anything."""
#
# def transform(self, name: str) -> str:
# """Do nothing."""
# return name
#
# class Transform(ABC):
# """Provide the base interface for transforms."""
#
# @abstractmethod
# def transform(self, name: str) -> str:
# """Return the transformed name."""
# ...
. Output only the next line. | transformer = transformer or NullTransform() |
Given the following code snippet before the placeholder: <|code_start|>
@mark_urls
def test_detail_attributes(factory: APIRequestFactory) -> None:
"""You can render primary data object attributes."""
request = factory.get(reverse("artist-detail", kwargs={"pk": 1}))
<|code_end|>
, predict the next line using imports from the current file:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.views import ArtistViewSet, AlbumViewSet, TrackViewSet
and context including class names, function names, and sometimes code from other files:
# Path: tests/support/decorators.py
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
#
# class AlbumViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving albums."""
#
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Album]:
# """Return the list of albums."""
# return get_albums()
#
# def get_serializer(self, *args: Any, **kwargs: Any) -> BaseSerializer:
# """Test the use of dynamic serializers with the parser."""
# return AlbumSerializer(*args, **kwargs)
#
# @action_route
# def relationship_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# @action_route
# def related_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# class TrackViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving tracks."""
#
# serializer_class = TrackSerializer
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Track]:
# """Return the list of tracks."""
# return get_tracks()
. Output only the next line. | view_detail = ArtistViewSet.as_view({"get": "retrieve"}) |
Given the code snippet: <|code_start|> assert json.loads(response.content) == {
"meta": {
"data": {
"description": "A simple ViewSet for listing or retrieving artists.",
"name": "Artist",
"parses": ["application/vnd.api+json"],
"renders": ["application/vnd.api+json"],
}
}
}
@mark_urls
def test_attributes_fields(factory: APIRequestFactory) -> None:
"""The fields attribute returns specific fields."""
url = reverse("artist-detail", kwargs={"pk": 1})
request = factory.get(url, {"fields[artist]": "firstName"})
view_detail = ArtistViewSet.as_view({"get": "retrieve"})
response = view_detail(request, pk=1)
response.render()
assert response["Content-Type"] == "application/vnd.api+json"
assert json.loads(response.content) == {
"data": {"id": "1", "type": "artist", "attributes": {"firstName": "John"}}
}
@mark_urls
def test_relationships_empty(factory: APIRequestFactory) -> None:
"""You can render empty relationships."""
request = factory.get(reverse("album-detail", kwargs={"pk": 3}))
<|code_end|>
, generate the next line using the imports in this file:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.views import ArtistViewSet, AlbumViewSet, TrackViewSet
and context (functions, classes, or occasionally code) from other files:
# Path: tests/support/decorators.py
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
#
# class AlbumViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving albums."""
#
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Album]:
# """Return the list of albums."""
# return get_albums()
#
# def get_serializer(self, *args: Any, **kwargs: Any) -> BaseSerializer:
# """Test the use of dynamic serializers with the parser."""
# return AlbumSerializer(*args, **kwargs)
#
# @action_route
# def relationship_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# @action_route
# def related_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# class TrackViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving tracks."""
#
# serializer_class = TrackSerializer
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Track]:
# """Return the list of tracks."""
# return get_tracks()
. Output only the next line. | view_detail = AlbumViewSet.as_view({"get": "retrieve"}) |
Given the following code snippet before the placeholder: <|code_start|> request = factory.get(
reverse("album-detail", kwargs={"pk": 0}), {"include": "artist"}
)
view_detail = AlbumViewSet.as_view({"get": "retrieve"})
response = view_detail(request, pk=0)
response.render()
assert response["Content-Type"] == "application/vnd.api+json"
assert json.loads(response.content) == {
"data": {
"id": "0",
"type": "album",
"attributes": {"albumName": "A Love Supreme"},
"relationships": {
"artist": {"data": {"id": "1", "type": "artist"}},
"tracks": {"data": []},
},
},
"included": [
{
"id": "1",
"type": "artist",
"attributes": {"firstName": "John", "lastName": "Coltrane"},
}
],
}
@mark_urls
def test_include_to_many_and_paths(factory: APIRequestFactory) -> None:
"""You can include a to-many relationship as a compound document."""
<|code_end|>
, predict the next line using imports from the current file:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.views import ArtistViewSet, AlbumViewSet, TrackViewSet
and context including class names, function names, and sometimes code from other files:
# Path: tests/support/decorators.py
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
#
# class AlbumViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving albums."""
#
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Album]:
# """Return the list of albums."""
# return get_albums()
#
# def get_serializer(self, *args: Any, **kwargs: Any) -> BaseSerializer:
# """Test the use of dynamic serializers with the parser."""
# return AlbumSerializer(*args, **kwargs)
#
# @action_route
# def relationship_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# @action_route
# def related_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# class TrackViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving tracks."""
#
# serializer_class = TrackSerializer
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Track]:
# """Return the list of tracks."""
# return get_tracks()
. Output only the next line. | track_detail = TrackViewSet.as_view({"get": "retrieve"}) |
Using the snippet: <|code_start|>
@mark_urls
def test_detail_attributes(factory: APIRequestFactory) -> None:
"""You can update primary data attributes."""
request = factory.put(
reverse("artist-detail", kwargs={"pk": 1}),
{
"data": {
"id": "1",
"type": "artist",
"attributes": {"firstName": "Art", "lastName": "Blakey"},
}
},
)
view_detail = ArtistViewSet.as_view({"put": "update"})
response = view_detail(request, pk=1)
response.render()
assert response["Content-Type"] == "application/vnd.api+json"
assert json.loads(response.content) == {
"data": {
"id": "1",
"type": "artist",
"attributes": {"firstName": "Art", "lastName": "Blakey"},
}
}
<|code_end|>
, determine the next line of code. You have imports:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.serializers import (
get_artists,
get_albums,
get_tracks,
get_non_default_ids,
)
from tests.support.views import ArtistViewSet, AlbumViewSet, NonDefaultIdViewSet
and context (class names, function names, or code) available:
# Path: tests/support/decorators.py
#
# Path: tests/support/serializers.py
# def get_artists() -> QuerySet:
# """Get all artists."""
# return QuerySet(ARTISTS)
#
# def get_albums() -> QuerySet:
# """Get all albums."""
# return QuerySet(ALBUMS)
#
# def get_tracks() -> QuerySet:
# """Get all tracks."""
# return QuerySet(TRACKS)
#
# def get_non_default_ids() -> QuerySet:
# """Get all tracks."""
# return QuerySet(NON_DEFAULT_IDS)
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
#
# class AlbumViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving albums."""
#
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Album]:
# """Return the list of albums."""
# return get_albums()
#
# def get_serializer(self, *args: Any, **kwargs: Any) -> BaseSerializer:
# """Test the use of dynamic serializers with the parser."""
# return AlbumSerializer(*args, **kwargs)
#
# @action_route
# def relationship_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# @action_route
# def related_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# class NonDefaultIdViewSet(BaseViewSet):
# """A ViewSet to test non-default IDs."""
#
# serializer_class = NonDefaultIdSerializer
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[NonDefaultId]:
# """Return the list of non default ID objects."""
# return get_non_default_ids()
. Output only the next line. | artist = get_artists().get(1) |
Predict the next line after this snippet: <|code_start|> album_list = AlbumViewSet.as_view({"post": "create"})
artist = get_artists().get(0)
track = get_tracks().get(0)
request = factory.post(
reverse("album-list"),
{
"data": {
"type": "album",
"attributes": {"albumName": "On the Corner"},
"relationships": {
"artist": {"data": {"id": artist.id, "type": "artist"}},
"tracks": {"data": [{"id": track.id, "type": "track"}]},
},
}
},
)
response = album_list(request)
response.render()
assert response["Content-Type"] == "application/vnd.api+json"
assert json.loads(response.content) == {
"data": {
"id": "4",
"type": "album",
"attributes": {"albumName": "On the Corner"},
"relationships": {
"artist": {"data": {"id": str(artist.id), "type": "artist"}},
"tracks": {"data": [{"id": "0", "type": "track"}]},
},
}
}
<|code_end|>
using the current file's imports:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.serializers import (
get_artists,
get_albums,
get_tracks,
get_non_default_ids,
)
from tests.support.views import ArtistViewSet, AlbumViewSet, NonDefaultIdViewSet
and any relevant context from other files:
# Path: tests/support/decorators.py
#
# Path: tests/support/serializers.py
# def get_artists() -> QuerySet:
# """Get all artists."""
# return QuerySet(ARTISTS)
#
# def get_albums() -> QuerySet:
# """Get all albums."""
# return QuerySet(ALBUMS)
#
# def get_tracks() -> QuerySet:
# """Get all tracks."""
# return QuerySet(TRACKS)
#
# def get_non_default_ids() -> QuerySet:
# """Get all tracks."""
# return QuerySet(NON_DEFAULT_IDS)
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
#
# class AlbumViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving albums."""
#
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Album]:
# """Return the list of albums."""
# return get_albums()
#
# def get_serializer(self, *args: Any, **kwargs: Any) -> BaseSerializer:
# """Test the use of dynamic serializers with the parser."""
# return AlbumSerializer(*args, **kwargs)
#
# @action_route
# def relationship_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# @action_route
# def related_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# class NonDefaultIdViewSet(BaseViewSet):
# """A ViewSet to test non-default IDs."""
#
# serializer_class = NonDefaultIdSerializer
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[NonDefaultId]:
# """Return the list of non default ID objects."""
# return get_non_default_ids()
. Output only the next line. | album = get_albums()[4] |
Based on the snippet: <|code_start|> reverse("artist-list"),
{
"data": {
"type": "artist",
"attributes": {"firstName": "Thelonious", "lastName": "Monk"},
}
},
)
view_list = ArtistViewSet.as_view({"post": "create"})
response = view_list(request)
response.render()
assert response["Content-Type"] == "application/vnd.api+json"
assert json.loads(response.content) == {
"data": {
"id": "6",
"type": "artist",
"attributes": {"firstName": "Thelonious", "lastName": "Monk"},
}
}
artist = get_artists().get(6)
assert artist.id == 6
assert artist.first_name == "Thelonious"
assert artist.last_name == "Monk"
@mark_urls
def test_parse_relationships(factory: APIRequestFactory) -> None:
"""You can parse relationships."""
album_list = AlbumViewSet.as_view({"post": "create"})
artist = get_artists().get(0)
<|code_end|>
, predict the immediate next line with the help of imports:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.serializers import (
get_artists,
get_albums,
get_tracks,
get_non_default_ids,
)
from tests.support.views import ArtistViewSet, AlbumViewSet, NonDefaultIdViewSet
and context (classes, functions, sometimes code) from other files:
# Path: tests/support/decorators.py
#
# Path: tests/support/serializers.py
# def get_artists() -> QuerySet:
# """Get all artists."""
# return QuerySet(ARTISTS)
#
# def get_albums() -> QuerySet:
# """Get all albums."""
# return QuerySet(ALBUMS)
#
# def get_tracks() -> QuerySet:
# """Get all tracks."""
# return QuerySet(TRACKS)
#
# def get_non_default_ids() -> QuerySet:
# """Get all tracks."""
# return QuerySet(NON_DEFAULT_IDS)
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
#
# class AlbumViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving albums."""
#
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Album]:
# """Return the list of albums."""
# return get_albums()
#
# def get_serializer(self, *args: Any, **kwargs: Any) -> BaseSerializer:
# """Test the use of dynamic serializers with the parser."""
# return AlbumSerializer(*args, **kwargs)
#
# @action_route
# def relationship_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# @action_route
# def related_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# class NonDefaultIdViewSet(BaseViewSet):
# """A ViewSet to test non-default IDs."""
#
# serializer_class = NonDefaultIdSerializer
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[NonDefaultId]:
# """Return the list of non default ID objects."""
# return get_non_default_ids()
. Output only the next line. | track = get_tracks().get(0) |
Given the code snippet: <|code_start|> "id": "4",
"type": "album",
"attributes": {"albumName": "On the Corner"},
"relationships": {
"artist": {"data": {"id": str(artist.id), "type": "artist"}},
"tracks": {"data": [{"id": "0", "type": "track"}]},
},
}
}
album = get_albums()[4]
assert album.album_name == "On the Corner"
assert album.artist.id == artist.id
@mark_urls
def test_post_non_default_id(factory: APIRequestFactory) -> None:
"""POSTing with a non-default ID works."""
non_default_list = NonDefaultIdViewSet.as_view({"post": "create"})
request = factory.post(
reverse("non-default-id-list"),
{
"data": {
"id": "foo",
"type": "non-defaults",
"attributes": {"name": "my name"},
}
},
)
response = non_default_list(request)
assert response.status_code == 201
<|code_end|>
, generate the next line using the imports in this file:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.serializers import (
get_artists,
get_albums,
get_tracks,
get_non_default_ids,
)
from tests.support.views import ArtistViewSet, AlbumViewSet, NonDefaultIdViewSet
and context (functions, classes, or occasionally code) from other files:
# Path: tests/support/decorators.py
#
# Path: tests/support/serializers.py
# def get_artists() -> QuerySet:
# """Get all artists."""
# return QuerySet(ARTISTS)
#
# def get_albums() -> QuerySet:
# """Get all albums."""
# return QuerySet(ALBUMS)
#
# def get_tracks() -> QuerySet:
# """Get all tracks."""
# return QuerySet(TRACKS)
#
# def get_non_default_ids() -> QuerySet:
# """Get all tracks."""
# return QuerySet(NON_DEFAULT_IDS)
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
#
# class AlbumViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving albums."""
#
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Album]:
# """Return the list of albums."""
# return get_albums()
#
# def get_serializer(self, *args: Any, **kwargs: Any) -> BaseSerializer:
# """Test the use of dynamic serializers with the parser."""
# return AlbumSerializer(*args, **kwargs)
#
# @action_route
# def relationship_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# @action_route
# def related_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# class NonDefaultIdViewSet(BaseViewSet):
# """A ViewSet to test non-default IDs."""
#
# serializer_class = NonDefaultIdSerializer
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[NonDefaultId]:
# """Return the list of non default ID objects."""
# return get_non_default_ids()
. Output only the next line. | models = get_non_default_ids() |
Given the code snippet: <|code_start|>
@mark_urls
def test_detail_attributes(factory: APIRequestFactory) -> None:
"""You can update primary data attributes."""
request = factory.put(
reverse("artist-detail", kwargs={"pk": 1}),
{
"data": {
"id": "1",
"type": "artist",
"attributes": {"firstName": "Art", "lastName": "Blakey"},
}
},
)
<|code_end|>
, generate the next line using the imports in this file:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.serializers import (
get_artists,
get_albums,
get_tracks,
get_non_default_ids,
)
from tests.support.views import ArtistViewSet, AlbumViewSet, NonDefaultIdViewSet
and context (functions, classes, or occasionally code) from other files:
# Path: tests/support/decorators.py
#
# Path: tests/support/serializers.py
# def get_artists() -> QuerySet:
# """Get all artists."""
# return QuerySet(ARTISTS)
#
# def get_albums() -> QuerySet:
# """Get all albums."""
# return QuerySet(ALBUMS)
#
# def get_tracks() -> QuerySet:
# """Get all tracks."""
# return QuerySet(TRACKS)
#
# def get_non_default_ids() -> QuerySet:
# """Get all tracks."""
# return QuerySet(NON_DEFAULT_IDS)
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
#
# class AlbumViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving albums."""
#
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Album]:
# """Return the list of albums."""
# return get_albums()
#
# def get_serializer(self, *args: Any, **kwargs: Any) -> BaseSerializer:
# """Test the use of dynamic serializers with the parser."""
# return AlbumSerializer(*args, **kwargs)
#
# @action_route
# def relationship_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# @action_route
# def related_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# class NonDefaultIdViewSet(BaseViewSet):
# """A ViewSet to test non-default IDs."""
#
# serializer_class = NonDefaultIdSerializer
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[NonDefaultId]:
# """Return the list of non default ID objects."""
# return get_non_default_ids()
. Output only the next line. | view_detail = ArtistViewSet.as_view({"put": "update"}) |
Based on the snippet: <|code_start|> """You can create using primary data attributes."""
request = factory.post(
reverse("artist-list"),
{
"data": {
"type": "artist",
"attributes": {"firstName": "Thelonious", "lastName": "Monk"},
}
},
)
view_list = ArtistViewSet.as_view({"post": "create"})
response = view_list(request)
response.render()
assert response["Content-Type"] == "application/vnd.api+json"
assert json.loads(response.content) == {
"data": {
"id": "6",
"type": "artist",
"attributes": {"firstName": "Thelonious", "lastName": "Monk"},
}
}
artist = get_artists().get(6)
assert artist.id == 6
assert artist.first_name == "Thelonious"
assert artist.last_name == "Monk"
@mark_urls
def test_parse_relationships(factory: APIRequestFactory) -> None:
"""You can parse relationships."""
<|code_end|>
, predict the immediate next line with the help of imports:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.serializers import (
get_artists,
get_albums,
get_tracks,
get_non_default_ids,
)
from tests.support.views import ArtistViewSet, AlbumViewSet, NonDefaultIdViewSet
and context (classes, functions, sometimes code) from other files:
# Path: tests/support/decorators.py
#
# Path: tests/support/serializers.py
# def get_artists() -> QuerySet:
# """Get all artists."""
# return QuerySet(ARTISTS)
#
# def get_albums() -> QuerySet:
# """Get all albums."""
# return QuerySet(ALBUMS)
#
# def get_tracks() -> QuerySet:
# """Get all tracks."""
# return QuerySet(TRACKS)
#
# def get_non_default_ids() -> QuerySet:
# """Get all tracks."""
# return QuerySet(NON_DEFAULT_IDS)
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
#
# class AlbumViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving albums."""
#
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Album]:
# """Return the list of albums."""
# return get_albums()
#
# def get_serializer(self, *args: Any, **kwargs: Any) -> BaseSerializer:
# """Test the use of dynamic serializers with the parser."""
# return AlbumSerializer(*args, **kwargs)
#
# @action_route
# def relationship_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# @action_route
# def related_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# class NonDefaultIdViewSet(BaseViewSet):
# """A ViewSet to test non-default IDs."""
#
# serializer_class = NonDefaultIdSerializer
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[NonDefaultId]:
# """Return the list of non default ID objects."""
# return get_non_default_ids()
. Output only the next line. | album_list = AlbumViewSet.as_view({"post": "create"}) |
Using the snippet: <|code_start|> "attributes": {"albumName": "On the Corner"},
"relationships": {
"artist": {"data": {"id": artist.id, "type": "artist"}},
"tracks": {"data": [{"id": track.id, "type": "track"}]},
},
}
},
)
response = album_list(request)
response.render()
assert response["Content-Type"] == "application/vnd.api+json"
assert json.loads(response.content) == {
"data": {
"id": "4",
"type": "album",
"attributes": {"albumName": "On the Corner"},
"relationships": {
"artist": {"data": {"id": str(artist.id), "type": "artist"}},
"tracks": {"data": [{"id": "0", "type": "track"}]},
},
}
}
album = get_albums()[4]
assert album.album_name == "On the Corner"
assert album.artist.id == artist.id
@mark_urls
def test_post_non_default_id(factory: APIRequestFactory) -> None:
"""POSTing with a non-default ID works."""
<|code_end|>
, determine the next line of code. You have imports:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.serializers import (
get_artists,
get_albums,
get_tracks,
get_non_default_ids,
)
from tests.support.views import ArtistViewSet, AlbumViewSet, NonDefaultIdViewSet
and context (class names, function names, or code) available:
# Path: tests/support/decorators.py
#
# Path: tests/support/serializers.py
# def get_artists() -> QuerySet:
# """Get all artists."""
# return QuerySet(ARTISTS)
#
# def get_albums() -> QuerySet:
# """Get all albums."""
# return QuerySet(ALBUMS)
#
# def get_tracks() -> QuerySet:
# """Get all tracks."""
# return QuerySet(TRACKS)
#
# def get_non_default_ids() -> QuerySet:
# """Get all tracks."""
# return QuerySet(NON_DEFAULT_IDS)
#
# Path: tests/support/views.py
# class ArtistViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving artists."""
#
# serializer_class = ArtistSerializer
# # This is not testing pagination
# pagination_class: Optional[Type[BasePagination]] = None
#
# def get_queryset(self) -> QuerySet[Artist]:
# """Return the list of artists."""
# return get_artists()
#
# class AlbumViewSet(BaseViewSet):
# """A simple ViewSet for listing or retrieving albums."""
#
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[Album]:
# """Return the list of albums."""
# return get_albums()
#
# def get_serializer(self, *args: Any, **kwargs: Any) -> BaseSerializer:
# """Test the use of dynamic serializers with the parser."""
# return AlbumSerializer(*args, **kwargs)
#
# @action_route
# def relationship_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# @action_route
# def related_artist(self) -> Response:
# """Do nothing."""
# # Not currently called, just reversed.
# return Response()
#
# class NonDefaultIdViewSet(BaseViewSet):
# """A ViewSet to test non-default IDs."""
#
# serializer_class = NonDefaultIdSerializer
# pagination_class = None
#
# def get_queryset(self) -> QuerySet[NonDefaultId]:
# """Return the list of non default ID objects."""
# return get_non_default_ids()
. Output only the next line. | non_default_list = NonDefaultIdViewSet.as_view({"post": "create"}) |
Here is a snippet: <|code_start|>
def __init__(self, **kwargs: Any) -> None:
"""Create a resource object."""
for key, value in kwargs.items():
setattr(self, key, value)
self.transformed_names: Dict[str, str] = {}
transformer = self.transformer()
for name in self.attributes:
self.transformed_names[name] = transformer.transform(name)
# Normalize the relationships array to always be a tuple of (name, relobj)
def _normalize_rel(rel: RelOptType) -> RelType:
return (rel, RelationshipObject()) if isinstance(rel, str) else rel
self.relationships = self.norm_relationships = [
_normalize_rel(rel) for rel in self.relationships
]
for (name, _rel) in self.norm_relationships:
self.transformed_names[name] = transformer.transform(name)
def parse(self, data: ObjDataType, context: Context) -> Dict:
"""
Parse a Resource Object representation into an internal representation.
Verifies that the object is of the correct type.
"""
type = data.get("type")
if type != self.type:
<|code_end|>
. Write the next line using the current file imports:
from collections import OrderedDict
from typing import (
Any,
Optional,
Dict,
Sequence,
Tuple,
List,
Callable,
Union,
Type,
cast,
Iterator,
overload,
)
from django.urls import reverse
from rest_framework.request import Request
from .exceptions import TypeConflict, IncludeInvalid
from .transforms import NullTransform, Transform
and context from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/transforms.py
# class NullTransform(Transform):
# """A transform that doesn't do anything."""
#
# def transform(self, name: str) -> str:
# """Do nothing."""
# return name
#
# class Transform(ABC):
# """Provide the base interface for transforms."""
#
# @abstractmethod
# def transform(self, name: str) -> str:
# """Return the transformed name."""
# ...
, which may include functions, classes, or code. Output only the next line. | raise TypeConflict( |
Given the code snippet: <|code_start|> relationships, included = self.render_relationships(data, context)
if relationships:
result["relationships"] = relationships
links = self.render_links(data, context)
if links:
result["links"] = links
meta = self.render_meta(data, context)
if meta:
result["meta"] = meta
return result, included
def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
"""Render model attributes to the output type."""
attributes = self.filter_by_fields(self.attributes, context.fields, lambda x: x)
return OrderedDict(
(self.transformed_names[attr], self.from_data(data, attr))
for attr in attributes
if attr in data
)
def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
"""Render model relationships to the output type, including included resources."""
relationships: Dict[str, Dict] = OrderedDict()
included = []
# Validate that all top-level include keys are actually relationships
rel_keys = {rel[0] for rel in self.relationships}
for key in context.include:
if key not in rel_keys:
<|code_end|>
, generate the next line using the imports in this file:
from collections import OrderedDict
from typing import (
Any,
Optional,
Dict,
Sequence,
Tuple,
List,
Callable,
Union,
Type,
cast,
Iterator,
overload,
)
from django.urls import reverse
from rest_framework.request import Request
from .exceptions import TypeConflict, IncludeInvalid
from .transforms import NullTransform, Transform
and context (functions, classes, or occasionally code) from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/transforms.py
# class NullTransform(Transform):
# """A transform that doesn't do anything."""
#
# def transform(self, name: str) -> str:
# """Do nothing."""
# return name
#
# class Transform(ABC):
# """Provide the base interface for transforms."""
#
# @abstractmethod
# def transform(self, name: str) -> str:
# """Return the transformed name."""
# ...
. Output only the next line. | raise IncludeInvalid("Invalid relationship to include: %s" % key) |
Predict the next line after this snippet: <|code_start|>
return OrderedDict(
(link_name, link_obj.render(data, context.request))
for (link_name, link_obj) in self.links
)
def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
"""
Render metadata.
Implement this in your subclass if you have more complex meta information
that depends on data or the request.
"""
return self.meta
class ResourceObject(BaseLinkedObject):
"""
Implement the base document resource object.
http://jsonapi.org/format/#document-resource-objects
"""
# REQUIRED members
id: str = "id"
type: str = "unknown"
# OPTIONAL members
attributes: Sequence[str] = ()
relationships: Sequence[RelOptType] = ()
<|code_end|>
using the current file's imports:
from collections import OrderedDict
from typing import (
Any,
Optional,
Dict,
Sequence,
Tuple,
List,
Callable,
Union,
Type,
cast,
Iterator,
overload,
)
from django.urls import reverse
from rest_framework.request import Request
from .exceptions import TypeConflict, IncludeInvalid
from .transforms import NullTransform, Transform
and any relevant context from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/transforms.py
# class NullTransform(Transform):
# """A transform that doesn't do anything."""
#
# def transform(self, name: str) -> str:
# """Do nothing."""
# return name
#
# class Transform(ABC):
# """Provide the base interface for transforms."""
#
# @abstractmethod
# def transform(self, name: str) -> str:
# """Return the transformed name."""
# ...
. Output only the next line. | transformer: Type[Transform] = NullTransform |
Predict the next line for this snippet: <|code_start|>
return OrderedDict(
(link_name, link_obj.render(data, context.request))
for (link_name, link_obj) in self.links
)
def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
"""
Render metadata.
Implement this in your subclass if you have more complex meta information
that depends on data or the request.
"""
return self.meta
class ResourceObject(BaseLinkedObject):
"""
Implement the base document resource object.
http://jsonapi.org/format/#document-resource-objects
"""
# REQUIRED members
id: str = "id"
type: str = "unknown"
# OPTIONAL members
attributes: Sequence[str] = ()
relationships: Sequence[RelOptType] = ()
<|code_end|>
with the help of current file imports:
from collections import OrderedDict
from typing import (
Any,
Optional,
Dict,
Sequence,
Tuple,
List,
Callable,
Union,
Type,
cast,
Iterator,
overload,
)
from django.urls import reverse
from rest_framework.request import Request
from .exceptions import TypeConflict, IncludeInvalid
from .transforms import NullTransform, Transform
and context from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/transforms.py
# class NullTransform(Transform):
# """A transform that doesn't do anything."""
#
# def transform(self, name: str) -> str:
# """Do nothing."""
# return name
#
# class Transform(ABC):
# """Provide the base interface for transforms."""
#
# @abstractmethod
# def transform(self, name: str) -> str:
# """Return the transformed name."""
# ...
, which may contain function names, class names, or code. Output only the next line. | transformer: Type[Transform] = NullTransform |
Given snippet: <|code_start|>
@pytest.fixture
def factory() -> APIRequestFactory:
"""Provide an API Request Factory."""
return APIRequestFactory()
@pytest.fixture(autouse=True)
def auto_reset_data() -> None:
"""Automatically reset test data before each test."""
<|code_end|>
, continue by predicting the next line. Consider current file imports:
import pytest
from rest_framework.test import APIRequestFactory
from tests.support.serializers import reset_data
and context:
# Path: tests/support/serializers.py
# def reset_data() -> None:
# """Reset test data."""
# global ARTISTS
# global ALBUMS
# global TRACKS
# global NON_DEFAULT_IDS
# ARTISTS = deepcopy(INITIAL_ARTISTS)
# ALBUMS = deepcopy(INITIAL_ALBUMS)
# TRACKS = deepcopy(INITIAL_TRACKS)
# NON_DEFAULT_IDS = deepcopy(NON_DEFAULT_IDS)
which might include code, classes, or functions. Output only the next line. | reset_data() |
Predict the next line after this snippet: <|code_start|> )
result = obj.parse(
{
"id": "123",
"type": "users",
"attributes": {"firstName": "John", "lastName": "Coltrane"},
},
context,
)
assert result == {"id": "123", "first_name": "John", "last_name": "Coltrane"}
@mark_urls
def test_parse_partial(context: Context) -> None:
"""Attributes not passed in don't get included in the result data."""
obj = ResourceObject(
type="users",
attributes=("first_name", "last_name"),
transformer=CamelCaseTransform,
)
result = obj.parse({"type": "users", "attributes": {"firstName": "John"}}, context)
assert result == {"first_name": "John"}
@mark_urls
def test_parse_type_conflict(schema_request: Request) -> None:
"""
The parser throws a type conflict exception on type conflicts.
"""
obj = ResourceObject(type="users")
<|code_end|>
using the current file's imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and any relevant context from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
. Output only the next line. | with pytest.raises(TypeConflict): |
Predict the next line after this snippet: <|code_start|> ),
),
)
),
OrderedDict(
(
("id", "1"),
("type", "track"),
("attributes", OrderedDict((("name", "Acknowledgement"),))),
)
),
OrderedDict(
(
("id", "2"),
("type", "track"),
("attributes", OrderedDict((("name", "Resolution"),))),
)
),
]
@mark_urls
def test_render_invalid_include(schema_request: Request) -> None:
"""An invalid include path throws an exception."""
class ArtistObject(ResourceObject):
type = "artist"
attributes = ("first_name", "last_name")
relationships = ("albums",)
<|code_end|>
using the current file's imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and any relevant context from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
. Output only the next line. | with pytest.raises(IncludeInvalid): |
Given snippet: <|code_start|>
@pytest.fixture
def schema_request(factory: APIRequestFactory) -> Request:
factory = APIRequestFactory()
return factory.get("/")
@pytest.fixture
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and context:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
which might include code, classes, or functions. Output only the next line. | def context(schema_request: Request) -> Context: |
Here is a snippet: <|code_start|>
@pytest.fixture
def schema_request(factory: APIRequestFactory) -> Request:
factory = APIRequestFactory()
return factory.get("/")
@pytest.fixture
def context(schema_request: Request) -> Context:
return Context(schema_request)
@mark_urls
def test_resource_object_default(context: Context) -> None:
"""With no attributes, nothing is included, just the ID."""
<|code_end|>
. Write the next line using the current file imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and context from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
, which may include functions, classes, or code. Output only the next line. | primary, included = ResourceObject().render( |
Based on the snippet: <|code_start|> "data",
[
OrderedDict(
(("id", "5"), ("type", "artist"))
),
OrderedDict(
(("id", "6"), ("type", "artist"))
),
],
),
)
),
),
)
),
),
)
)
assert included == []
@mark_urls
def test_render_complex_relationship(context: Context) -> None:
"""
Allow specifying a relationship object that specifies additional data for the relationship.
Relationship links will most likely depend on some part of original object's data
(like the pk)
"""
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and context (classes, functions, sometimes code) from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
. Output only the next line. | class ArtistRelationship(RelationshipObject): |
Based on the snippet: <|code_start|> # Empty to-one relationship: relationship is 'None'
# Empty to-many relationship: relationship is '[]'
# Single relationship: a ResourceIdObject
# To-Many: an array of ResourceIdObjects
obj = AlbumObject()
primary, included = obj.render({"id": "123", "artist": None}, context)
assert primary == OrderedDict(
(
("id", "123"),
("type", "album"),
(
"relationships",
OrderedDict((("artist", OrderedDict((("data", None),))),)),
),
)
)
assert included == []
primary, included = obj.render({"id": "123", "artist": []}, context)
assert primary == OrderedDict(
(
("id", "123"),
("type", "album"),
("relationships", OrderedDict((("artist", OrderedDict((("data", []),))),))),
)
)
assert included == []
primary, included = obj.render(
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and context (classes, functions, sometimes code) from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
. Output only the next line. | {"id": "123", "artist": ResourceIdObject(id=5, type="artist")}, context |
Based on the snippet: <|code_start|> ("attributes", OrderedDict((("name", "John"),))),
)
)
assert included == []
@mark_urls
def test_subclass(context: Context) -> None:
"""You can use a subclass to specify schema information."""
class TestObject(ResourceObject):
id = "user_id"
type = "users"
attributes = ("name",)
primary, included = TestObject().render({"user_id": "123", "name": "John"}, context)
assert primary == OrderedDict(
(
("id", "123"),
("type", "users"),
("attributes", OrderedDict((("name", "John"),))),
)
)
assert included == []
@mark_urls
def test_links(context: Context) -> None:
"""You can specify links which are methods on the object."""
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and context (classes, functions, sometimes code) from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
. Output only the next line. | class ObjectLink(LinkObject): |
Based on the snippet: <|code_start|>def test_subclass(context: Context) -> None:
"""You can use a subclass to specify schema information."""
class TestObject(ResourceObject):
id = "user_id"
type = "users"
attributes = ("name",)
primary, included = TestObject().render({"user_id": "123", "name": "John"}, context)
assert primary == OrderedDict(
(
("id", "123"),
("type", "users"),
("attributes", OrderedDict((("name", "John"),))),
)
)
assert included == []
@mark_urls
def test_links(context: Context) -> None:
"""You can specify links which are methods on the object."""
class ObjectLink(LinkObject):
def render(self, data: ObjDataType, request: Request) -> Any:
return {"href": "/users/%s" % data["id"], "meta": {"something": "hello"}}
class TestObject(ResourceObject):
type = "artists"
links = (
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and context (classes, functions, sometimes code) from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
. Output only the next line. | ("self", UrlLink(view_name="artist-detail", url_kwargs={"pk": "id"})), |
Based on the snippet: <|code_start|> )
)
assert included == []
@mark_urls
def test_subclass(context: Context) -> None:
"""You can use a subclass to specify schema information."""
class TestObject(ResourceObject):
id = "user_id"
type = "users"
attributes = ("name",)
primary, included = TestObject().render({"user_id": "123", "name": "John"}, context)
assert primary == OrderedDict(
(
("id", "123"),
("type", "users"),
("attributes", OrderedDict((("name", "John"),))),
)
)
assert included == []
@mark_urls
def test_links(context: Context) -> None:
"""You can specify links which are methods on the object."""
class ObjectLink(LinkObject):
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and context (classes, functions, sometimes code) from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
. Output only the next line. | def render(self, data: ObjDataType, request: Request) -> Any: |
Given the code snippet: <|code_start|> )
)
assert included == []
@mark_urls
def test_render_complex_relationship(context: Context) -> None:
"""
Allow specifying a relationship object that specifies additional data for the relationship.
Relationship links will most likely depend on some part of original object's data
(like the pk)
"""
class ArtistRelationship(RelationshipObject):
links = (
(
"self",
UrlLink(view_name="album-relationship-artist", url_kwargs={"pk": "id"}),
),
(
"related",
UrlLink(view_name="album-related-artist", url_kwargs={"pk": "id"}),
),
)
meta = {"foo": "bar"}
class AlbumObject(ResourceObject):
type = "album"
relationships = (("album_artist", ArtistRelationship()),)
<|code_end|>
, generate the next line using the imports in this file:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and context (functions, classes, or occasionally code) from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
. Output only the next line. | transformer = CamelCaseTransform |
Predict the next line after this snippet: <|code_start|> OrderedDict((("artist2", OrderedDict((("data", None),))),)),
),
)
)
assert included == []
@mark_urls
def test_render_included(schema_request: Request) -> None:
"""You can render included resources."""
class ArtistObject(ResourceObject):
type = "artist"
attributes = ("first_name", "last_name")
class AlbumObject(ResourceObject):
type = "album"
relationships = ("artist",)
# This a faked ResourceIdObject that allows you the schema to not know about
# the serializer directly.
class ArtistLink(ResourceIdObject):
def get_schema(self) -> ResourceObject:
return ArtistObject()
def get_data(self) -> Dict[str, Any]:
return {"id": self.id, "first_name": "John", "last_name": "Coltrane"}
primary, included = AlbumObject().render(
{"id": "123", "artist": ArtistLink(id=5, type="artist")},
<|code_end|>
using the current file's imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and any relevant context from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
. Output only the next line. | Context(schema_request, parse_include("artist")), |
Based on the snippet: <|code_start|>
@pytest.fixture
def schema_request(factory: APIRequestFactory) -> Request:
factory = APIRequestFactory()
return factory.get("/")
@pytest.fixture
def context(schema_request: Request) -> Context:
return Context(schema_request)
<|code_end|>
, predict the immediate next line with the help of imports:
from collections import OrderedDict
from typing import Any, Dict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_schema.exceptions import TypeConflict, IncludeInvalid
from rest_framework_json_schema.schema import (
Context,
ResourceObject,
RelationshipObject,
ResourceIdObject,
LinkObject,
UrlLink,
ObjDataType,
)
from rest_framework_json_schema.transforms import CamelCaseTransform
from rest_framework_json_schema.utils import parse_include
from tests.support.decorators import mark_urls
import pytest
and context (classes, functions, sometimes code) from other files:
# Path: rest_framework_json_schema/exceptions.py
# class TypeConflict(Exception):
# """
# The type passed to this resource object is incorrect.
#
# https://jsonapi.org/format/#crud-creating-responses-409
# """
#
# class IncludeInvalid(Exception):
# """
# The relationship cannot be included.
#
# https://jsonapi.org/format/#fetching-includes
# """
#
# Path: rest_framework_json_schema/schema.py
# class Context:
# class BaseLinkedObject:
# class ResourceObject(BaseLinkedObject):
# class ResourceIdObject(BaseLinkedObject):
# class RelationshipObject(BaseLinkedObject):
# class LinkObject:
# class UrlLink(LinkObject):
# def __init__(
# self,
# request: Request,
# include: Optional[Dict] = None,
# fields: Optional[Dict] = None,
# ) -> None:
# def render_links(self, data: ObjDataType, context: Context) -> OrderedDict:
# def render_meta(self, data: Any, context: Context) -> Optional[Dict]:
# def __init__(self, **kwargs: Any) -> None:
# def _normalize_rel(rel: RelOptType) -> RelType:
# def parse(self, data: ObjDataType, context: Context) -> Dict:
# def render(self, data: ObjDataType, context: Context) -> RenderResultType:
# def render_attributes(self, data: Dict, context: Context) -> ObjDataType:
# def render_relationships(self, data: Dict, context: Context) -> RenderResultType:
# def render_relationship(
# self, data: Dict, rel_name: str, rel: "RelationshipObject", context: Context
# ) -> RenderResultType:
# def from_data(self, data: Dict, attr: str) -> Any:
# def filter_by_fields(
# self, names: Sequence[RelType], fields: Dict, name_fn: Callable[[RelType], str]
# ) -> Iterator[RelType]:
# def filter_by_fields(
# self, names: Sequence[str], fields: Dict, name_fn: Callable[[str], str]
# ) -> Iterator[str]:
# def filter_by_fields(
# self, names: Sequence[Any], fields: Dict, name_fn: Callable[[Any], str]
# ) -> Iterator[Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, request: Request) -> Dict[str, Any]:
# def get_schema(self) -> ResourceObject:
# def get_data(self) -> Dict[str, Any]:
# def __init__(self, **kwargs: Any) -> None:
# def render_included(
# self, rel_data: "ResourceIdObject", context: Context
# ) -> List[Dict[str, Any]]:
# def render(
# self,
# obj_data: ObjDataType,
# rel_data: ResourceIdObject,
# context: Context,
# include_this: bool,
# ) -> RenderResultType:
# def parse(
# self, obj_data: ObjDataType, context: Context
# ) -> Union[str, List[str], None]:
# def __init__(self, **kwargs: Any) -> None:
# def render(self, data: ObjDataType, request: Request) -> Any:
# def render(self, data: ObjDataType, request: Request) -> Any:
#
# Path: rest_framework_json_schema/transforms.py
# class CamelCaseTransform(Transform):
# """Transform snake_underscore_case to camelCase."""
#
# def transform(self, name: str) -> str:
# """Transform snake_underscore_case to camelCase."""
# split = name.split("_")
# return split[0] + "".join([_upper(c) for c in split[1:]])
#
# Path: rest_framework_json_schema/utils.py
# def parse_include(include: str) -> Dict[str, Dict]:
# """
# Parse an include parameter into its constituent paths.
#
# It returns a tree of include paths, for instance:
#
# a,a.b,a.c.d,e.f,g
# Returns:
# {
# 'a': {
# 'b': {},
# 'c', {
# 'd': {}
# }
# },
# 'e': {
# 'f': {}
# },
# 'g': {}
# }
# """
# result: Dict[str, Dict] = {}
# split = include.split(",")
# for path in split:
# if path:
# components = path.split(".")
# level = result
# for c in components:
# if c not in level:
# level[c] = {}
# level = level[c]
# return result
#
# Path: tests/support/decorators.py
. Output only the next line. | @mark_urls |
Given the code snippet: <|code_start|>
@mark_urls
def test_pagination_limit_offset(factory: APIRequestFactory) -> None:
"""Pagination works according to spec."""
list_url = reverse("page-list")
request = factory.get(list_url, {"offset": 2, "limit": 2})
<|code_end|>
, generate the next line using the imports in this file:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.views import PaginateViewSet, NonJSONPaginateViewSet
and context (functions, classes, or occasionally code) from other files:
# Path: tests/support/decorators.py
#
# Path: tests/support/views.py
# class PaginateViewSet(ArtistViewSet):
# """Viewset that implements JSON API pagination."""
#
# pagination_class = JSONAPILimitOffsetPagination
#
# class NonJSONPaginateViewSet(ArtistViewSet):
# """Tests when a viewset is paginated but without a JSONAPI Paginator."""
#
# pagination_class = LimitOffsetPagination
. Output only the next line. | view_list = PaginateViewSet.as_view({"get": "list"}) |
Given the following code snippet before the placeholder: <|code_start|>
response = view_list(request)
response.render()
assert response["Content-Type"] == "application/vnd.api+json"
assert json.loads(response.content.decode()) == {
"meta": {"count": 6},
"links": {
"page[next]": f"http://testserver{list_url}?limit=2&offset=4",
"page[previous]": f"http://testserver{list_url}?limit=2",
},
"data": [
{
"id": "2",
"type": "artist",
"attributes": {"firstName": "Charles", "lastName": "Mingus"},
},
{
"id": "3",
"type": "artist",
"attributes": {"firstName": "Bill", "lastName": "Evans"},
},
],
}
@mark_urls
def test_no_pagination_limit_offset(factory: APIRequestFactory) -> None:
list_url = reverse("page-list")
request = factory.get(list_url, {"offset": 2, "limit": 2})
<|code_end|>
, predict the next line using imports from the current file:
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.views import PaginateViewSet, NonJSONPaginateViewSet
and context including class names, function names, and sometimes code from other files:
# Path: tests/support/decorators.py
#
# Path: tests/support/views.py
# class PaginateViewSet(ArtistViewSet):
# """Viewset that implements JSON API pagination."""
#
# pagination_class = JSONAPILimitOffsetPagination
#
# class NonJSONPaginateViewSet(ArtistViewSet):
# """Tests when a viewset is paginated but without a JSONAPI Paginator."""
#
# pagination_class = LimitOffsetPagination
. Output only the next line. | view_list = NonJSONPaginateViewSet.as_view({"get": "list"}) |
Given snippet: <|code_start|># -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: DrasticProduct.py,v 1.4 2009/08/31 21:02:06 rliebscher Exp $"
class DrasticProduct(Norm):
def __init__(self):
Norm.__init__(self,Norm.T_NORM)
def __call__(self,*args):
if len(args) != 2:
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from fuzzy.norm.Norm import Norm,NormException
and context:
# Path: fuzzy/norm/Norm.py
# class Norm(object):
# """Abstract Base class of any fuzzy norm"""
#
# # types of norm
# UNKNOWN = 0 #: type of norm unknown
# T_NORM = 1 #: norm is t-norm
# S_NORM = 2 #: norm is s-norm
#
# def __init__(self,type=0):
# """Initialize type of norm"""
# self._type = type
#
# def __call__(self,*args):
# """
# Calculate result of norm(arg1,arg2,...)
#
# @param args: list of floats as arguments for norm.
# @type args: list of float
# @return: result of norm calulation
# @rtype: float
# @raise NormException: any problem in calculation (wrong number of arguments, numerical problems)
# """
# raise NormException("abstract class %s can't be called" % self.__class__.__name__)
#
# def getType(self):
# """
# Return type of norm:
# 0 = not defined or not classified
# 1 = t-norm ( = Norm.T_NORM)
# 2 = s-norm ( = Norm.S_NORM)
#
# """
# return self._type
#
# class NormException(Exception):
# """Base class for any exception in norm calculations."""
# pass
which might include code, classes, or functions. Output only the next line. | raise NormException("%s is supported only for 2 parameters" % self.__class__.__name__ ) |
Here is a snippet: <|code_start|># ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
"""
Base class for any kind of parametric fuzzy norm.
"""
__revision__ = "$Id: ParametricNorm.py,v 1.8 2009/10/07 21:08:14 rliebscher Exp $"
class ParametricNorm(Norm):
"""Abstract base class for any parametric fuzzy norm
@ivar p: parameter for norm
@type p: float
"""
_range = None
def __init__(self,type,p):
"""Initialize type and parameter
@param p: parameter for norm
@type p: float
"""
super(ParametricNorm,self).__init__(type)
self.p = p
<|code_end|>
. Write the next line using the current file imports:
from fuzzy.norm.Norm import Norm
from fuzzy.utils import prop
from fuzzy.utils import checkRange
and context from other files:
# Path: fuzzy/norm/Norm.py
# class Norm(object):
# """Abstract Base class of any fuzzy norm"""
#
# # types of norm
# UNKNOWN = 0 #: type of norm unknown
# T_NORM = 1 #: norm is t-norm
# S_NORM = 2 #: norm is s-norm
#
# def __init__(self,type=0):
# """Initialize type of norm"""
# self._type = type
#
# def __call__(self,*args):
# """
# Calculate result of norm(arg1,arg2,...)
#
# @param args: list of floats as arguments for norm.
# @type args: list of float
# @return: result of norm calulation
# @rtype: float
# @raise NormException: any problem in calculation (wrong number of arguments, numerical problems)
# """
# raise NormException("abstract class %s can't be called" % self.__class__.__name__)
#
# def getType(self):
# """
# Return type of norm:
# 0 = not defined or not classified
# 1 = t-norm ( = Norm.T_NORM)
# 2 = s-norm ( = Norm.S_NORM)
#
# """
# return self._type
#
# Path: fuzzy/utils.py
# def prop(func):
# """Function decorator for defining property attributes
#
# The decorated function is expected to return a dictionary
# containing one or more of the following pairs:
# - fget - function for getting attribute value
# - fset - function for setting attribute value
# - fdel - function for deleting attribute
# This can be conveniently constructed by the locals() builtin
# function; see:
# U{http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/205183}
# """
# return property(doc=func.__doc__, **func())
, which may include functions, classes, or code. Output only the next line. | @prop |
Predict the next line after this snippet: <|code_start|># -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: GeometricMean.py,v 1.4 2009/08/07 07:19:19 rliebscher Exp $"
class GeometricMean(Norm):
def __init__(self):
Norm.__init__(self,0)
def __call__(self,*args):
<|code_end|>
using the current file's imports:
from fuzzy.norm.Norm import Norm,product
and any relevant context from other files:
# Path: fuzzy/norm/Norm.py
# class Norm(object):
# """Abstract Base class of any fuzzy norm"""
#
# # types of norm
# UNKNOWN = 0 #: type of norm unknown
# T_NORM = 1 #: norm is t-norm
# S_NORM = 2 #: norm is s-norm
#
# def __init__(self,type=0):
# """Initialize type of norm"""
# self._type = type
#
# def __call__(self,*args):
# """
# Calculate result of norm(arg1,arg2,...)
#
# @param args: list of floats as arguments for norm.
# @type args: list of float
# @return: result of norm calulation
# @rtype: float
# @raise NormException: any problem in calculation (wrong number of arguments, numerical problems)
# """
# raise NormException("abstract class %s can't be called" % self.__class__.__name__)
#
# def getType(self):
# """
# Return type of norm:
# 0 = not defined or not classified
# 1 = t-norm ( = Norm.T_NORM)
# 2 = s-norm ( = Norm.S_NORM)
#
# """
# return self._type
#
# def product(*args):
# """Calculate product of args.
#
# @param args: list of floats to multiply
# @type args: list of float
# @return: product of args
# @rtype: float
# """
# r = args[0]
# for x in args[1:]:
# r *= x
# return r
. Output only the next line. | return pow(product(*args),1.0/len(args)) |
Here is a snippet: <|code_start|>#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: COGS.py,v 1.4 2009/08/07 07:19:18 rliebscher Exp $"
class COGS(Base):
"""defuzzification for singletons."""
def __init__(self, INF=None, ACC=None, failsafe=None,*args,**keywords):
"""
@param failsafe: if is not possible to calculate a center of gravity,
return this value if not None or forward the exception
"""
super(COGS, self).__init__(INF,ACC,*args,**keywords)
self.failsafe = failsafe # which value if COG not calculable
def getValue(self,variable):
"""Defuzzyfication using center of gravity method."""
sum_1,sum_2 = 0.,0.
for adjective in variable.adjectives.values():
# get precomputed adjective set
set = adjective.set
if not isinstance(set,fuzzy.set.Singleton.Singleton):
<|code_end|>
. Write the next line using the current file imports:
from fuzzy.defuzzify.Base import Base,DefuzzificationException
import fuzzy.set.Singleton
and context from other files:
# Path: fuzzy/defuzzify/Base.py
# class Base(object):
# """Abstract base class for defuzzification
# which results in a numeric value.
#
# @ivar INF: inference norm, used with set of adjective and given value for it
# @type INF: L{fuzzy.norm.Norm.Norm}
# @ivar ACC: norm for accumulation of set of adjectives
# @type ACC: L{fuzzy.norm.Norm.Norm}
# @cvar _INF: default value when INF is None
# @type _INF: L{fuzzy.norm.Norm.Norm}
# @cvar _ACC: default value when ACC is None
# @type _ACC: L{fuzzy.norm.Norm.Norm}
# @ivar activated_sets: results of activation of adjectives of variable.
# @type activated_sets: {string:L{fuzzy.set.Polygon.Polygon}}
# @ivar accumulated_set: result of accumulation of activated sets
# @type accumulated_set: L{fuzzy.set.Polygon.Polygon}
# """
#
# # default values if instance values are not set
# _INF = Min()
# _ACC = Max()
#
# def __init__(self, INF=None, ACC=None):
# """
# @param INF: inference norm, used with set of adjective and given value for it
# @type INF: L{fuzzy.norm.Norm.Norm}
# @param ACC: norm for accumulation of set of adjectives
# @type ACC: L{fuzzy.norm.Norm.Norm}
# """
# self.ACC = ACC # accumulation
# self.INF = INF # inference
# self.activated_sets = {}
# self.accumulated_set = None
#
# def getValue(self,variable):
# """Defuzzyfication."""
# raise DefuzzificationException("don't use the abstract base class")
#
# # helper methods for sub classes
#
# def accumulate(self,variable,segment_size=None):
# """combining adjective values into one set"""
# self.activated_sets = {}
# temp = None
# for name,adjective in variable.adjectives.items():
# # get precomputed adjective set
# temp2 = norm((self.INF or self._INF),adjective.set,adjective.getMembership(),segment_size)
# self.activated_sets[name] = temp2
# # accumulate all adjectives
# if temp is None:
# temp = temp2
# else:
# temp = merge((self.ACC or self._ACC),temp,temp2,segment_size)
# self.accumulated_set = temp
# return temp
#
# def value_table(self,set):
# """get a value table of the polygon representation"""
# # get polygon representation
# ig = set.getIntervalGenerator()
# next = ig.nextInterval(None,None)
# while next is not None:
# x = next
# y = set(x)
# yield (x,y)
# # get next point from polygon
# next = ig.nextInterval(next,None)
#
# class DefuzzificationException(fuzzy.Exception.Exception):
# pass
, which may include functions, classes, or code. Output only the next line. | raise DefuzzificationException("Only Singleton for COGS defuzzification allowed.") |
Given the following code snippet before the placeholder: <|code_start|># -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: FrankUnion.py,v 1.4 2009/09/24 20:32:20 rliebscher Exp $"
class FrankUnion(ParametricNorm):
"""Frank 1979"""
_range = [ (0.,1.),(1.,inf_p) ]
def __init__(self,p=0.5):
ParametricNorm.__init__(self,ParametricNorm.S_NORM,p)
def __call__(self,*args):
if len(args) != 2:
<|code_end|>
, predict the next line using imports from the current file:
from fuzzy.norm.Norm import NormException
from fuzzy.norm.ParametricNorm import ParametricNorm
from math import log
from fuzzy.utils import inf_p
and context including class names, function names, and sometimes code from other files:
# Path: fuzzy/norm/Norm.py
# class NormException(Exception):
# """Base class for any exception in norm calculations."""
# pass
#
# Path: fuzzy/norm/ParametricNorm.py
# class ParametricNorm(Norm):
# """Abstract base class for any parametric fuzzy norm
#
# @ivar p: parameter for norm
# @type p: float
# """
# _range = None
#
# def __init__(self,type,p):
# """Initialize type and parameter
#
# @param p: parameter for norm
# @type p: float
# """
# super(ParametricNorm,self).__init__(type)
# self.p = p
#
# @prop
# def p():
# """x
# @type: float"""
# def fget(self):
# return self._p
# def fset(self,value):
# self._checkParam(value)
# self._p = value
# return locals()
#
# @prop
# def p_range():
# """range(s) of valid values for p"""
# def fget(self):
# return self._range
# return locals()
#
# def _checkParam(self,value):
# """check parameter if allowed for paramter p
# @param value: the value to be checked
# @type value: float"""
# from fuzzy.utils import checkRange
# if not checkRange(value,self._range):
# raise Exception("Parameter value %s is not allowed" % str(value))
#
# Path: fuzzy/utils.py
# def prop(func):
# def checkRange(value,ranges):
. Output only the next line. | raise NormException("%s is supported only for 2 parameters" % self.__class__.__name__ ) |
Continue the code snippet: <|code_start|>#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
"""Represents a fuzzy rule."""
__revision__ = "$Id: Rule.py,v 1.13 2009/10/07 21:08:13 rliebscher Exp $"
class Rule(object):
"""This is realizes an important part of the inference engine.
It represents and calculates the value of a fuzzy rule
and sets the given adjective to the appropriate value.
@cvar _CER: the default value (=Min()) for the norm used to calculate the certainty of a rule.
@type _CER: L{fuzzy.norm.Norm.Norm}
@ivar adjective: fuzzy adjective to set
@type adjective: L{fuzzy.Adjective.Adjective}
@ivar operator: Operator which provides the value to set
@type operator: L{fuzzy.operator.Operator.Operator}
@ivar certainty: how sure are we about this rule
@type certainty: float
@ivar CER: fuzzy norm to use with certainty (normally a t-norm)
@type CER: L{fuzzy.norm.Norm.Norm}
"""
# default if not set in instance
<|code_end|>
. Use current file imports:
from fuzzy.norm.Min import Min
import fuzzy.Adjective
and context (classes, functions, or code) from other files:
# Path: fuzzy/norm/Min.py
# class Min(Norm):
#
# def __init__(self):
# Norm.__init__(self,Norm.T_NORM)
#
# def __call__(self,*args):
# """Return minimum of given values."""
# return min(args)
. Output only the next line. | _CER = Min() |
Given snippet: <|code_start|># later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
"""Abstract base class for any parametric fuzzy complement"""
__revision__ = "$Id: Parametric.py,v 1.2 2009/10/07 21:08:14 rliebscher Exp $"
class Parametric(Base):
"""Abstract base class for any parametric fuzzy complement
@ivar p: parameter for complement
@type p: float
"""
_range = None
def __init__(self,p,*args,**keywords):
"""Initialize type and parameter
@param p: parameter for complement
@type p: float
"""
super(Parametric,self).__init__(*args,**keywords)
self.p = p
<|code_end|>
, continue by predicting the next line. Consider current file imports:
from fuzzy.complement.Base import Base
from fuzzy.utils import prop
from fuzzy.utils import checkRange
and context:
# Path: fuzzy/complement/Base.py
# class Base(object):
# """Base class for all complement methods"""
#
# def __init__(self,*args,**keywords):
# """Initialize the complement instance"""
# super(Base, self).__init__(*args,**keywords)
#
# def __call__(self,value):
# """Calculate the complement of the value.
# @param value: the value to complement
# @type value: float
# @return: the complemented value
# @rtype: float
# """
# raise ComplementException("don't use the abstract base class")
#
# Path: fuzzy/utils.py
# def prop(func):
# """Function decorator for defining property attributes
#
# The decorated function is expected to return a dictionary
# containing one or more of the following pairs:
# - fget - function for getting attribute value
# - fset - function for setting attribute value
# - fdel - function for deleting attribute
# This can be conveniently constructed by the locals() builtin
# function; see:
# U{http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/205183}
# """
# return property(doc=func.__doc__, **func())
which might include code, classes, or functions. Output only the next line. | @prop |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.